Dataset schema (column, dtype, observed range):

| Column | Dtype | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
hexsha: d16591a63a4e5172bba80d3831f315475a0f9651 | size: 1,646 | ext: py | lang: Python
repo path: app/main/forms.py | repo: Celinemmbonekerrine4/pitch-perfect | head: 818c2415325e689b75e1d19b5940211229a44704 | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
stars: null | issues: null | forks: null (no event datetimes recorded)
content:
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField
# `Required` is the legacy name for what newer WTForms releases call DataRequired
from wtforms.validators import Required, Email, EqualTo
from ..models import User


class LoginForm(FlaskForm):
    email = StringField('Your Email Address', validators=[Required(), Email()])
    password = PasswordField('Password', validators=[Required()])
    remember = BooleanField('Remember me')
    submit = SubmitField('Sign In')


class RegistrationForm(FlaskForm):
    email = StringField('Your Email Address', validators=[Required(), Email()])
    password = PasswordField('Password', validators=[Required()])
    # EqualTo makes the confirmation field match the password field
    confirm_password = PasswordField('Confirm Password', validators=[Required(), EqualTo('password')])
    submit = SubmitField('Sign Up')


class UpdateProfile(FlaskForm):
    bio = TextAreaField('Tell us about you.', validators=[Required()])
    submit = SubmitField('Submit')


class Interview(FlaskForm):
    title = StringField('Interview title', validators=[Required()])
    pitch = TextAreaField('Interview pitch', validators=[Required()])
    submit = SubmitField('Submit')


class BusinessPlan(FlaskForm):
    title = StringField('Business plan title', validators=[Required()])
    pitch = TextAreaField('Business plan pitch', validators=[Required()])
    submit = SubmitField('Submit')


class LifePitch(FlaskForm):
    title = StringField('Life-pitch title', validators=[Required()])
    pitch = TextAreaField('Life-pitch pitch', validators=[Required()])
    submit = SubmitField('Submit')


class Comments(FlaskForm):
    comments = TextAreaField('Any Comments??', validators=[Required()])
    submit = SubmitField('Comments...')
avg_line_length: 34.291667 | max_line_length: 84 | alphanum_fraction: 0.72661
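As an illustration of how a Flask-WTF form like the `LoginForm` above is normally consumed, here is a minimal sketch of a login view; the `app` instance, secret key, route, template name and redirect target are assumptions for the example, not part of the repository row.

```python
# Minimal sketch (assumed names): validating LoginForm from app/main/forms.py in a Flask view.
from flask import Flask, redirect, render_template, url_for

from app.main.forms import LoginForm  # the form class shown above

app = Flask(__name__)
app.config['SECRET_KEY'] = 'change-me'  # Flask-WTF needs a secret key for CSRF protection


@app.route('/login', methods=['GET', 'POST'])
def login():
    form = LoginForm()
    if form.validate_on_submit():  # runs the Required()/Email() validators on POST
        # authenticate form.email.data / form.password.data here (omitted)
        return redirect(url_for('index'))  # 'index' is a placeholder endpoint
    return render_template('login.html', login_form=form)
```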
hexsha: 08168ef56f97660641adb233b1d298391852678b | size: 6,686 | ext: py | lang: Python
repo path: ding/entry/main.py | repo: LuciusMos/DI-engine | head: b040b1c36afce038effec9eb483f625131573824 | licenses: ["Apache-2.0"] (identical across the stars/issues/forks column groups)
stars: 464 (2021-07-08T07:26:33.000Z to 2022-03-31T12:35:16.000Z) | issues: 177 (2021-07-09T08:22:55.000Z to 2022-03-31T07:35:22.000Z) | forks: 92 (2021-07-08T12:16:37.000Z to 2022-03-31T09:24:41.000Z)
content:
"""
Main entry
"""
from collections import deque
import torch
import numpy as np
import time
from rich import print
from functools import partial
from ding.model import QAC
from ding.utils import set_pkg_seed
from ding.envs import BaseEnvManager, get_vec_env_setting
from ding.config import compile_config
from ding.policy import SACPolicy
from ding.torch_utils import to_ndarray, to_tensor
from ding.rl_utils import get_epsilon_greedy_fn
from ding.worker.collector.base_serial_evaluator import VectorEvalMonitor
from ding.framework import Task
from dizoo.classic_control.pendulum.config.pendulum_sac_config import main_config, create_config
class DequeBuffer:
"""
For demonstration only
"""
def __init__(self, maxlen=20000) -> None:
self.memory = deque(maxlen=maxlen)
self.n_counter = 0
def push(self, data):
self.memory.append(data)
self.n_counter += 1
def sample(self, size):
if size > len(self.memory):
            print('[Warning] not enough data: {}/{}'.format(size, len(self.memory)))
return None
indices = list(np.random.choice(a=len(self.memory), size=size, replace=False))
return [self.memory[i] for i in indices]
# return random.sample(self.memory, size)
def count(self):
return len(self.memory)
class Pipeline:
def __init__(self, cfg, model: torch.nn.Module):
self.cfg = cfg
self.model = model
self.policy = SACPolicy(cfg.policy, model=model)
if 'eps' in cfg.policy.other:
eps_cfg = cfg.policy.other.eps
self.epsilon_greedy = get_epsilon_greedy_fn(eps_cfg.start, eps_cfg.end, eps_cfg.decay, eps_cfg.type)
def act(self, env):
def _act(ctx):
ctx.setdefault("collect_env_step", 0)
ctx.keep("collect_env_step")
ctx.obs = env.ready_obs
policy_kwargs = {}
if hasattr(self, 'epsilon_greedy'):
policy_kwargs['eps'] = self.epsilon_greedy(ctx.collect_env_step)
policy_output = self.policy.collect_mode.forward(ctx.obs, **policy_kwargs)
ctx.action = to_ndarray({env_id: output['action'] for env_id, output in policy_output.items()})
ctx.policy_output = policy_output
return _act
def collect(self, env, buffer_, task: Task):
def _collect(ctx):
timesteps = env.step(ctx.action)
ctx.collect_env_step += len(timesteps)
timesteps = to_tensor(timesteps, dtype=torch.float32)
ctx.collect_transitions = []
for env_id, timestep in timesteps.items():
transition = self.policy.collect_mode.process_transition(
ctx.obs[env_id], ctx.policy_output[env_id], timestep
)
ctx.collect_transitions.append(transition)
buffer_.push(transition)
return _collect
def learn(self, buffer_: DequeBuffer, task: Task):
def _learn(ctx):
ctx.setdefault("train_iter", 0)
ctx.keep("train_iter")
for i in range(self.cfg.policy.learn.update_per_collect):
data = buffer_.sample(self.policy.learn_mode.get_attribute('batch_size'))
if not data:
break
learn_output = self.policy.learn_mode.forward(data)
if ctx.train_iter % 20 == 0:
print(
'Current Training: Train Iter({})\tLoss({:.3f})'.format(
ctx.train_iter, learn_output['total_loss']
)
)
ctx.train_iter += 1
return _learn
def evaluate(self, env):
def _eval(ctx):
ctx.setdefault("train_iter", 0)
ctx.setdefault("last_eval_iter", -1)
ctx.keep("train_iter", "last_eval_iter")
if ctx.train_iter == ctx.last_eval_iter or (
(ctx.train_iter - ctx.last_eval_iter) < self.cfg.policy.eval.evaluator.eval_freq
and ctx.train_iter != 0):
return
env.reset()
eval_monitor = VectorEvalMonitor(env.env_num, self.cfg.env.n_evaluator_episode)
while not eval_monitor.is_finished():
obs = env.ready_obs
obs = to_tensor(obs, dtype=torch.float32)
policy_output = self.policy.eval_mode.forward(obs)
action = to_ndarray({i: a['action'] for i, a in policy_output.items()})
timesteps = env.step(action)
timesteps = to_tensor(timesteps, dtype=torch.float32)
for env_id, timestep in timesteps.items():
if timestep.done:
self.policy.eval_mode.reset([env_id])
reward = timestep.info['final_eval_reward']
eval_monitor.update_reward(env_id, reward)
episode_reward = eval_monitor.get_episode_reward()
eval_reward = np.mean(episode_reward)
stop_flag = eval_reward >= self.cfg.env.stop_value and ctx.train_iter > 0
print('Current Evaluation: Train Iter({})\tEval Reward({:.3f})'.format(ctx.train_iter, eval_reward))
ctx.last_eval_iter = ctx.train_iter
if stop_flag:
ctx.finish = True
return _eval
def main(cfg, model, seed=0):
with Task(async_mode=False) as task:
env_fn, collector_env_cfg, evaluator_env_cfg = get_vec_env_setting(cfg.env)
collector_env = BaseEnvManager(env_fn=[partial(env_fn, cfg=c) for c in collector_env_cfg], cfg=cfg.env.manager)
evaluator_env = BaseEnvManager(env_fn=[partial(env_fn, cfg=c) for c in evaluator_env_cfg], cfg=cfg.env.manager)
collector_env.seed(seed)
evaluator_env.seed(seed, dynamic_seed=False)
set_pkg_seed(seed, use_cuda=cfg.policy.cuda)
collector_env.launch()
evaluator_env.launch()
replay_buffer = DequeBuffer()
sac = Pipeline(cfg, model)
# task.use_step_wrapper(StepTimer(print_per_step=1))
task.use(sac.evaluate(evaluator_env), filter_labels=["standalone", "node.0"])
task.use(
task.sequence(sac.act(collector_env), sac.collect(collector_env, replay_buffer, task=task)),
filter_labels=["standalone", "node.[1-9]*"]
)
task.use(sac.learn(replay_buffer, task=task), filter_labels=["standalone", "node.0"])
task.run(max_step=100000)
if __name__ == "__main__":
cfg = compile_config(main_config, create_cfg=create_config, auto=True)
model = QAC(**cfg.policy.model)
main(cfg, model)
avg_line_length: 38.647399 | max_line_length: 119 | alphanum_fraction: 0.618756
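The `DequeBuffer` in the file above is explicitly "for demonstration only"; the short sketch below exercises its push/sample/count contract with placeholder transitions (the dicts are illustrative, not DI-engine's real transition format).

```python
# Quick exercise of the DequeBuffer defined in ding/entry/main.py above.
# The transition dicts are placeholders, not DI-engine's actual transition schema.
buffer_ = DequeBuffer(maxlen=100)

for step in range(10):
    buffer_.push({'obs': step, 'action': 0, 'reward': 1.0, 'done': False})

batch = buffer_.sample(4)            # 4 transitions drawn without replacement
print(buffer_.count(), len(batch))   # -> 10 4

assert buffer_.sample(50) is None    # asking for more than is stored prints a warning, returns None
```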
hexsha: 04c933c7f8b10fb1ade3039bd32f815960ab3cc8 | size: 12,534 | ext: py | lang: Python
repo path: src/pyatmo/thermostat.py | repo: chpego/pyatmo | head: 54cd02c0cd27d1b3a0faf78e6931e53d8e587ea3 | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
stars: 2 (2020-12-03T20:46:23.000Z to 2022-01-05T11:42:56.000Z) | issues: 1 (2021-07-21T13:13:48.000Z to 2021-07-23T18:40:36.000Z) | forks: 1 (2021-07-21T12:52:04.000Z to 2021-07-21T12:52:04.000Z)
content:
"""Support for Netatmo energy devices (relays, thermostats and valves)."""
from __future__ import annotations
import logging
from abc import ABC
from collections import defaultdict
from typing import Any
from .auth import AbstractAsyncAuth, NetatmoOAuth2
from .exceptions import InvalidRoom, NoSchedule
from .helpers import _BASE_URL, extract_raw_data
LOG = logging.getLogger(__name__)
_GETHOMESDATA_REQ = _BASE_URL + "api/homesdata"
_GETHOMESTATUS_REQ = _BASE_URL + "api/homestatus"
_SETTHERMMODE_REQ = _BASE_URL + "api/setthermmode"
_SETROOMTHERMPOINT_REQ = _BASE_URL + "api/setroomthermpoint"
_GETROOMMEASURE_REQ = _BASE_URL + "api/getroommeasure"
_SWITCHHOMESCHEDULE_REQ = _BASE_URL + "api/switchhomeschedule"
class AbstractHomeData(ABC):
"""Abstract class of Netatmo energy devices."""
raw_data: dict = defaultdict(dict)
homes: dict = defaultdict(dict)
modules: dict = defaultdict(dict)
rooms: dict = defaultdict(dict)
schedules: dict = defaultdict(dict)
zones: dict = defaultdict(dict)
setpoint_duration: dict = defaultdict(dict)
def process(self) -> None:
"""Process data from API."""
self.homes = {d["id"]: d for d in self.raw_data}
for item in self.raw_data:
home_id = item.get("id")
if not (home_name := item.get("name")):
home_name = "Unknown"
self.homes[home_id]["name"] = home_name
if "modules" not in item:
continue
for module in item["modules"]:
self.modules[home_id][module["id"]] = module
self.setpoint_duration[home_id] = item.get(
"therm_setpoint_default_duration",
)
for room in item.get("rooms", []):
self.rooms[home_id][room["id"]] = room
for schedule in item.get("schedules", []):
schedule_id = schedule["id"]
self.schedules[home_id][schedule_id] = schedule
if schedule_id not in self.zones[home_id]:
self.zones[home_id][schedule_id] = {}
for zone in schedule["zones"]:
self.zones[home_id][schedule_id][zone["id"]] = zone
def _get_selected_schedule(self, home_id: str) -> dict:
"""Get the selected schedule for a given home ID."""
for value in self.schedules.get(home_id, {}).values():
if "selected" in value.keys():
return value
return {}
def get_hg_temp(self, home_id: str) -> float | None:
"""Return frost guard temperature value."""
return self._get_selected_schedule(home_id).get("hg_temp")
def get_away_temp(self, home_id: str) -> float | None:
"""Return the configured away temperature value."""
return self._get_selected_schedule(home_id).get("away_temp")
def get_thermostat_type(self, home_id: str, room_id: str) -> str | None:
"""Return the thermostat type of the room."""
for module in self.modules.get(home_id, {}).values():
if module.get("room_id") == room_id:
return module.get("type")
return None
def is_valid_schedule(self, home_id: str, schedule_id: str):
"""Check if valid schedule."""
schedules = (
self.schedules[home_id][s]["id"] for s in self.schedules.get(home_id, {})
)
return schedule_id in schedules
class HomeData(AbstractHomeData):
"""Class of Netatmo energy devices."""
def __init__(self, auth: NetatmoOAuth2) -> None:
"""Initialize the Netatmo home data.
Arguments:
auth {NetatmoOAuth2} -- Authentication information with a valid access token
"""
self.auth = auth
def update(self) -> None:
"""Fetch and process data from API."""
resp = self.auth.post_request(url=_GETHOMESDATA_REQ)
self.raw_data = extract_raw_data(resp.json(), "homes")
self.process()
def switch_home_schedule(self, home_id: str, schedule_id: str) -> Any:
"""Switch the schedule for a give home ID."""
if not self.is_valid_schedule(home_id, schedule_id):
raise NoSchedule(f"{schedule_id} is not a valid schedule id")
post_params = {"home_id": home_id, "schedule_id": schedule_id}
resp = self.auth.post_request(url=_SWITCHHOMESCHEDULE_REQ, params=post_params)
LOG.debug("Response: %s", resp)
class AsyncHomeData(AbstractHomeData):
"""Class of Netatmo energy devices."""
def __init__(self, auth: AbstractAsyncAuth) -> None:
"""Initialize the Netatmo home data.
Arguments:
auth {AbstractAsyncAuth} -- Authentication information with a valid access token
"""
self.auth = auth
async def async_update(self):
"""Fetch and process data from API."""
resp = await self.auth.async_post_request(url=_GETHOMESDATA_REQ)
assert not isinstance(resp, bytes)
self.raw_data = extract_raw_data(await resp.json(), "homes")
self.process()
async def async_switch_home_schedule(self, home_id: str, schedule_id: str) -> None:
"""Switch the schedule for a give home ID."""
if not self.is_valid_schedule(home_id, schedule_id):
raise NoSchedule(f"{schedule_id} is not a valid schedule id")
resp = await self.auth.async_post_request(
url=_SWITCHHOMESCHEDULE_REQ,
params={"home_id": home_id, "schedule_id": schedule_id},
)
LOG.debug("Response: %s", resp)
class AbstractHomeStatus(ABC):
"""Abstract class of the Netatmo home status."""
raw_data: dict = defaultdict(dict)
rooms: dict = defaultdict(dict)
thermostats: dict = defaultdict(dict)
valves: dict = defaultdict(dict)
relays: dict = defaultdict(dict)
def process(self) -> None:
"""Process data from API."""
for room in self.raw_data.get("rooms", []):
self.rooms[room["id"]] = room
for module in self.raw_data.get("modules", []):
if module["type"] in {"NATherm1", "OTM"}:
self.thermostats[module["id"]] = module
elif module["type"] == "NRV":
self.valves[module["id"]] = module
elif module["type"] in {"OTH", "NAPlug"}:
self.relays[module["id"]] = module
def get_room(self, room_id: str) -> dict:
"""Return room data for a given room id."""
for value in self.rooms.values():
if value["id"] == room_id:
return value
raise InvalidRoom(f"No room with ID {room_id}")
def get_thermostat(self, room_id: str) -> dict:
"""Return thermostat data for a given room id."""
for value in self.thermostats.values():
if value["id"] == room_id:
return value
raise InvalidRoom(f"No room with ID {room_id}")
def get_relay(self, room_id: str) -> dict:
"""Return relay data for a given room id."""
for value in self.relays.values():
if value["id"] == room_id:
return value
raise InvalidRoom(f"No room with ID {room_id}")
def get_valve(self, room_id: str) -> dict:
"""Return valve data for a given room id."""
for value in self.valves.values():
if value["id"] == room_id:
return value
raise InvalidRoom(f"No room with ID {room_id}")
def set_point(self, room_id: str) -> float | None:
"""Return the setpoint of a given room."""
return self.get_room(room_id).get("therm_setpoint_temperature")
def set_point_mode(self, room_id: str) -> str | None:
"""Return the setpointmode of a given room."""
return self.get_room(room_id).get("therm_setpoint_mode")
def measured_temperature(self, room_id: str) -> float | None:
"""Return the measured temperature of a given room."""
return self.get_room(room_id).get("therm_measured_temperature")
def boiler_status(self, module_id: str) -> bool | None:
"""Return the status of the boiler status."""
return self.get_thermostat(module_id).get("boiler_status")
class HomeStatus(AbstractHomeStatus):
"""Class of the Netatmo home status."""
def __init__(self, auth: NetatmoOAuth2, home_id: str):
"""Initialize the Netatmo home status.
Arguments:
auth {NetatmoOAuth2} -- Authentication information with a valid access token
home_id {str} -- ID for targeted home
"""
self.auth = auth
self.home_id = home_id
def update(self) -> None:
"""Fetch and process data from API."""
resp = self.auth.post_request(
url=_GETHOMESTATUS_REQ,
params={"home_id": self.home_id},
)
self.raw_data = extract_raw_data(resp.json(), "home")
self.process()
def set_thermmode(
self,
mode: str,
end_time: int = None,
schedule_id: str = None,
) -> str | None:
"""Set thermotat mode."""
post_params = {"home_id": self.home_id, "mode": mode}
if end_time is not None and mode in {"hg", "away"}:
post_params["endtime"] = str(end_time)
if schedule_id is not None and mode == "schedule":
post_params["schedule_id"] = schedule_id
return self.auth.post_request(url=_SETTHERMMODE_REQ, params=post_params).json()
def set_room_thermpoint(
self,
room_id: str,
mode: str,
temp: float = None,
end_time: int = None,
) -> str | None:
"""Set room themperature set point."""
post_params = {"home_id": self.home_id, "room_id": room_id, "mode": mode}
# Temp and endtime should only be send when mode=='manual', but netatmo api can
# handle that even when mode == 'home' and these settings don't make sense
if temp is not None:
post_params["temp"] = str(temp)
if end_time is not None:
post_params["endtime"] = str(end_time)
return self.auth.post_request(
url=_SETROOMTHERMPOINT_REQ,
params=post_params,
).json()
class AsyncHomeStatus(AbstractHomeStatus):
"""Class of the Netatmo home status."""
def __init__(self, auth: AbstractAsyncAuth, home_id: str):
"""Initialize the Netatmo home status.
Arguments:
auth {AbstractAsyncAuth} -- Authentication information with a valid access token
home_id {str} -- ID for targeted home
"""
self.auth = auth
self.home_id = home_id
async def async_update(self) -> None:
"""Fetch and process data from API."""
resp = await self.auth.async_post_request(
url=_GETHOMESTATUS_REQ,
params={"home_id": self.home_id},
)
assert not isinstance(resp, bytes)
self.raw_data = extract_raw_data(await resp.json(), "home")
self.process()
async def async_set_thermmode(
self,
mode: str,
end_time: int = None,
schedule_id: str = None,
) -> str | None:
"""Set thermotat mode."""
post_params = {"home_id": self.home_id, "mode": mode}
if end_time is not None and mode in {"hg", "away"}:
post_params["endtime"] = str(end_time)
if schedule_id is not None and mode == "schedule":
post_params["schedule_id"] = schedule_id
resp = await self.auth.async_post_request(
url=_SETTHERMMODE_REQ,
params=post_params,
)
assert not isinstance(resp, bytes)
return await resp.json()
async def async_set_room_thermpoint(
self,
room_id: str,
mode: str,
temp: float = None,
end_time: int = None,
) -> str | None:
"""Set room themperature set point."""
post_params = {"home_id": self.home_id, "room_id": room_id, "mode": mode}
# Temp and endtime should only be send when mode=='manual', but netatmo api can
# handle that even when mode == 'home' and these settings don't make sense
if temp is not None:
post_params["temp"] = str(temp)
if end_time is not None:
post_params["endtime"] = str(end_time)
resp = await self.auth.async_post_request(
url=_SETROOMTHERMPOINT_REQ,
params=post_params,
)
assert not isinstance(resp, bytes)
return await resp.json()
avg_line_length: 34.624309 | max_line_length: 92 | alphanum_fraction: 0.608106
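As a usage illustration of the classes above, here is a minimal synchronous sketch; it assumes `auth` is an already-authenticated `NetatmoOAuth2` instance (construction and token handling omitted) and that the account has at least one home.

```python
# Sketch only: `auth` is assumed to be an authenticated NetatmoOAuth2 instance.
home_data = HomeData(auth)
home_data.update()                       # calls api/homesdata and process()

home_id = next(iter(home_data.homes))    # pick the first home ID
print(home_data.get_away_temp(home_id))  # away temperature from the selected schedule

status = HomeStatus(auth, home_id=home_id)
status.update()                          # calls api/homestatus and process()

for room_id in status.rooms:
    print(room_id, status.measured_temperature(room_id), status.set_point(room_id))
```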
hexsha: e62344877dd8fa9a57aea577d61fe01ae0cda495 | size: 1,041 | ext: py | lang: Python
repo path: symposion/proposals/forms.py | repo: theofanislekkas/updated-symp | head: 2bf5fa85ef2adb71325cbdd2bdfef2b0742b614a | licenses: ["BSD-3-Clause"] (identical across the stars/issues/forks column groups)
stars: null | issues: null | forks: null (no event datetimes recorded)
content:
from django import forms
from django.db.models import Q

from symposion.proposals.models import SupportingDocument


# @@@ generic proposal form


class AddSpeakerForm(forms.Form):

    email = forms.EmailField(
        label="Email address of new speaker (use their email address, not yours)"
    )

    def __init__(self, *args, **kwargs):
        self.proposal = kwargs.pop("proposal")
        super(AddSpeakerForm, self).__init__(*args, **kwargs)

    def clean_email(self):
        value = self.cleaned_data["email"]
        exists = self.proposal.additional_speakers.filter(
            Q(user=None, invite_email=value) |
            Q(user__email=value)
        ).exists()
        if exists:
            raise forms.ValidationError(
                "This email address has already been invited to your talk proposal"
            )
        return value


class SupportingDocumentCreateForm(forms.ModelForm):

    class Meta:
        model = SupportingDocument
        fields = [
            "file",
            "description",
        ]
avg_line_length: 26.025 | max_line_length: 83 | alphanum_fraction: 0.626321
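A brief sketch of how `AddSpeakerForm` is typically driven from a Django view; the view, URL names and template path are illustrative assumptions, while the required `proposal=` keyword mirrors the form's `__init__` above.

```python
# Hypothetical view wiring for AddSpeakerForm; only the proposal kwarg handling
# is taken from the form above, the rest of the names are assumed.
from django.shortcuts import redirect, render


def add_speaker(request, proposal):
    form = AddSpeakerForm(request.POST or None, proposal=proposal)
    if request.method == "POST" and form.is_valid():
        email = form.cleaned_data["email"]  # clean_email() has already rejected duplicate invites
        # ... create and send the speaker invitation here (omitted) ...
        return redirect("proposal_detail", pk=proposal.pk)
    return render(request, "proposals/add_speaker.html", {"form": form})
```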
hexsha: a416cf5b7fa3bc265bd1997155c3d980b56d5de0 | size: 1,842 | ext: py | lang: Python
repo path: src/application/command/user/update_role.py | repo: jagoPG/-restaurant-ml-inspector | head: 4efc7855401cc8cfa9d5e470c14685158a607448 | licenses: ["Apache-2.0"] (identical across the stars/issues/forks column groups)
stars: 1 (2018-07-10T12:53:35.000Z to 2018-07-10T12:53:35.000Z) | issues: null | forks: null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Copyright 2017-2018 Jagoba Pérez-Gómez

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

from src.domain.exception import UserDoesNotExist
from src.domain.model import User
from src.infrastructure.command_bus import CommandHandler
from uuid import uuid4


class UpdateUserRoleCommand(object):
    """
    Changes the roles of a user
    """

    def __init__(self, user_identifier, roles):
        self.user_identifier = user_identifier
        self.roles = roles


class UpdateUserRole(CommandHandler):

    def __init__(self):
        self.user_repository = None

    def invoke(self, command):
        user = self.__get_user(command.user_identifier)
        self.__manage_roles(user, command.roles)

    def __get_user(self, user_id):
        user = self.user_repository.get_of_identifier(user_id)
        if not user:
            raise UserDoesNotExist
        return user

    def __manage_roles(self, user, roles):
        for role in User.get_roles():
            if role not in roles:
                user.revoke_role(role)
            else:
                user.grant_role(role)
                if role == 'DEVELOPER':
                    self.__generate_api_key(user)
        self.user_repository.persist(user)

    @staticmethod
    def __generate_api_key(user):
        user.api_key = uuid4().__str__()
avg_line_length: 29.238095 | max_line_length: 72 | alphanum_fraction: 0.688382
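To make the command/handler wiring above concrete, here is a small sketch with stand-in collaborators; `FakeUser` and `InMemoryUserRepository` are illustrative stubs, not project classes, and the sketch still relies on the project's `User.get_roles()` returning the configured role names.

```python
# Illustrative wiring of UpdateUserRole with stub collaborators (not project classes).
class FakeUser:
    def __init__(self, identifier):
        self.identifier = identifier
        self.roles = set()
        self.api_key = None

    def grant_role(self, role):
        self.roles.add(role)

    def revoke_role(self, role):
        self.roles.discard(role)


class InMemoryUserRepository:
    def __init__(self, users):
        self._users = {user.identifier: user for user in users}

    def get_of_identifier(self, identifier):
        return self._users.get(identifier)

    def persist(self, user):
        self._users[user.identifier] = user


handler = UpdateUserRole()
handler.user_repository = InMemoryUserRepository([FakeUser('user-1')])
# Grants DEVELOPER (which also assigns an API key) and revokes every other role
# returned by the domain User.get_roles().
handler.invoke(UpdateUserRoleCommand('user-1', roles=['DEVELOPER']))
```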
hexsha: 5cfd8e90804b3fbaf43beeb1113203bac5bcaf37 | size: 7,844 | ext: py | lang: Python
repo path: test/x509Validation/ValidationTest.py | repo: river2sea/X509Validation | head: 878584cc59a26cdad7ffc719ea94d87ad303433a | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
stars: 7 (2015-11-05T14:14:53.000Z to 2020-10-24T11:51:48.000Z) | issues: 5 (2015-11-02T04:39:05.000Z to 2015-12-08T02:49:42.000Z) | forks: 1 (2022-03-13T02:06:23.000Z to 2022-03-13T02:06:23.000Z)
content:
import datetime
import sys
import traceback
import unittest
from cryptography import *
from cryptography import x509
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
from .Rule import CompositeValidationRule, ValidityPeriodRule, \
BasicConstraintsRule, SignatureHashAlgorithmRule, SignatureVerificationRule, \
KeyUsageExtensionRule, CertificateRevocationListRule
from .Validation import CertificateChainDelegate, \
ListBackedCertificateLookup, CertificateChain, \
CertificateRevocationListLookup
#xyz = x509.KeyUsage(digital_signature, content_commitment, key_encipherment, data_encipherment, key_agreement, key_cert_sign, crl_sign, encipher_only, decipher_only)
trustedKeyUsage = x509.KeyUsage(
False, # digital_signature
False, # content_commitment
False, # key_encipherment
False, # data_encipherment
False, # key_agreement
True, # key_cert_sign
True, # crl_sign
False, # encipher_only
False # decipher_only
)
untrustedKeyUsage = x509.KeyUsage(
True, # digital_signature
True, # content_commitment (aka non_repudiation)
True, # key_encipherment
True, # data_encipherment
False, # key_agreement
False, # key_cert_sign
False, # crl_sign
False, # encipher_only
False # decipher_only
)
trustedRuleSet = CompositeValidationRule( name = "Trusted Rule Set")
trustedRuleSet.addRule( ValidityPeriodRule() )
trustedRuleSet.addRule( BasicConstraintsRule( True, 1 ) )
trustedRuleSet.addRule( KeyUsageExtensionRule( trustedKeyUsage ) )
trustedRuleSet.addRule( SignatureHashAlgorithmRule( hashes.SHA256 ) )
# trustedRuleSet.addRule( CriticalExtensionsRule() )
trustedRuleSet.addRule( SignatureVerificationRule() )
untrustedRuleSet = CompositeValidationRule( name = "Untrusted Rule Set" )
untrustedRuleSet.addRule( ValidityPeriodRule() )
untrustedRuleSet.addRule( BasicConstraintsRule( False, 0 ) )
untrustedRuleSet.addRule( KeyUsageExtensionRule( untrustedKeyUsage ) )
untrustedRuleSet.addRule( SignatureHashAlgorithmRule( hashes.SHA256 ) )
# untrustedRuleSet.addRule( CriticalExtensionsRule() )
untrustedRuleSet.addRule( SignatureVerificationRule() )
crlRuleSet = CompositeValidationRule( name = "CRL Rule Set" )
crlRuleSet.addRule( ValidityPeriodRule() )
crlRuleSet.addRule( BasicConstraintsRule( False, 0 ) )
crlRuleSet.addRule( KeyUsageExtensionRule( untrustedKeyUsage ) )
crlRuleSet.addRule( SignatureHashAlgorithmRule( hashes.SHA256 ) )
# crlRuleSet.addRule( CriticalExtensionsRule() )
crlRuleSet.addRule( SignatureVerificationRule() )
def dumpTraceback():
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_tb( exc_traceback, limit=1, file=sys.stdout )
traceback.print_exception( exc_type, exc_value, exc_traceback, file=sys.stdout )
class TestCertificateChainDelegate( CertificateChainDelegate ):
def __init__( self ):
self._errors = []
@property
def errors( self ):
return self._errors
def currentTime( self ):
return datetime.datetime.now()
def verifySignature( self, issuerCertificate, subjectCertificate ):
'''
This test is assuming a signature algorithm of sha256WithRSAEncryption/null-parameters.
'''
try:
# print( 'Verifying the signature of the subject certificate({0}) with the issuerCertificate({1})...'.format( subjectCertificate, issuerCertificate ) )
issuerPublicKey = issuerCertificate.public_key()
hashAlgorithm = subjectCertificate.signature_hash_algorithm
tbsCertificate = subjectCertificate.tbs_certificate_bytes
subjectSignature = subjectCertificate.signature
padding = PKCS1v15()
verifier = issuerPublicKey.verifier( subjectSignature, padding, hashAlgorithm )
verifier.update( tbsCertificate )
verifier.verify()
return True
except InvalidSignature:
return False
except Exception as e:
raise e
def ruleFailed( self, ruleResult ):
self._errors.append( ruleResult )
def shouldFailEarly( self ):
'''
Return True if path validation should abort when the first
rule fails, or if it should continue processing the certificate
so we can gather all of the errors in the certificate when it
contains more than one defect.
'''
return False
def dumpErrors( self ):
for error in self.errors:
print( error )
class TestCRLLookup( CertificateRevocationListLookup ):
def __init__( self ):
self._serialNumbers = []
def addSerialNumber( self, serialNumber ):
self._serialNumbers.append( serialNumber )
def certificateIsListed( self, serialNumber ):
if serialNumber in self._serialNumbers:
return True
return False
class ValidationTest( unittest.TestCase ):
@classmethod
def setUpClass( cls ):
trustedCertificates = []
trustedCertificate = cls.loadDERCertifcate( 'data/PKITS/certs/TrustAnchorRootCertificate.crt' )
trustedCertificates.append( trustedCertificate )
trustedCertificate = cls.loadDERCertifcate( 'data/PKITS/certs/GoodCACert.crt' )
trustedCertificates.append( trustedCertificate )
trustedCertificate = cls.loadDERCertifcate( 'data/PKITS/certs/GoodsubCACert.crt' )
trustedCertificates.append( trustedCertificate )
cls.lookup = ListBackedCertificateLookup( trustedCertificates )
@classmethod
def loadDERCertifcate( cls, path ):
with open( path, 'rb' ) as inputFile:
data = inputFile.read()
certificate = x509.load_der_x509_certificate( data, default_backend() )
return certificate
def test_GoodCertificateValidation( self ):
untrustedCertificate = ValidationTest.loadDERCertifcate( 'data/PKITS/certs/ValidCertificatePathTest1EE.crt' )
delegate = TestCertificateChainDelegate()
chain = CertificateChain( delegate, ValidationTest.lookup, trustedRuleSet, untrustedRuleSet )
isValid = chain.isValid( untrustedCertificate )
if not isValid:
delegate.dumpErrors()
self.assertTrue( isValid, 'Certificate is invalid.' )
def test_BadCertificateValidation( self ):
untrustedCertificate = ValidationTest.loadDERCertifcate( 'data/PKITS/certs/BadSignedCACert.crt' )
delegate = TestCertificateChainDelegate()
chain = CertificateChain( delegate, ValidationTest.lookup, trustedRuleSet, trustedRuleSet )
isValid = chain.isValid( untrustedCertificate )
if isValid:
delegate.dumpErrors()
self.assertTrue( not isValid, 'Certificate is valid, expected invalid.' )
def test_CRLLookup( self ):
untrustedCertificate = ValidationTest.loadDERCertifcate( 'data/PKITS/certs/ValidCertificatePathTest1EE.crt' )
crlLookup = TestCRLLookup()
crlLookup.addSerialNumber( untrustedCertificate.serial )
crlRuleSet.addRule( CertificateRevocationListRule( crlLookup ) )
delegate = TestCertificateChainDelegate()
chain = CertificateChain( delegate, ValidationTest.lookup, trustedRuleSet, crlRuleSet )
isValid = chain.isValid( untrustedCertificate )
self.assertTrue( not isValid, 'Certificate is valid, expected invalid (on CRL).' )
if __name__ == "__main__":
unittest.main()
avg_line_length: 41.068063 | max_line_length: 166 | alphanum_fraction: 0.706272
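The `verifySignature` delegate above uses the `public_key().verifier(...)` interface, which current releases of the `cryptography` package have replaced with a one-shot `verify()` call; a sketch of the equivalent check under that newer API (still assuming an RSA signature with PKCS1v15 padding, as the original comment states) looks like this:

```python
# Equivalent check with the current `cryptography` API (RSA + PKCS1v15 assumed,
# matching the sha256WithRSAEncryption note in verifySignature above).
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15


def verify_signature(issuer_certificate, subject_certificate) -> bool:
    issuer_public_key = issuer_certificate.public_key()
    try:
        issuer_public_key.verify(
            subject_certificate.signature,
            subject_certificate.tbs_certificate_bytes,
            PKCS1v15(),
            subject_certificate.signature_hash_algorithm,
        )
        return True
    except InvalidSignature:
        return False
```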
hexsha: dee2528687e98f4906ca3c080c5262e52a26e943 | size: 38,768 | ext: py | lang: Python
repo path: proxy/proxy.py | repo: Eeyhan/get_jobs | head: d80d577f8ba4fc1498ef251c4fffb66585f89040 | licenses: ["MIT"] (identical across the stars/issues/forks column groups)
stars: 31 (2019-09-08T14:35:04.000Z to 2022-01-10T06:39:08.000Z) | issues: 3 (2021-06-18T03:04:14.000Z to 2022-02-28T06:17:46.000Z) | forks: 17 (2019-09-09T07:26:59.000Z to 2022-03-03T11:41:51.000Z)
content:
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author : Eeyhan
# @File : proxy.py

import gevent
from gevent import monkey

monkey.patch_all()  # comment this out if you want to use the process pool instead

import json
import re
import asyncio
import requests
from lxml import etree
from bs4 import BeautifulSoup
import random
from functools import reduce
from concurrent.futures import ThreadPoolExecutor, as_completed, wait, FIRST_COMPLETED
import time
import redis
import pytesseract
from PIL import Image
import js2py
import os
import sys

sys.path.append(os.path.dirname(os.path.dirname(__file__)))

from config import PROXY_URLS, USER_AGENT, TEST_PROXY_URLS, POOL


class BaseProxy(object):
    """Fetch proxy IPs."""

    def __init__(self):
        self.header = None
        self.proxy_list = []  # list of scraped proxy IPs
        self.user_agent = USER_AGENT  # user agents for requests
        self.user_agent_66ip = random.choice(USER_AGENT)  # fixed UA used for the 66ip site
        self.header = self.get_header  # request headers
        # computed once at construction time to avoid rebuilding them on every request
        self.test_proxy_urls = self.get_test_site  # sites used to verify proxy IPs
        self.proxy_urls = self.get_proxy_site  # free proxy listing sites
def req_user_agent(self):
"""
预留的钩子函数,返回的值可以由子类自定制
:return: 返回user-agent
"""
return self.user_agent
@property
def get_proxy_site(self):
"""
预留的钩子函数,返回的值可以由子类自定制
:return:
"""
return self.req_proxy_urls()
def req_proxy_urls(self):
"""
获取代理站点,重新拼接字段
:return: 返回代理ip地址
"""
proxy_url_list = []
for item in PROXY_URLS:
item['type'] = 'parser_' + item['type']
proxy_url_list.append(item)
self.proxy_urls = proxy_url_list
return self.proxy_urls
@property
def get_test_site(self):
"""
预留的钩子函数,返回的值可以由子类自定制
:return:
"""
return self.req_test_proxy_urls(TEST_PROXY_URLS)
def req_test_proxy_urls(self, test_urls):
"""
预留的钩子函数,返回的值可以由子类自定制
:param test_urls: 测试代理IP的url
:return:
"""
test_proxy_urls = []
for item in test_urls:
item['type'] = 'test_' + item['type']
test_proxy_urls.append(item)
self.test_proxy_urls = test_proxy_urls
return self.test_proxy_urls
@property
def get_header(self):
"""
:return: 返回构造好的header头信息
"""
user_agent = self.req_user_agent()
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'User-Agent': random.choice(user_agent),
'Accept-Language': 'en-US,en;q=0.5',
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip, deflate',
'Upgrade-Insecure-Requests': '1'
}
return headers
def request_common_url(self, url, url_name=None, proxy=None):
"""
访问网站的通用方法
:param url: 网站链接
:param url_name: 请求代理网站时的别名,如果是测试代理不传入该值
:param proxy: 代理参数
:return:
"""
html = None
if proxy: # 如果有代理
headers = self.header
headers['Referer'] = url
headers['Connection'] = 'close'
try:
res = requests.get(url, headers=headers, proxies=proxy, timeout=(3, 7))
except BaseException as x:
# print('访问出错' % proxy)
return
if not res or res.status_code != 200:
# print('该代理 %s 不可用' % proxy)
return
else: # 不是测试代理
try:
if '66ip' in url:
headers = self.header
headers['User-Agent'] = self.user_agent_66ip
res = requests.get(url, headers=headers, timeout=(3, 7))
else:
res = requests.get(url, headers=self.header, timeout=(3, 7))
except Exception as e:
# print(e)
return
if res.status_code != 200:
# 如果是解析的66ip网站
if url_name and url_name == 'parser_66ip':
res = self.solve_66ip(res)
else:
# print('错误:网络请求超时,可能请求被拒绝')
pass
if not res:
# print('错误:网络请求超时,可能请求被拒绝')
return
if res:
try:
html = res.content.decode('utf-8')
except Exception as s:
# print(s)
html = res.content.decode('gb2312')
if url_name:
if url_name.startswith('parser'):
result = self.parser(html, url_name)
elif url_name.startswith('test'):
result = self.parser(html, url_name, proxy)
return result
elif not url_name:
return res, html
else:
return
def solve_66ip(self, response):
"""
处理66ip的js加密
:param response: 第一次请求返回的js数据
:return: 返回解密好的网页数据
"""
cookie = response.headers["Set-Cookie"]
js = response.text.encode("utf8").decode("utf8")
js = js.replace("<script>", "").replace("</script>", "").replace("{eval(", "{var data1 = (").replace(chr(0),
chr(32))
# 使用js2py处理js
context = js2py.EvalJs()
context.execute(js)
js_temp = context.data1
index1 = js_temp.find("document.")
index2 = js_temp.find("};if((")
print('11', js_temp[index1:index2])
js_temp = js_temp[index1:index2].replace("document.cookie", "data2")
print('22', js_temp)
try:
context.execute(js_temp)
except Exception as e:
# time.sleep(2)
# context.execute(js_temp)
pass
data = context.data2
# 合并cookie,重新请求网站。
cookie += ";" + data
response = requests.get("http://www.66ip.cn/mo.php?tqsl=1024", headers={
"User-Agent": self.user_agent_66ip,
"cookie": cookie
}, timeout=(3, 7))
return response
def compare_proxy(self, proxy, current_ip):
"""
拆分代理,只要ip:port
:param proxy:爬取的代理ip
:param current_ip: IP查询网站显示的ip
:return:
"""
proxy_ip = list(proxy.values())[0].split('//')[1]
if current_ip in proxy_ip: # current_ip:x.x.x.x proxy_ip:x.x.x.x:xx
# print(proxy)
return True
# print('current', current_ip, type(current_ip))
# print('proxy', proxy_ip, type(proxy_ip))
return
def request_site(self, proxy_urls):
"""
获取代理网站的源码数据
:return:
"""
task = []
for item in self.proxy_urls:
url = item.get('url')
url_name = item.get('type')
task.append(gevent.spawn(self.request_common_url, url, url_name, None))
gevent.joinall(task)
def request_test_site(self, test_urls=None):
"""
预留的钩子函数,可以重新定义该方法,获取测试代理IP并测试
:param test_urls:测试的urls
:return:
"""
tasks = []
for item in self.proxy_list:
tasks.append(gevent.spawn(self.choice_testsite_request, item))
gevent.joinall(tasks)
def parser(self, html, url_name, proxy=None):
"""
测试代理的分发解析器
:param html: 拿到的网站源码
:param url_name: 请求的代理网站别名
:param proxy: 待测试的代理ip,如果为空则是爬取阶段,如果不为空则是测试代理阶段
:return:
"""
func = getattr(self, url_name)
# 如果对象存在对应解析方法
if func:
# 如果是goubanjia,用BeautifulSoup解析
try:
if url_name in ('parser_goubanjia', 'parser_66ip'):
html = BeautifulSoup(html, "lxml")
result = func(html)
# 此类用字符串处理或者用正则匹配
elif url_name in (
'test_sohu', 'test_onlineservice', 'test_ican', 'test_myip', 'test_httpbin', 'parser_github'):
# result = func(html, proxy)
if not proxy:
result = func(html)
else:
result = func(html, proxy)
# 其余用xpath解析
else:
html = etree.HTML(html)
if not proxy:
result = func(html)
else:
result = func(html, proxy)
except Exception as e:
# print(e)
pass
else:
return result
else:
raise ValueError('尚不存在该网站的解析方法,请根据配置文件添加对应解析方法')
def parser_xici(self, etree_html):
"""
西刺代理解析
:param etree_html: etree对象
:return:
"""
res = etree_html.xpath('//table/tr[position()>1]')
for item in res:
xpath_data = item.xpath('./td/text()')
# print(xpath_data)
ip = xpath_data[0]
port = xpath_data[1]
ip_port = ip + ':' + port
protocal_1 = xpath_data[4].lower()
protocal_2 = xpath_data[5].lower()
protocal = protocal_1 if 'http' in protocal_1 else protocal_2
# 如果还是没有http字样,那就是qq代理
if 'http' not in protocal:
protocal = protocal_1 if 'qq' in protocal_1 else protocal_2
self.proxy_list.append({protocal: protocal + '://' + ip_port})
# self.proxy_dict.update({protocal: protocal + '://' + ip_port})
# print(self.proxy_list)
return self.proxy_list
def parser_kuaidaili(self, etree_html):
"""
快代理解析
:param etree_html: etree对象
:return:
"""
res = etree_html.xpath('//*[@id="list"]/table/tbody/tr')
for item in res:
xpath_data = item.xpath('./td/text()')
ip = xpath_data[0]
port = xpath_data[1]
ip_port = ip + ':' + port
protocal = xpath_data[3].lower()
self.proxy_list.append({protocal: protocal + '://' + ip_port})
# print(self.proxy_list)
return self.proxy_list
def parser_kuaidaili_new(self, etree_html):
"""
快代理解析
:param etree_html: etree对象
:return:
"""
res = etree_html.xpath('//tr')[10:]
for item in res:
xpath_data = item.xpath('./td/text()')
ip = xpath_data[0]
port = xpath_data[1]
ip_port = ip + ':' + port
protocal = xpath_data[3]
protocal = 'https' if 'HTTPS' in protocal else 'http'
self.proxy_list.append({protocal: protocal + '://' + ip_port})
# print(self.proxy_list)
return self.proxy_list
def parser_89ip(self, etree_html):
"""
89代理解析
:param etree_html: etree对象
:return:
"""
res = etree_html.xpath('//table[@class="layui-table"]/tbody/tr')
for item in res:
xpath_data = item.xpath('./td/text()')
ip = xpath_data[0].replace('\n', '').replace('\t', '')
port = xpath_data[1].replace('\n', '').replace('\t', '')
ip_port = ip + port
self.proxy_list.append({'http': 'http://' + ip_port})
# print(self.proxy_list)
return self.proxy_list
def parser_qydaili(self, etree_html):
"""
齐云代理解析
:param etree_html: etree对象
:return:
"""
res = etree_html.xpath('//*[@id="content"]/section/div[2]/table/tbody/tr')
for item in res:
xpath_data = item.xpath('./td/text()')
ip = xpath_data[0]
port = xpath_data[1]
ip_port = ip + ':' + port
protocal = xpath_data[3].lower()
self.proxy_list.append({protocal: protocal + '://' + ip_port})
# print(self.proxy_list)
return self.proxy_list
def parser_3366(self, etree_html):
"""
3366代理解析
:param etree_html: etree对象
:return:
"""
res = etree_html.xpath('//*[@id="list"]/table/tbody/tr')
for item in res:
xpath_data = item.xpath('./td/text()')
ip = xpath_data[0]
port = xpath_data[1]
ip_port = ip + ':' + port
protocal = xpath_data[3].lower()
self.proxy_list.append({protocal: protocal + '://' + ip_port})
# print(self.proxy_list)
return self.proxy_list
def parser_ihuan(self, etree_html):
"""小幻代理,访问过于频繁的话会限流"""
res = etree_html.xpath('//div[@class="table-responsive"]/table/tbody/tr')
for item in res:
ip = item.xpath('string(./td)')
xpath_data = item.xpath('./td/text()')
port = xpath_data[0]
ip_port = ip + ':' + port
protocal = 'https' if xpath_data[3] == '支持' else 'http'
self.proxy_list.append({protocal: protocal + '://' + ip_port})
# print(self.proxy_list)
return self.proxy_list
def parser_xila(self, etree_html):
"""西拉代理解析"""
res = etree_html.xpath('//table[@class="fl-table"]/tbody/tr')
for item in res:
xpath_data = item.xpath('./td/text()')
ip_port = xpath_data[0]
protocal = xpath_data[1].lower()
protocal = 'https' if 'https' in protocal else 'http'
self.proxy_list.append({protocal: protocal + '://' + ip_port})
# print(self.proxy_list)
return self.proxy_list
def parser_iphai(self, etree_html):
"""ip海代理"""
res = etree_html.xpath('//table/tr[position()>1]')
for item in res:
xpath_data = item.xpath('./td/text()')
ip = xpath_data[0].strip()
port = xpath_data[1].strip()
ip_port = ip + ':' + port
protocal = xpath_data[3].strip().lower()
protocal = 'https' if 'https' in protocal else 'http'
self.proxy_list.append({protocal: protocal + '://' + ip_port})
# print(self.proxy_list)
return self.proxy_list
    def decrypt_port(self, port_word):
        """
        Decode the obfuscated real port number, which the site stores in a tag attribute.
        :param port_word: the encoded field
        :return: the decoded port as an int
        """
        word = list(port_word)
        num_list = []
        for item in word:
            num = 'ABCDEFGHIZ'.find(item)
            num_list.append(str(num))
        port = int("".join(num_list)) >> 0x3
        return port
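    # Worked example for decrypt_port (the encoded strings below are illustrative, not captured data):
    # each character is replaced by its index in 'ABCDEFGHIZ', the digits are concatenated,
    # and the number is shifted right by 3 bits, i.e. divided by 8.
    #   'GEA'   -> '640'   -> 640 >> 3   = 80
    #   'GEGEA' -> '64640' -> 64640 >> 3 = 8080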
def parser_goubanjia(self, html):
"""
解析goubanjia代理
:param html: 网站源码
:return:
"""
soup = html.select('tr')[1:]
prototal_list = []
temp_list = []
for item in soup:
a_list = item.select('td > a')
for a in a_list:
if 'http' in a or 'https' in a:
protocal = a.string
prototal_list.append(protocal)
td_list = item.select('td[class="ip"]')
for td in td_list:
child_list = td.find_all()
text = ""
for child in child_list:
if 'style' in child.attrs.keys():
if child.attrs['style'].replace(' ', '') == "display:inline-block;":
if child.string != None:
text = text + child.string
# 过滤出端口号
elif 'class' in child.attrs.keys():
class_list = child.attrs['class']
if 'port' in class_list:
port = self.decrypt_port(class_list[1])
# 拼接端口
text = text + ":" + str(port)
else:
if child.string != None:
text = text + child.string
temp_list.append(text)
data = zip(prototal_list, temp_list)
for item in data:
self.proxy_list.append({item[0]: item[0] + '://' + item[1]})
# print(proxy_list)
return self.proxy_list
def parser_da5u(self, html):
"""
da5u代理解析
:param html:
:return:
"""
ports = html.xpath('//li[contains(@class,"port")]')
port_list = []
for port in ports:
encryption_port = port.values()[0].split(' ')[1]
port = self.decrypt_port(encryption_port)
port_list.append(port)
items = html.xpath('//ul[@class="l2"]')
temp_data = []
for item in items:
xpath_data = item.xpath('./span/li/text()')
ip = xpath_data[0]
protocal = xpath_data[3]
temp_data.append([ip, protocal])
res = zip(temp_data, port_list)
for item in res:
proxy = {item[0][1]: item[0][1] + '://' + item[0][0] + ':' + str(item[1])}
self.proxy_list.append(proxy)
return self.proxy_list
def parser_feiyi(self, html):
"""
飞蚁代理解析
:param html:etree对象
:return:
"""
res = html.xpath('//table/tr[position()>1]')
for item in res:
xpath_data = item.xpath('./td/text()')
ip_port = xpath_data[0] + ':' + xpath_data[1]
protocal = xpath_data[3].lower()
self.proxy_list.append({protocal: protocal + '://' + ip_port})
# print(self.proxy_list)
return self.proxy_list
def parser_shenji(self, html):
"""
神鸡代理解析
:param html: etree对象
:return:
"""
res = html.xpath('//table/tr[position()>1]')
for item in res:
xpath_data = item.xpath('./td/text()')
ip_port = xpath_data[0]
protocal = xpath_data[3].lower()
self.proxy_list.append({protocal: protocal + '://' + ip_port})
# print(self.proxy_list, len(self.proxy_list))
return self.proxy_list
def parser_mipu(self, html):
"""
米扑代理解析 该方法未完善,后续补充
:param html: etree对象
:return:
"""
response_ip = html.xpath('//td[@class="tbl-proxy-ip"]')
response_protocal = html.xpath('//td[@class="tbl-proxy-type"]')
ips = []
protocals = []
for item in response_ip:
xpath_data = item.xpath('string(.)')
ips.append(xpath_data)
for item in response_protocal:
xpath_data = item.xpath('string(.)').lower()
temp_protocal = 'https' if 'https' in xpath_data else 'http'
protocals.append(temp_protocal)
# 这里考虑再三,不能用并发提速,因为并发会乱序,导致ip和端口无法对应
response_port = html.xpath('//td[@class="tbl-proxy-port"]')
url_start = 'https://proxy.mimvp.com/'
ports = []
for item in response_port:
port_img_url = url_start + item.xpath('./img/@src')[0]
headers = self.header
data = requests.get(port_img_url, headers=headers, timeout=(3, 7)).content
port = self.ocr_get_port(data)
ports.append(port)
result = zip(protocals, ips, ports)
for item in result:
self.proxy_list.append({item[0]: item[0] + '://' + item[1] + ':' + item[2]})
# print(self.proxy_list, len(self.proxy_list))
return self.proxy_list
def ocr_get_port(self, data):
"""
用ocr提取图片中的端口
:param data: 返回的图片二进制流结果
:return:
"""
f = open('port.png', 'wb')
f.write(data)
f.close()
pytesseract.pytesseract.tesseract_cmd = 'C://Program Files//Tesseract-OCR//tesseract.exe'
port = pytesseract.image_to_string(Image.open('port.png'),
config='--psm 10 --oem 3 -c tessedit_char_whitelist=0123456789')
# 删除图片
os.remove('port.png')
return port
def parser_kaixin(self, html):
"""
开心代理解析
:param html: etree对象
:return:
"""
res = html.xpath('//*[@id="nav_btn01"]/div[6]/table/tbody/tr')
for item in res:
xpath_data = item.xpath('./td/text()')
ip_port = xpath_data[0] + ':' + xpath_data[1]
protocal = xpath_data[3].lower()
protocal = 'https' if 'https' in protocal else 'http'
self.proxy_list.append({protocal: protocal + '://' + ip_port})
# print(self.proxy_list, len(self.proxy_list))
return self.proxy_list
def parser_jisu(self, html):
"""
极速代理解析
:param html: etree对象
:return:
"""
res = html.xpath('//tr')[5:]
for item in res:
xpath_data = item.xpath('./td/text()')
ip_port = xpath_data[0] + ':' + xpath_data[1]
protocal = xpath_data[3].lower()
self.proxy_list.append({protocal: protocal + '://' + ip_port})
# print(self.proxy_list, len(self.proxy_list))
return self.proxy_list
def parser_jxl(self, html):
"""
jxl代理解析
:param html: etree对象
:return:
"""
res = html.xpath('//table/tbody/tr')
# print(len(res))
for item in res:
xpath_data = item.xpath('./td/text()')
ip_port = xpath_data[1] + ':' + xpath_data[2]
protocal = xpath_data[4].lower()
self.proxy_list.append({protocal: protocal + '://' + ip_port})
# print(self.proxy_list, len(self.proxy_list))
return self.proxy_list
def parser_cross(self, html):
"""
cross代理解析
:param html: etree对象
:return:
"""
res = html.xpath('//table/tr[position()>1]')
# print(len(res))
for item in res:
xpath_data = item.xpath('./td/text()')
ip_port = xpath_data[0] + ':' + xpath_data[1]
protocal = xpath_data[4].lower()
protocal = 'https' if 'https' in protocal else 'http'
self.proxy_list.append({protocal: protocal + '://' + ip_port})
# print(self.proxy_list, len(self.proxy_list))
return self.proxy_list
def parser_nima(self, html):
"""
尼玛代理解析
:param html: etree对象
:return:
"""
res = html.xpath('//table/tbody/tr')
# print(len(res))
for item in res:
xpath_data = item.xpath('./td/text()')
ip_port = xpath_data[0]
protocal = xpath_data[1].lower()
protocal = 'https' if 'https' in protocal else 'http'
self.proxy_list.append({protocal: protocal + '://' + ip_port})
# print(self.proxy_list, len(self.proxy_list))
return self.proxy_list
def parser_github(self, html):
"""
解析github上的数据
:param html:
:return:
"""
proxies = html.split('\n')
for item in proxies:
res = json.loads(item)
port = res.get('port')
ip = res.get('host')
protocal = res.get('type')
proxy = {protocal: protocal + '://' + ip + ':' + str(port)}
self.proxy_list.append(proxy)
def parser_xsdaili(self, html):
"""
小舒代理网站解析
:param html:
:return:
"""
# 爬取每天的最新的
res = html.xpath('//div[contains(@class,"ips")][position()<3]')
url_start = 'http://www.xsdaili.com'
for item in res:
second_url = url_start + item.xpath('./div[@class="title"]/a/@href')[0]
result = self.get_xsdaili_result(second_url)
self.proxy_list.append(result)
return self.proxy_list
def get_xsdaili_result(self, url):
"""
小舒代理二层网页爬取
:param url:
:return:
"""
headers = self.header
response = requests.get(url, headers=headers, timeout=(3, 7)).content
try:
content = response.decode('utf-8')
except Exception as e:
# print(e)
content = response.decode('gb2312')
etree_html = etree.HTML(content)
items = etree_html.xpath('//div[@class="cont"]/text()')[:-1]
proxies = []
for item in items:
ip_port, protocal = item.strip().split('@')
protocal = protocal.split('#')[0].lower()
proxy = {protocal: protocal + '://' + ip_port}
proxies.append(proxy)
return proxies
def parser_66ip(self, html):
"""
解析66ip的代理
:param html: beautifulsoup对象
:return:
"""
res = html.find('p').stripped_strings
for item in res:
if '$' in item or '}' in item:
continue
self.proxy_list.append({'http': 'http://' + item})
return self.proxy_list
def choice_testsite_request(self, proxy):
"""
选择测试网站并测试代理是否可用
:param proxy: 待测试的代理
:return:
"""
test_url = random.choice(self.test_proxy_urls)
url = test_url.get('url')
url_name = test_url.get('type')
result = self.request_common_url(url, url_name, proxy)
if not result:
self.proxy_list.remove(proxy)
return self.proxy_list
def get_test_proxy(self, proxy=None):
"""
测试代理是否成功
:param proxy: 代理,如果为None则为协程使用,如果不为None则为线程使用
:return: 成功返回True,失败范围False
"""
if not proxy:
self.request_test_site()
else:
result = self.choice_testsite_request(proxy)
if result:
return result
else: # 如果没有结果,换个测试网站重新测试
pass
# # 递归查找,直到有正常数据返回
# self.get_test_proxy(proxy)
@property
def proxy(self):
"""测试代理入口方法"""
self.get_test_proxy()
return self.proxy_list
def proxy_duplicate_removal(self):
"""
对爬取到的数据去重
:return:
"""
# proxy_list = lambda x, y: x if y in x else x + [y]
# self.proxy_list = reduce(proxy_list, [[], ] + self.proxy_list)
# return self.proxy_list
new_proxy_list = []
for item in self.proxy_list:
if item not in new_proxy_list:
new_proxy_list.append(item)
self.proxy_list = new_proxy_list
return self.proxy_list
def get_proxy(self, url=None):
"""
获取最终的结果
:param url: 代理网站
:return:
"""
self.request_site(proxy_urls=url)
# 去重
self.proxy_list = self.proxy_duplicate_removal()
print('已爬取代理 %s 个' % len(self.proxy_list))
return self.proxy_list
@property
def proxies(self):
"""
入口方法,返回代理IP数组
:return:
"""
result = self.get_proxy()
return result
def test_baidu(self, etree_html, proxy):
"""
用代理ip测试访问百度网站
:param etree_html: etree对象
:param proxy: 待测试的代理IP
:return:
"""
current_ip = etree_html.xpath('//span[@class="c-gap-right"]/text()')[0].split(':')[1].strip()
current_ip = str(current_ip)
result = self.compare_proxy(proxy, current_ip)
if result:
return proxy
def test_sogou(self, etree_html, proxy):
"""
用代理ip测试访问搜狗网站
:param etree_html: etree对象
:param proxy: 待测试的代理IP
:return:
"""
current_ip = etree_html.xpath('//div[@id="ipsearchresult"]/strong/text()')[0].split(' ')[0].strip()
current_ip = str(current_ip)
result = self.compare_proxy(proxy, current_ip)
if result:
return proxy
def test_so(self, etree_html, proxy):
"""
用代理ip测试访问360搜索网站
:param etree_html: etree对象
:param proxy: 待测试的代理IP
:return:
"""
current_ip = etree_html.xpath('//p[@class="mh-detail "]/span/text()')[0]
current_ip = str(current_ip)
result = self.compare_proxy(proxy, current_ip)
if result:
return proxy
def test_miji(self, etree_html, proxy):
"""
用代理ip测试访问秘迹网站
:param etree_html: etree对象
:param proxy: 待测试的代理IP
:return:
"""
current_ip = etree_html.xpath('//*[@id="main_results"]/div[2]/span/text()')[0]
current_ip = str(current_ip)
result = self.compare_proxy(proxy, current_ip)
if result:
return proxy
def test_chinaz(self, etree_html, proxy):
"""
chinaz的IP查询
:param etree_html: etree对象
:param proxy: 待测试的代理IP
:return:
"""
current_ip = etree_html.xpath('//*[@id="rightinfo"]/dl/dd[1]/text()')[0]
current_ip = str(current_ip)
result = self.compare_proxy(proxy, current_ip)
if result:
return proxy
def test_ipip(self, etree_html, proxy):
"""
ipip的IP查询
:param etree_html: etree对象
:param proxy: 待测试的代理IP
:return:
"""
current_ip = etree_html.xpath('//input/@value')[0]
current_ip = str(current_ip)
result = self.compare_proxy(proxy, current_ip)
if result:
return proxy
def test_ipcn(self, etree_html, proxy):
"""
ipcn的IP查询
:param etree_html: etree对象
:param proxy: 待测试的代理IP
:return:
"""
current_ip = etree_html.xpath('//div[@id="result"]/div/p[1]/code/text()')
current_ip = str(current_ip)
result = self.compare_proxy(proxy, current_ip)
if result:
return proxy
def test_luip(self, etree_html, proxy):
"""
luip的IP查询
:param etree_html: etree对象
:param proxy: 待测试的代理IP
:return:
"""
current_ip = etree_html.xpath('//*[@id="ipaddress"]/text()')[0]
current_ip = str(current_ip)
result = self.compare_proxy(proxy, current_ip)
if result:
return proxy
def test_ttt(self, etree_html, proxy):
"""
ttt的IP查询
:param etree_html: etree对象
:param proxy: 待测试的代理IP
:return:
"""
current_ip = etree_html.xpath('//*[@id="getip"]/text()')[0]
current_ip = str(current_ip)
result = self.compare_proxy(proxy, current_ip)
if result:
return proxy
def test_taobao(self, etree_html, proxy):
"""
ttt的IP查询
:param etree_html: etree对象
:param proxy: 待测试的代理IP
:return:
"""
current_ip = etree_html.xpath('//*[@id="obviousIp"]/text()')[0]
current_ip = str(current_ip)
result = self.compare_proxy(proxy, current_ip)
if result:
return proxy
def test_sohu(self, html, proxy):
"""
搜狐网的IP查询
:param html: 源网站页面
:param proxy: 待测试的代理IP
:return:
"""
html = html.split('=')[1].replace(';', '')
html = json.loads(html)
current_ip = html.get('cip')
result = self.compare_proxy(proxy, current_ip)
if result:
return proxy
def test_onlineservice(self, html, proxy):
"""
onlineservice的IP查询
:param html: 该网站较特殊,此时的html就是返回IP
:param proxy: 待测试的代理IP
:return:
"""
result = self.compare_proxy(proxy, html)
if result:
return proxy
def test_ican(self, html, proxy):
"""
ican的IP查询
:param html: 源网站页面,返回的就是ip地址
:param proxy: 待测试的代理IP
:return:
"""
result = self.compare_proxy(proxy, html)
if result:
return proxy
def test_myip(self, html, proxy):
"""
myip的IP查询
:param html: 源网站页面
:param proxy: 待测试的代理IP
:return:
"""
# html = html.replace(' ', '').split(':')[1].split('来')[0]
current_ip = re.findall(r'\d+\.\d+\.\d+\.\d+', html)[0]
result = self.compare_proxy(proxy, current_ip)
if result:
return proxy
def test_httpbin(self, html, proxy):
"""
httpbin的IP查询
:param html: 源网站页面,返回的是json字符串
:param proxy: 待测试的代理IP
:return:
"""
html = json.loads(html)
current_ip = html.get('origin')
result = self.compare_proxy(proxy, current_ip)
if result:
return proxy
class NormalProxy(BaseProxy):
"""通用类,如果要扩展功能,可以在此扩展"""
pass
class ThreadProxy(BaseProxy):
"""线程式的类,方便线程调用"""
def request_site(self, proxy_urls):
"""
获取代理网站的源码数据,单个url的方式
:return:
"""
url = proxy_urls.get('url')
url_name = proxy_urls.get('type')
self.request_common_url(url, url_name, None)
def proxy_duplicate_removal(lists):
"""
对爬取到的数据去重
:return:
"""
# proxy_list = lambda x, y: x if y in x else x + [y]
# return reduce(proxy_list, [[], ] + lists)
new_proxy_list = []
for item in lists:
if item not in new_proxy_list:
new_proxy_list.append(item)
return new_proxy_list
def save_redis(proxy_list, key=None):
"""
存储到redis
:param proxy_list: 代理列表
:param key: redis的key
:return:
"""
conn = redis.Redis(connection_pool=POOL)
if not key:
key = 'proxies'
# 检测是否已有值
cont = conn.get(key)
if cont:
cont = eval(cont)
proxy_list.extend(cont)
proxy_list = proxy_duplicate_removal(proxy_list)
print('数据库内还有 %s 个代理可用' % len(proxy_list))
conn.set(key, str(proxy_list))
def get_redis(key=None):
"""
从redis获取值
:param key: redis的key
:return:
"""
conn = redis.Redis(connection_pool=POOL)
if not key:
key = 'proxies'
proxies = conn.get(key)
if proxies:
proxies = eval(proxies)
# proxies = db_test_proxy(proxies) # 在正式的爬取阶段节省时间,可以把此测试代理步骤注释掉
return proxies
else:
print('数据库内无可用代理,请重新爬取')
def thread_exector(thread, res):
"""
线程池启动
:param thread: 线程池对象
:param res: 自定义ThreadProxy对象
:return:
"""
tasks = [thread.submit(res.get_test_proxy, proxy) for proxy in res.proxy_list]
# wait(tasks, return_when=FIRST_COMPLETED)
thread.shutdown()
result = [obj for obj in as_completed(tasks)]
return result
def thread_exector_asynic(thread, res):
"""
线程池异步方法启动
:param thread: 线程池对象
:param res: 自定义ThreadProxy对象
:return:
"""
loop = asyncio.get_event_loop()
tasks = [loop.run_in_executor(thread, res.get_test_proxy, url) for url in res.proxy_list]
loop.run_until_complete(asyncio.wait(tasks))
return tasks
def db_test_proxy(proxies):
    # test the proxy IPs
res = ThreadProxy()
res.proxy_list = proxies
    print('Testing availability of the proxy IPs stored in the database.........')
thread = ThreadPoolExecutor()
proxy_list = []
    # ################ the two options below must not be used together ############
    # Note: when proxy.py is run directly, the thread pool + asyncio approach is recommended; when called from Flask, the plain thread pool approach is recommended
    # thread pool approach
tasks = thread_exector(thread, res)
    # # thread pool + asyncio approach
# tasks = thread_exector_asynic(thread, res)
for item in tasks:
temp_res = item.result()
if temp_res:
if temp_res not in proxy_list:
proxy_list.extend(temp_res)
new_proxy_list = []
for item in proxy_list:
if item not in new_proxy_list:
new_proxy_list.append(item)
save_redis(new_proxy_list)
return new_proxy_list
def main_gevent():
    # when using gevent coroutines, remember to apply the monkey patch
    # crawling part
res = NormalProxy()
proxy_list = res.proxies
    print('Crawling finished, %s proxies collected in total' % len(proxy_list))
# print(proxy_list, len(res.proxies))
    # proxy testing part
    print('Testing proxy IP availability; the whole process takes a few minutes, please be patient.........')
available_proxy_list = res.proxy
save_redis(available_proxy_list)
return available_proxy_list
def main_thread_pool():
    # the thread pool is faster than coroutines here
    # crawling part
res = ThreadProxy()
thread = ThreadPoolExecutor()
tasks1 = [thread.submit(res.get_proxy, url) for url in PROXY_URLS]
thread.shutdown()
temp_data = [obj.result() for obj in as_completed(tasks1)]
data = []
for item in temp_data:
data.extend(item)
proxy_list = proxy_duplicate_removal(data)
    print('Crawling finished, %s proxies collected in total' % len(proxy_list))
    # proxy testing part
    print('Testing proxy IP availability; the whole process takes a few minutes, please be patient.........')
res.proxy_list = proxy_list
thread2 = ThreadPoolExecutor()
tasks2 = [thread2.submit(res.get_test_proxy, proxy) for proxy in res.proxy_list]
wait(tasks2)
# thread2.shutdown()
temp_data2 = [obj for obj in as_completed(tasks2)]
data2 = []
for item in temp_data2:
temp_res = item.result()
if temp_res:
data2.extend(temp_res)
data2 = proxy_duplicate_removal(data2)
    # store in redis
    print('Testing finished, saving the available proxies to redis.........')
save_redis(data2)
return data2
# def main_process_pool():
# # using processes here does not pay off
# res = ThreadProxy()
# tasks = []
# process = ProcessPoolExecutor(max_workers=3)
# for url in PROXY_URLS:
# obj = process.submit(res.get_proxy, url).result()
# tasks.append(obj)
# process.shutdown()
# proxy_list = [obj.result() for obj in tasks]
# print(len(proxy_list))
# return proxy_list
def main_thread_pool_asynicio():
    # thread pool + asyncio
    # crawling part
res = ThreadProxy()
loop = asyncio.get_event_loop()
thread = ThreadPoolExecutor()
tasks = [loop.run_in_executor(thread, res.get_proxy, url) for url in PROXY_URLS]
loop.run_until_complete(asyncio.wait(tasks))
proxy_list = []
for obj in tasks:
proxy_list.extend(obj.result())
    # the async run can yield duplicate entries, so deduplicate
proxy_list = proxy_duplicate_removal(proxy_list)
    print('Crawling finished, %s proxies collected in total' % len(proxy_list))
    # proxy testing part
    print('Testing proxy IP availability; the whole process takes a few minutes, please be patient.........')
res.proxy_list = proxy_list
loop2 = asyncio.get_event_loop()
thread2 = ThreadPoolExecutor()
tasks2 = [loop2.run_in_executor(thread2, res.get_test_proxy, url) for url in res.proxy_list]
loop2.run_until_complete(asyncio.wait(tasks2))
proxy_list2 = []
for item in tasks2:
temp_res = item.result()
if temp_res:
proxy_list2.extend(temp_res)
proxy_list2 = proxy_duplicate_removal(proxy_list2)
    # store in redis
    print('%s usable proxies collected in total' % len(proxy_list2))
save_redis(proxy_list2)
return proxy_list2
if __name__ == '__main__':
    # ############### when the database is empty ###############
    """Uncomment and run one of the methods below as needed. Note: never run all three methods at the same time, or the data will become inconsistent."""
    start = time.time()
    # Option 1: coroutines; slightly slower, but light on resources
    main_gevent()
    # Option 2: thread pool; fastest
    # res = main_thread_pool()
    # Option 3: thread pool + async IO; best overall, recommended
    # res2 = main_thread_pool_asynicio()
    # print('total time:', time.time() - start)
    """The 'database has values' and 'database is empty' flows must not be mixed, or the data easily becomes inconsistent; note that proxies have already been validated when they are stored while the database is empty."""
    # ############### when the database has values ###############
# res = get_redis()
# # print(res)
| 30.052713
| 118
| 0.539104
|
b8b9b99b70f6abeabb047b4eff8183e5a36c88d8
| 7,521
|
py
|
Python
|
streamlit_recommender.py
|
yahoyoungho/Recipy_Recommender
|
d74b26cc654ee79f60420daa85bdeed0c6777fa0
|
[
"MIT"
] | null | null | null |
streamlit_recommender.py
|
yahoyoungho/Recipy_Recommender
|
d74b26cc654ee79f60420daa85bdeed0c6777fa0
|
[
"MIT"
] | null | null | null |
streamlit_recommender.py
|
yahoyoungho/Recipy_Recommender
|
d74b26cc654ee79f60420daa85bdeed0c6777fa0
|
[
"MIT"
] | null | null | null |
import streamlit as st
import numpy as np
import pandas as pd
import pickle
import sklearn as skl
import os
from gensim import similarities
import spacy
import os
import surprise
from surprise import SVD
from surprise import Dataset, Reader
######### intializing variables ###############
pp_recipes = pickle.load(open("src/data/pkls/recipe_ingrients_pp.pkl","rb"))
ingr_corpus = pickle.load(open("src/data/pkls/ingr_corpus.pkl","rb"))
ingr_dict = pickle.load(open("src/data/pkls/ingr_dictionary.pkl","rb"))
index = similarities.SparseMatrixSimilarity(ingr_corpus, num_features = len(ingr_corpus))
interaction_df = pd.read_csv("src/data/kaggle_food_data/RAW_interactions.csv")
rating_path = "src/data/user_rating_dir/user_ratings.csv"
alg = SVD(n_factors=100, n_epochs=20, lr_all= 0.005, reg_all= 0.025, random_state= 789,verbose=True)
curr_user_id = "special_user"
############# Functions #######################
def prep_ingr(ingredients):
"""preprocess formatting of the list of ingredients
will remove string 'and' and '&' if present
Args:
ingredients (list of strings): list of ingredients
Returns:
list: list of formatted ingredients
"""
toreturn = []
for ingr in ingredients:
        # remove 'and' or '&' if present
if "and" in ingr or "&" in ingr:
ingr = ingr.replace("and", "").replace("&","") #remove
ingr = ingr.split(" ")
# remove empty strings
while "" in ingr:
ingr.remove("")
for i in ingr:
toreturn.append(i)
else:
toreturn.append("_".join(ingr.split(" ")))
return toreturn
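# Illustrative usage sketch (not part of the original app): prep_ingr splits phrases
# joined by 'and'/'&' into separate tokens and underscores multi-word ingredients.
# The sample input is made up for demonstration.
def _example_prep_ingr():
    assert prep_ingr(["salt and pepper", "olive oil"]) == ["salt", "pepper", "olive_oil"]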
def get_rating_prediction(recipe_ids, alg):
toreturn = []
for recipe_id in recipe_ids:
toreturn.append(alg.predict(curr_user_id, recipe_id).est/5)
return toreturn
def content_base_recommends(input_ingr,num_recs ,index = index, dct = ingr_dict, recipe_df = pp_recipes, alg = None):
"returns certain amount of recipes that is similar to list of input ingredients"
nlp = spacy.load("en_core_web_sm")
ingrs = nlp(input_ingr)
ingrs = " ".join([ingr.lemma_.lower()for ingr in ingrs])
# format ingredients
ingrs = prep_ingr(ingrs.split(" , "))
# format ingredients in bow
ingrs_bow = dct.doc2bow(ingrs)
# get the n_closest recipes
# if alg is not None use it for rating prediction
if alg:
        toreturn = recipe_df.iloc[index[ingrs_bow].argsort()[-num_recs:]].copy()
        recipe_ids = toreturn["id"].values.tolist()
        scaled_ratings = get_rating_prediction(recipe_ids, alg)
        toreturn["pref_ratings"] = scaled_ratings # create new column to rank depending on user preference
        toreturn.sort_values("pref_ratings", ascending = False, inplace = True) # rank by preference
        toreturn.drop(columns = ["pref_ratings"], inplace = True) # drop the column for better visualization
return toreturn
return recipe_df.iloc[index[ingrs_bow].argsort()[-num_recs:]]
def create_new_interaction_df(new_df, inter_df = interaction_df):
    """Append user interactions to a pre-existing dataframe
    Args:
        inter_df (pd.DataFrame, optional): df to append to
        new_df (pd.DataFrame): df to append
    Returns:
        pd.DataFrame: appended dataframe with the index reset
    """
    inter_df = inter_df.copy()
inter_df = inter_df.append(new_df)
inter_df.reset_index(drop=True, inplace = True)
return inter_df
def check_filled(file=rating_path):
"""Check if the file is empty
Args:
file (string, optional): path to the file
Returns:
bool : returns True if the file is not empty
"""
with open(file,"r") as f:
if len(f.readlines()):
return True
return False
################## Webapp main interface #######################
can_update_ratings = False
if check_filled():
    with open(rating_path, "r") as f:
        if len(f.readlines()) > 5:
            can_update_ratings = True
rating_is_filled = check_filled(rating_path) # True if the file is filled
pred_alg = None
if can_update_ratings: # if it can update load the prediction alg
pred_alg = pickle.load(open("src/data/user_rating_dir/collaborative_algorithm.pkl","rb"))
st.title("Welcome to Reci-Py Recommender!")
st.header("This recommender will give you recipe recommendation depending on what ingredients you put in!")
st.text("Please type in the list of ingredients you want to use separated by comma!")
ingredients = st.text_input(label = "Ingredients input")
st.text("Please select how many recommendations you want to get! (max: 20)")
n_recs = st.number_input(label = "Number of recipes to receive", min_value = 2, max_value = 20, value = 5)
n_recs = int(n_recs)
# getting recipe recommendations
if st.button("Get recommendations"):
# if the input is too small don't run
if ingredients: # if valid input
        recs = content_base_recommends(ingredients, n_recs, alg=pred_alg)["name id steps description ingredients".split()]
st.write(recs)
        # get the rating of the recipe
st.write("What recipe did you choose?")
ids = recs["id"].values.tolist()
selected_id = st.select_slider(label= "Select the id of the recipe",options = ids)
st.write("Please give the rating for the selected recipe from 0 to 5.")
st.write("0 being the worst and 5 being the best.")
recipe_rating_by_user = st.number_input("Rating of the recipe", min_value = 0,\
max_value = 5, value = 0, step = 1)
user_data = {"user_id": curr_user_id,
"recipe_id": selected_id,
"date":None, "rating":recipe_rating_by_user,
"review":None}
collected_data_df = pd.DataFrame(user_data, index = [0])
if st.button("Save Rating"):# check if custom user rating already exists
if check_filled(): # if there is information already recorded
# merge and save
collected_data_df = create_new_interaction_df(collected_data_df, pd.read_csv(rating_path))
collected_data_df.to_csv(rating_path, index = False)
else:
# just save
collected_data_df.to_csv(rating_path, index = False)
else:
st.write("Please have more ingredients in the input!")
st.text("This will update your preference and chagne the outcome of the recommendations.")
st.markdown("_this will work after you provide some ratings to recipes you have recieved_")
# updating preference
if st.button("Update my perference"): # will run collaborative filtering system
# first check if the number of rating is sufficient
    if check_filled():
        user_pref_df = pd.read_csv(rating_path)
        # if there are not enough ratings, don't run
        if len(user_pref_df) < 5:
            st.write("Not enough ratings. Please provide more ratings to update your preference.")
            st.write(f"current number of ratings: {len(user_pref_df)}. {5-len(user_pref_df)} needed.")
else: #if there are enough number of ratings given by the user
st.write("Updating the preference. This will take a little bit of time.")
# training SVD for collaborative filtering
col_alg = SVD(n_factors = 100, n_epochs = 20, lr_all = 0.005, reg_all = 0.025, random_state = 789)
reader = Reader(rating_scale = (0,5))
surprise_dataset = Dataset.load_from_df(create_new_interaction_df(pd.read_csv(rating_path))["user_id recipe_id rating".split(" ")], reader )
            col_alg.fit(surprise_dataset.build_full_trainset())
            # save prediction model
            with open("src/data/user_rating_dir/collaborative_algorithm.pkl", "wb") as f:
                pickle.dump(col_alg, f)
st.write("Preference updated. Your preference will be used in recommending from next use.")
else:
st.write("not enough rating is provided")
| 35.476415
| 144
| 0.702965
|
14bed9107960c8aa94906271baa7011bab6c80e1
| 3,658
|
py
|
Python
|
alipay/aop/api/domain/AlipaySocialGiftVoucherUseModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/domain/AlipaySocialGiftVoucherUseModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/domain/AlipaySocialGiftVoucherUseModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipaySocialGiftVoucherUseModel(object):
def __init__(self):
self._end_date = None
self._mid = None
self._order_id = None
self._price = None
self._start_date = None
self._use_price = None
self._voucher_id = None
@property
def end_date(self):
return self._end_date
@end_date.setter
def end_date(self, value):
self._end_date = value
@property
def mid(self):
return self._mid
@mid.setter
def mid(self, value):
self._mid = value
@property
def order_id(self):
return self._order_id
@order_id.setter
def order_id(self, value):
self._order_id = value
@property
def price(self):
return self._price
@price.setter
def price(self, value):
self._price = value
@property
def start_date(self):
return self._start_date
@start_date.setter
def start_date(self, value):
self._start_date = value
@property
def use_price(self):
return self._use_price
@use_price.setter
def use_price(self, value):
self._use_price = value
@property
def voucher_id(self):
return self._voucher_id
@voucher_id.setter
def voucher_id(self, value):
self._voucher_id = value
def to_alipay_dict(self):
params = dict()
if self.end_date:
if hasattr(self.end_date, 'to_alipay_dict'):
params['end_date'] = self.end_date.to_alipay_dict()
else:
params['end_date'] = self.end_date
if self.mid:
if hasattr(self.mid, 'to_alipay_dict'):
params['mid'] = self.mid.to_alipay_dict()
else:
params['mid'] = self.mid
if self.order_id:
if hasattr(self.order_id, 'to_alipay_dict'):
params['order_id'] = self.order_id.to_alipay_dict()
else:
params['order_id'] = self.order_id
if self.price:
if hasattr(self.price, 'to_alipay_dict'):
params['price'] = self.price.to_alipay_dict()
else:
params['price'] = self.price
if self.start_date:
if hasattr(self.start_date, 'to_alipay_dict'):
params['start_date'] = self.start_date.to_alipay_dict()
else:
params['start_date'] = self.start_date
if self.use_price:
if hasattr(self.use_price, 'to_alipay_dict'):
params['use_price'] = self.use_price.to_alipay_dict()
else:
params['use_price'] = self.use_price
if self.voucher_id:
if hasattr(self.voucher_id, 'to_alipay_dict'):
params['voucher_id'] = self.voucher_id.to_alipay_dict()
else:
params['voucher_id'] = self.voucher_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipaySocialGiftVoucherUseModel()
if 'end_date' in d:
o.end_date = d['end_date']
if 'mid' in d:
o.mid = d['mid']
if 'order_id' in d:
o.order_id = d['order_id']
if 'price' in d:
o.price = d['price']
if 'start_date' in d:
o.start_date = d['start_date']
if 'use_price' in d:
o.use_price = d['use_price']
if 'voucher_id' in d:
o.voucher_id = d['voucher_id']
return o
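# Illustrative usage sketch (not part of the SDK): round-tripping a voucher payload
# through the dict helpers. The field values below are made up for demonstration.
def _example_voucher_roundtrip():
    payload = {'voucher_id': 'v-123', 'price': 100, 'use_price': 80}
    model = AlipaySocialGiftVoucherUseModel.from_alipay_dict(payload)
    assert model.to_alipay_dict() == payload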
| 27.923664
| 71
| 0.559869
|
7856c5dfe3d05b1e7d05bb919e67947b5067b156
| 2,210
|
py
|
Python
|
tutorials/Behavior Cloning/network/CriticNetwork.py
|
namjiwon1023/Code_With_RL
|
37beec975b1685e9f6cf991abed491b854b78173
|
[
"MIT"
] | 3
|
2021-08-12T15:11:28.000Z
|
2021-09-27T16:04:16.000Z
|
tutorials/Behavior Cloning/network/CriticNetwork.py
|
namjiwon1023/Code_With_RL
|
37beec975b1685e9f6cf991abed491b854b78173
|
[
"MIT"
] | null | null | null |
tutorials/Behavior Cloning/network/CriticNetwork.py
|
namjiwon1023/Code_With_RL
|
37beec975b1685e9f6cf991abed491b854b78173
|
[
"MIT"
] | 1
|
2021-08-05T07:20:57.000Z
|
2021-08-05T07:20:57.000Z
|
# Copyright (c) 2021: Zhiyuan Nan (namjw@hanyang.ac.kr).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import torch as T
import torch.nn as nn
import torch.optim as optim
import os
import random
import torch.nn.functional as F
from torch.distributions import Normal
import numpy as np
class CriticNetwork(nn.Module):
def __init__(self, n_states, n_actions, args):
super(CriticNetwork, self).__init__()
self.device = args.device
self.checkpoint = os.path.join(args.save_dir + '/' + args.env_name, 'SAC_critic.pth')
self.critic1 = nn.Sequential(nn.Linear(n_states + n_actions, args.hidden_size),
nn.ReLU(),
nn.Linear(args.hidden_size, args.hidden_size),
nn.ReLU(),
nn.Linear(args.hidden_size, 1)
)
self.critic2 = nn.Sequential(nn.Linear(n_states + n_actions, args.hidden_size),
nn.ReLU(),
nn.Linear(args.hidden_size, args.hidden_size),
nn.ReLU(),
nn.Linear(args.hidden_size, 1)
)
self.loss_func = nn.MSELoss()
self.reset_parameters(self.critic1)
self.reset_parameters(self.critic2)
self.optimizer = optim.Adam(self.parameters(), lr=args.critic_lr)
self.to(self.device)
def forward(self, state, action):
cat = T.cat((state, action), dim=-1)
Q1 = self.critic1(cat)
Q2 = self.critic2(cat)
return Q1, Q2
def reset_parameters(self, Sequential, std=1.0, bias_const=1e-6):
for layer in Sequential:
if isinstance(layer, nn.Linear):
nn.init.orthogonal_(layer.weight, std)
nn.init.constant_(layer.bias, bias_const)
def save_model(self):
T.save(self.state_dict(), self.checkpoint)
def load_model(self):
self.load_state_dict(T.load(self.checkpoint))
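# Illustrative usage sketch (not from the original repo): building the twin critic with
# a minimal argparse.Namespace. All hyperparameter values, dimensions and the env_name
# below are made-up assumptions for demonstration.
def _example_critic_usage():
    import argparse
    args = argparse.Namespace(device='cpu', save_dir='./model', env_name='Pendulum-v1',
                              hidden_size=256, critic_lr=3e-4)
    critic = CriticNetwork(n_states=3, n_actions=1, args=args)
    state = T.randn(4, 3)   # batch of 4 states
    action = T.randn(4, 1)  # batch of 4 actions
    q1, q2 = critic(state, action)
    print(q1.shape, q2.shape)  # both torch.Size([4, 1])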
| 35.079365
| 93
| 0.566063
|
0067b07c33f6b23c9bde23ea1aa184696cf9b832
| 875
|
py
|
Python
|
tests/v1/test_logs_exclusion_filter.py
|
MichaelTROEHLER/datadog-api-client-python
|
12c46626622fb1277bb1e172753b342c671348bd
|
[
"Apache-2.0"
] | null | null | null |
tests/v1/test_logs_exclusion_filter.py
|
MichaelTROEHLER/datadog-api-client-python
|
12c46626622fb1277bb1e172753b342c671348bd
|
[
"Apache-2.0"
] | null | null | null |
tests/v1/test_logs_exclusion_filter.py
|
MichaelTROEHLER/datadog-api-client-python
|
12c46626622fb1277bb1e172753b342c671348bd
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import absolute_import
import sys
import unittest
import datadog_api_client.v1
from datadog_api_client.v1.model.logs_exclusion_filter import LogsExclusionFilter
class TestLogsExclusionFilter(unittest.TestCase):
"""LogsExclusionFilter unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testLogsExclusionFilter(self):
"""Test LogsExclusionFilter"""
# FIXME: construct object with mandatory attributes with example values
# model = LogsExclusionFilter() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 25.735294
| 108
| 0.734857
|
33176df708ff4d5237e753df4b7047128bb0a72a
| 3,718
|
py
|
Python
|
tensorpack/dataflow/imgaug/deform.py
|
zhengknight/tensorpack
|
726747313fb2f189dd195d32087897b16a23be0a
|
[
"Apache-2.0"
] | 2
|
2018-10-23T22:02:59.000Z
|
2021-06-19T15:14:02.000Z
|
tensorpack/dataflow/imgaug/deform.py
|
zhengknight/tensorpack
|
726747313fb2f189dd195d32087897b16a23be0a
|
[
"Apache-2.0"
] | null | null | null |
tensorpack/dataflow/imgaug/deform.py
|
zhengknight/tensorpack
|
726747313fb2f189dd195d32087897b16a23be0a
|
[
"Apache-2.0"
] | 1
|
2018-11-14T05:50:57.000Z
|
2018-11-14T05:50:57.000Z
|
# -*- coding: utf-8 -*-
# File: deform.py
from .base import ImageAugmentor
from ...utils import logger
import numpy as np
__all__ = []
# Code was temporarily kept here for a future reference in case someone needs it
# But it was already deprecated,
# because this augmentation is not a general one that people will often find helpful.
class GaussianMap(object):
""" Generate Gaussian weighted deformation map"""
# TODO really needs speedup
def __init__(self, image_shape, sigma=0.5):
assert len(image_shape) == 2
self.shape = image_shape
self.sigma = sigma
def get_gaussian_weight(self, anchor):
"""
Args:
anchor: coordinate of the center
"""
ret = np.zeros(self.shape, dtype='float32')
y, x = np.mgrid[:self.shape[0], :self.shape[1]]
y = y.astype('float32') / ret.shape[0] - anchor[0]
x = x.astype('float32') / ret.shape[1] - anchor[1]
g = np.exp(-(x**2 + y ** 2) / self.sigma)
# cv2.imshow(" ", g)
# cv2.waitKey()
return g
def np_sample(img, coords):
# a numpy implementation of ImageSample layer
coords = np.maximum(coords, 0)
coords = np.minimum(coords, np.array([img.shape[0] - 1, img.shape[1] - 1]))
lcoor = np.floor(coords).astype('int32')
ucoor = lcoor + 1
ucoor = np.minimum(ucoor, np.array([img.shape[0] - 1, img.shape[1] - 1]))
diff = coords - lcoor
neg_diff = 1.0 - diff
lcoory, lcoorx = np.split(lcoor, 2, axis=2)
ucoory, ucoorx = np.split(ucoor, 2, axis=2)
diff = np.repeat(diff, 3, 2).reshape((diff.shape[0], diff.shape[1], 2, 3))
neg_diff = np.repeat(neg_diff, 3, 2).reshape((diff.shape[0], diff.shape[1], 2, 3))
diffy, diffx = np.split(diff, 2, axis=2)
ndiffy, ndiffx = np.split(neg_diff, 2, axis=2)
ret = img[lcoory, lcoorx, :] * ndiffx * ndiffy + \
img[ucoory, ucoorx, :] * diffx * diffy + \
img[lcoory, ucoorx, :] * ndiffy * diffx + \
img[ucoory, lcoorx, :] * diffy * ndiffx
return ret[:, :, 0, :]
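# Illustrative sanity check (not part of tensorpack): sampling an identity grid with
# np_sample should reproduce the input image. The array shapes are arbitrary choices.
def _example_np_sample_identity():
    img = np.random.rand(8, 10, 3).astype('float32')
    grid = np.mgrid[0:8, 0:10].transpose(1, 2, 0).astype('float32')  # HxWx2 (y, x) coords
    assert np.allclose(np_sample(img, grid), img)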
class GaussianDeform(ImageAugmentor):
"""
Some kind of slow deformation I made up. Don't count on it.
"""
# TODO input/output with different shape
def __init__(self, anchors, shape, sigma=0.5, randrange=None):
"""
Args:
anchors (list): list of center coordinates in range [0,1].
shape(list or tuple): image shape in [h, w].
sigma (float): sigma for Gaussian weight
randrange (int): offset range. Defaults to shape[0] / 8
"""
logger.warn("GaussianDeform is slow. Consider using it with 4 or more prefetching processes.")
super(GaussianDeform, self).__init__()
self.anchors = anchors
self.K = len(self.anchors)
self.shape = shape
self.grid = np.mgrid[0:self.shape[0], 0:self.shape[1]].transpose(1, 2, 0)
self.grid = self.grid.astype('float32') # HxWx2
gm = GaussianMap(self.shape, sigma=sigma)
self.gws = np.array([gm.get_gaussian_weight(ank)
for ank in self.anchors], dtype='float32') # KxHxW
self.gws = self.gws.transpose(1, 2, 0) # HxWxK
if randrange is None:
self.randrange = self.shape[0] / 8
else:
self.randrange = randrange
self.sigma = sigma
def _get_augment_params(self, img):
v = self.rng.rand(self.K, 2).astype('float32') - 0.5
v = v * 2 * self.randrange
return v
def _augment(self, img, v):
grid = self.grid + np.dot(self.gws, v)
return np_sample(img, grid)
def _augment_coords(self, coords, param):
raise NotImplementedError()
| 33.8
| 102
| 0.594406
|
1f9c45e6b95eda57814d5b8eed9398606140c107
| 5,540
|
py
|
Python
|
data_process/netcdf/.ipynb_checkpoints/npy_to_nc-checkpoint.py
|
Fifi-Huo/Digital_Appendix_C
|
76aa286a6c3756e98e18d6064826689511e3e8cc
|
[
"MIT"
] | null | null | null |
data_process/netcdf/.ipynb_checkpoints/npy_to_nc-checkpoint.py
|
Fifi-Huo/Digital_Appendix_C
|
76aa286a6c3756e98e18d6064826689511e3e8cc
|
[
"MIT"
] | null | null | null |
data_process/netcdf/.ipynb_checkpoints/npy_to_nc-checkpoint.py
|
Fifi-Huo/Digital_Appendix_C
|
76aa286a6c3756e98e18d6064826689511e3e8cc
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
import sys
import netCDF4 as nc4
from src.track_alignment import map_and_reduce
from src.utils import get_datetime, get_file_time_info, minutes_since
swath_channels = ['ev_250_aggr1km_refsb_1', 'ev_250_aggr1km_refsb_2', 'ev_500_aggr1km_refsb_3', 'ev_500_aggr1km_refsb_4', 'ev_500_aggr1km_refsb_5', 'ev_500_aggr1km_refsb_6', 'ev_500_aggr1km_refsb_7', 'ev_1km_refsb_8', 'ev_1km_refsb_9', 'ev_1km_refsb_10', 'ev_1km_refsb_11', 'ev_1km_refsb_12', 'ev_1km_refsb_13L', 'ev_1km_refsb_13H', 'ev_1km_refsb_14L', 'ev_1km_refsb_14H', 'ev_1km_refsb_15', 'ev_1km_refsb_16', 'ev_1km_refsb_17', 'ev_1km_refsb_18', 'ev_1km_refsb_19', 'ev_1km_emissive_20', 'ev_1km_emissive_21', 'ev_1km_emissive_22', 'ev_1km_emissive_23', 'ev_1km_emissive_24', 'ev_1km_emissive_25', 'ev_1km_refsb_26', 'ev_1km_emissive_27', 'ev_1km_emissive_28', 'ev_1km_emissive_29', 'ev_1km_emissive_30', 'ev_1km_emissive_31', 'ev_1km_emissive_32', 'ev_1km_emissive_33', 'ev_1km_emissive_34', 'ev_1km_emissive_35', 'ev_1km_emissive_36', 'latitude', 'longitude','solar_zenith_angle','cloud_mask']
layer_info_channels = {
"CloudLayerType" : 'cloud_layer_type'
}
def copy_dataset_structure(original_filename, copy_filename, deep=True, zlib=True):
with nc4.Dataset(original_filename, 'r') as original:
copy = nc4.Dataset(copy_filename, 'w', format='NETCDF4')
block_list = [(copy, original)]
variables = {}
# create groups if deep
if deep:
for name, group in original.groups.items():
new_group = copy.createGroup(name)
block_list.append((new_group, group))
# copy global attributes
copy.setncatts({a : original.getncattr(a) for a in original.ncattrs()})
for new_block, block in block_list:
# copy dimensions
for name, dim in block.dimensions.items():
new_dim = new_block.createDimension(name, len(dim) if not dim.isunlimited() else None)
# Copy variables
for name, var in block.variables.items():
new_var = new_block.createVariable(name, var.datatype, var.dimensions, zlib=zlib)
# Copy variable attributes
new_var.setncatts({a : var.getncattr(a) for a in var.ncattrs()})
variables[name] = new_var
return copy, variables
def fill_dataset(dataset, variables, swath, layer_info, minutes, status="daylight", deep=True):
shape = swath[0].shape
for i, channel in enumerate(swath_channels):
variables[channel][0] = swath[i].T
if layer_info is not None:
for info_name, channel in layer_info_channels.items():
# map data to swath format
if info_name == "PrecipFlag":
# precipitation flag is not available per layer
info = np.full(shape, variables[channel]._FillValue)
map_and_reduce(layer_info["mapping"], layer_info[info_name], info, layer_info["width-range"])
info = info.T
else:
info = np.full((*shape, 10), variables[channel]._FillValue)
# correct values, from [1, 8] to [0, 7]
if info_name == "CloudLayerType":
info -= 1
info[info < 0] = variables[channel]._FillValue
variables[channel][0] = info
# set global variables and attributes
dataset.status_flag = status
dataset["time"][0] = minutes
def load_npys(swath_path, layer_info_dir="layer-info"):
dirname, filename = os.path.split(swath_path)
swath = np.load(swath_path)
try:
        layer_info_dict = np.load(os.path.join(dirname, layer_info_dir, filename), allow_pickle=True).item()  # allow_pickle is needed to load the saved dict
except FileNotFoundError:
layer_info_dict = None
return swath, layer_info_dict
def save_as_nc(swath, layer_info, swath_path, save_name):
copy, variables = copy_dataset_structure(os.path.join("netcdf","datasetstr.nc"), save_name)
# determine swath status from directory hierarchy
status = "corrupt"
if "daylight" in save_name:
status = "daylight"
elif "night" in save_name:
status = "night"
# convert npy to nc
year, abs_day, hour, minute = get_file_time_info(swath_path)
#month = get_datetime(year, int(abs_day)).month
minutes_since_2016 = minutes_since(int(year), int(abs_day), int(hour), int(minute))
fill_dataset(copy, variables, swath, layer_info, minutes_since_2016, status)
copy.close()
if __name__ == "__main__":
swath_path = sys.argv[2]
save_dir = sys.argv[1]
swath, layer_info = load_npys(swath_path)
# get time info
year, abs_day, hour, minute = get_file_time_info(swath_path)
month = get_datetime(year, int(abs_day)).month
# determine swath status from directory hierarchy
status = "corrupt"
if "daylight" in swath_path:
status = "daylight"
elif "night" in swath_path:
status = "night"
    # create save directory, including the month/status subdirectories used below
    out_dir = os.path.join(save_dir, str(month), status)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    # create a copy of reference dataset
    copy_name = "A{}.{}.{}{}.nc".format(year, abs_day, hour, minute)
    copy, variables = copy_dataset_structure(os.path.join("netcdf", "cumulo.nc"), os.path.join(out_dir, copy_name))
# convert npy to nc
minutes_since_2008 = minutes_since(int(year), int(abs_day), int(hour), int(minute))
fill_dataset(copy, variables, swath, layer_info, minutes_since_2008, status)
copy.close()
| 36.933333
| 899
| 0.669675
|
baf1d11766cd1167f8409170e12e881e27477c43
| 13,260
|
py
|
Python
|
analysis/analysis_helpers.py
|
rahulvenkk/physics-benchmarking-neurips2021
|
9137a3f3a29fa1646a2d7dcf45daf9b22b4241c2
|
[
"MIT"
] | 25
|
2021-06-19T02:48:19.000Z
|
2022-03-05T14:16:13.000Z
|
analysis/analysis_helpers.py
|
rahulvenkk/physics-benchmarking-neurips2021
|
9137a3f3a29fa1646a2d7dcf45daf9b22b4241c2
|
[
"MIT"
] | 18
|
2021-06-24T16:39:24.000Z
|
2022-01-19T18:34:16.000Z
|
analysis/analysis_helpers.py
|
rahulvenkk/physics-benchmarking-neurips2021
|
9137a3f3a29fa1646a2d7dcf45daf9b22b4241c2
|
[
"MIT"
] | 2
|
2021-08-12T22:59:47.000Z
|
2022-02-24T23:30:58.000Z
|
import numpy as np
from itertools import groupby
import numpy as np
import scipy.stats as stats
import pandas as pd
# which columns identify a model?
MODEL_COLS = [
'Model',
'Readout Train Data',
'Readout Type',
'Encoder Type',
'Dynamics Type',
'Encoder Pre-training Task',
'Encoder Pre-training Dataset',
'Encoder Pre-training Seed',
'Encoder Training Task',
'Encoder Training Dataset',
'Encoder Training Seed',
'Dynamics Training Task',
'Dynamics Training Dataset',
'Dynamics Training Seed',
'ModelID',
'Model Kind']
# Which columns can we abstract over the concrete dataset over?
DATASET_ABSTRACTION_COLS = ['Encoder Training Dataset',
'Dynamics Training Dataset', 'Readout Train Data']
DATASET_ABSTRACTED_COLS = [c + " Type" for c in DATASET_ABSTRACTION_COLS]
def item(x):
"""Returns representative single item; helper function for pd.agg"""
return x.tail(1).item()
def get_streak_thresh(numTrials, probResp):
'''
input:
numTrials: how many trials
probResp: probability of True response
output:
prints the 97.5th percentile for unusual streak lengths
'''
X = np.random.choice(a=['False', 'True'], size=(
1000, numTrials), p=[probResp, 1-probResp])
Maxx = []
for x in X:
lst = []
for n, c in groupby(x):
num, count = n, sum(1 for i in c)
lst.append((num, count))
maxx = max([y for x, y in lst])
Maxx.append(maxx)
return np.percentile(Maxx, 97.5)
def get_longest_streak_length(seq):
lst = []
for n, c in groupby(seq):
num, count = n, sum(1 for i in c)
lst.append((num, count))
return max([y for x, y in lst])
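# Illustrative usage sketch (not part of the original analysis code): a made-up response
# sequence whose longest run of identical answers has length 3.
def _example_longest_streak():
    assert get_longest_streak_length(['YES', 'YES', 'NO', 'NO', 'NO', 'YES']) == 3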
def bootstrap_mean(D, col='correct', nIter=1000):
bootmean = []
for currIter in np.arange(nIter):
bootD = D.sample(n=len(D), random_state=currIter, replace=True)
bootmean.append(np.mean(bootD[col].values))
return bootmean
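# Illustrative usage sketch (not part of the original analysis code): bootstrapping a 95%
# interval for accuracy on a toy dataframe. The column values and nIter are made up.
def _example_bootstrap_mean():
    toy = pd.DataFrame({'correct': [1, 0, 1, 1, 0, 1]})
    boot = bootstrap_mean(toy, col='correct', nIter=200)
    lo, hi = np.percentile(boot, [2.5, 97.5])
    print('bootstrapped 95% CI: [{:.2f}, {:.2f}]'.format(lo, hi))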
def load_and_preprocess_data(path_to_data):
'''
apply basic preprocessing to human dataframe
'''
# load in data
d = pd.read_csv(path_to_data)
# add column for scenario name
scenarioName = path_to_data.split('/')[-1].split('-')[1].split('_')[0]
# some utility vars
# colnames_with_variable_entries = [col for col in sorted(d.columns) if len(np.unique(d[col]))>1]
colnames = ['gameID', 'trialNum', 'prolificIDAnon', 'stim_ID',
'response', 'target_hit_zone_label', 'correct', 'choices', 'rt']
# colnames = ['gameID','trialNum','stim_ID','response','target_hit_zone_label','correct','choices','rt']
# include all the columns that we can
intersect_cols = [col for col in colnames if col in d.columns]
# subset dataframe by colnames of interest
_D = d[intersect_cols]
_D = _D.assign(scenarioName=scenarioName)
_D = basic_preprocessing(_D)
return _D
def basic_preprocessing(_D):
try:
# preprocess RTs (subtract 2500ms presentation time, log transform)
_D = _D.assign(RT=_D['rt'] - 2500)
_D = _D.assign(logRT=np.log(_D['RT']))
_D = _D.drop(columns=['rt'], axis=1)
except:
_D['RT'] = np.nan
_D['logRT'] = np.nan
# convert responses to boolean
binary_mapper = {'YES': True, 'NO': False, np.nan: np.nan, "Next":np.nan} # Next can show up when we feed in the results of a familiarization dataframe—ignore it for present purposes
_D = _D.assign(responseBool=_D['response'].apply(
lambda x: binary_mapper[x]), axis=0)
# remove _img from stimulus name
_D['stim_ID'] = _D['stim_ID'].apply(lambda n: n.split("_img")[0])
return _D
def apply_exclusion_criteria(D, familiarization_D=None, verbose=False):
'''
Based on `preregistration_neurips2021.md`
Data from an entire experimental session will be excluded if the responses:
* contain a sequence with unusually long streak, defined as occurring less than 2.5% of the time under random responding
* contain a sequence of at least 24 trials alternating "yes" and "no" responses
* are correct for fewer than 4 out of 10 familiarization trials (i.e., 30% correct or lower)
* the mean accuracy for that participant is below 3 standard deviations below the median accuracy across all participants for that scenario
* the mean log-transformed response time for that participant is 3 standard deviations above the median log-transformed response time across all participants for that scenario
Excluded sessions will be flagged. Flagged sessions will not be included in the main analyses. We will also conduct our planned analyses with the flagged sessions included to investigate the extent to which the outcomes of the main analyses change when these sessions are included. Specifically, we will fit a statistical model to all sessions and estimate the effect of a session being flagged on accuracy.
input: D, dataframe from a specific experiment w/ a specific physical domain
output: D, filtered dataframe after exclusions have been applied
'''
# print name of scenario
scenarionName = np.unique(D['scenarioName'])[0]
# check if we have prolificIDAnon
if 'prolificIDAnon' in D.columns:
userIDcol = 'prolificIDAnon'
else:
userIDcol = 'gameID'
if verbose:
print("WARNING: no prolificIDAnon column found. Using gameID instead.")
    # init containers for flagged IDs
    flaggedIDs = []
    famIDs = []  # filled below when familiarization data is available
# what is 97.5th percentile for random sequences of length numTrials and p=0.5?
thresh = get_streak_thresh(150, 0.5)
if verbose:
print('97.5th percentile for streak length is {}.'.format(thresh))
# flag sessions with long streaks
streakyIDs = []
for name, group in D.groupby(userIDcol):
seq = group['response'].values
streak_length = get_longest_streak_length(group['response'].values)
if streak_length > thresh:
streakyIDs.append(name)
if verbose:
print('There are {} flagged IDs so far due to long streaks.'.format(
len(streakyIDs)))
# flag sessions with suspicious alternation pattern
alternatingIDs = []
    pattern = list(D['response'].dropna().unique())*12  # 12 repeats of the two responses = 24 alternating trials, matching the criterion above
for name, group in D.groupby(userIDcol):
seq = group['response'].dropna().values
substr = ''.join(pattern)
fullstr = ''.join(seq)
if substr in fullstr:
alternatingIDs.append(name)
if verbose:
print('There are {} flagged IDs so far due to alternating sequences.'.format(
len(alternatingIDs)))
# flag sessions that failed familiarization
# see familiarization_exclusion.py
if familiarization_D is None:
# is familirization dataframe provided in D?
try:
if np.sum(D['condition'] == 'familiarization_prediction') > 0:
familiarization_D = D[D['condition'] == 'familiarization_prediction']
if verbose:
print('Familiarization dataframe provided in D.')
except:
if verbose: print('Familiarization dataframe not provided in D.')
if familiarization_D is not None:
# do we have coverage for all prolific IDs?
if verbose: print('Familiarization dataframe has {} rows.'.format(len(familiarization_D)))
if set(np.unique(familiarization_D[userIDcol])) != set(np.unique(D[userIDcol])):
if verbose: print('Not all prolific IDs are covered in familiarization data. Make sure you pass familiarization data for all trials!')
try:
C_df = familiarization_D.groupby('gameID').agg({'correct': ['sum', 'count']})
# get ratio
C_df['ratio'] = C_df[('correct', 'sum')]/C_df[('correct', 'count')]
C_df_excluded = C_df[C_df['ratio'] <= .3]
# get ProlificIDs for excluded sessions
excludedIDs = C_df_excluded.index.values
# get ProlificIDs for gameIDs
famIDs = []
for gameID in excludedIDs:
famIDs.append(np.unique(familiarization_D[familiarization_D['gameID'] == gameID][userIDcol])[0])
        except Exception:
            if verbose: print("An error occurred during familiarization exclusion")
        if verbose:
            print("There are {} flagged IDs due to failing the familiarization trials".format(len(famIDs)))
else:
if verbose: print('No familiarization data provided. Pass a dataframe with data from the familiarization trials (full dataframe is okay). Skipping familiarization exclusion.')
# flag sessions with unusually low accuracy
# ignore nan responses
Dacc = D[D['correct'].isna() == False]
Dacc['correct'] = Dacc['correct'].astype(int)
Dacc = Dacc.groupby(userIDcol).agg({'correct':'mean'})
thresh = np.mean(Dacc['correct']) - 3*np.std(Dacc['correct'])
Dacc = Dacc.assign(lowAcc = Dacc['correct']<thresh)
lowAccIDs = list(Dacc[Dacc['lowAcc']==True].index)
if verbose:
print('There are {} flagged IDs so far due to low accuracy.'.format(len(lowAccIDs)))
# flag sessions with unusually high RTs
Drt = D.groupby(userIDcol).agg({'logRT':np.median})
thresh = np.median(Drt['logRT']) + 3*np.std(Drt['logRT'])
Drt = Drt.assign(highRT = Drt['logRT']>thresh)
highRTIDs = list(Drt[Drt['highRT']==True].index)
if verbose:
print('There are {} flagged IDs so far due to high RTs.'.format(len(highRTIDs)))
    # combining all flagged sessions (familiarization failures included, per the criteria above)
    flaggedIDs = streakyIDs + alternatingIDs + famIDs + lowAccIDs + highRTIDs
if verbose:
print('There are a total of {} flagged IDs.'.format(len(np.unique(flaggedIDs))))
# we also need to exclude ledge stimuli until their reprodicibility is fixed
mask = ~D['stim_ID'].str.contains("ledge")
D = D[mask]
if verbose:
print("{} observations are excluded due to removal of ledge stimuli".format(np.sum(~mask)))
# removing flagged sessions from dataset
D = D[~D[userIDcol].isin(flaggedIDs)]
numSubs = len(np.unique(D[userIDcol].values))
if verbose:
print('There are a total of {} valid and complete sessions for {}.'.format(numSubs, scenarionName))
return D
def same_or_nan(acol,bcol): return [a if a != b else np.nan for a,b in zip(acol,bcol)]
def process_model_dataframe(MD):
"""Apply a couple of steps to read in the output of the model results"""
# add correctness info
MD['correct'] = MD['Actual Outcome'] == MD['Predicted Outcome']
# reverse renaming of scenarios
MD = MD.replace('rollslide','rollingsliding')
MD = MD.replace('cloth','clothiness')
MD = MD.replace('no_rollslide','no_rollingsliding')
MD = MD.replace('no_cloth','no_clothiness')
# add canonical stim name (ie remove redyellow)
MD['Canon Stimulus Name'] = MD['Stimulus Name'].apply(lambda n: "".join(n.split('-redyellow')))
# set dataset columns to 'same' if they match the test data
for col in DATASET_ABSTRACTION_COLS:
MD[col+" Type"] = MD[col]
MD.loc[MD[col] == MD["Readout Test Data"],col+" Type"] = "same"
MD.loc[MD[col] == ["no_"+n for n in MD["Readout Test Data"]],col+" Type"] = "all_but_this"
        # MD.loc[MD[col] == "all", col+" Type"] = "all"
# force unique model string
MD['ModelID'] = ["_".join(attr) for attr in zip(
MD['Model'].astype(str),
MD['Encoder Type'].astype(str),
MD['Encoder Training Seed'].astype(str),
MD['Encoder Training Task'].astype(str),
MD['Encoder Training Dataset'].astype(str),
MD['Dynamics Training Task'].astype(str),
MD['Dynamics Training Seed'].astype(str),
MD['Dynamics Training Dataset'].astype(str),
["readout"]*len(MD),
MD['Readout Type'].astype(str),
MD['Readout Train Data'].astype(str),
MD['filename'].astype(str)
)]
# add a model kind—the granularity that we want to plot over—columns
# this ignores the specific datasets if they match the testing data, but not otherwise
# ignores Dynamics Training, so we can loop over it in plotting
# get a list of models to plot
MD['Model Kind'] = ["_".join(attr) for attr in zip(
MD['Model'].astype(str),
MD['Encoder Type'].astype(str),
MD['Encoder Training Seed'].astype(str),
MD['Encoder Training Task'].astype(str),
MD['Encoder Training Dataset Type'].astype(str),
MD['Dynamics Training Task'].astype(str),
MD['Dynamics Training Seed'].astype(str),
# MD['Dynamics Training Dataset Type'].astype(str),
MD['Readout Train Data Type'].astype(str),
)]
return MD
| 40.8
| 411
| 0.652489
|
a35875399ef0be82b78d3aaa24c9131cf299d797
| 11,619
|
py
|
Python
|
xarrayutils/test/test_xgcm_utils.py
|
cspencerjones/xarrayutils
|
6c33e83b830b7586693366c520a54c1122194d50
|
[
"MIT"
] | 40
|
2019-02-05T17:06:47.000Z
|
2021-11-05T17:52:28.000Z
|
xarrayutils/test/test_xgcm_utils.py
|
cspencerjones/xarrayutils
|
6c33e83b830b7586693366c520a54c1122194d50
|
[
"MIT"
] | 88
|
2017-03-20T15:53:06.000Z
|
2022-03-16T02:31:10.000Z
|
xarrayutils/test/test_xgcm_utils.py
|
cspencerjones/xarrayutils
|
6c33e83b830b7586693366c520a54c1122194d50
|
[
"MIT"
] | 14
|
2017-04-24T18:58:12.000Z
|
2021-12-02T18:38:42.000Z
|
import pytest
xgcm = pytest.importorskip("xgcm")
Grid = xgcm.Grid
import xarray as xr
import numpy as np
from xarray.testing import assert_allclose
from xarrayutils.weighted_operations import weighted_mean
from xarrayutils.xgcm_utils import (
_infer_gridtype,
_get_name,
_get_axis_pos,
_check_dims,
_find_metric,
_find_dim,
w_mean,
xgcm_weighted_mean,
interp_all,
calculate_rel_vorticity,
dll_dist,
)
def datasets():
xt = np.arange(4)
xu = xt + 0.5
yt = np.arange(4)
yu = yt + 0.5
# add a non x,y variable to test how its handled throughout.
t = np.arange(10)
# Need to add a tracer here to get the tracer dimsuffix
tr = xr.DataArray(
np.random.rand(len(xt), len(yt), len(t)),
coords=[("xt", xt), ("yt", yt), ("time", t)],
)
u_b = xr.DataArray(
np.random.rand(len(xt), len(yt), len(t)),
coords=[("xu", xu), ("yu", yu), ("time", t)],
)
v_b = xr.DataArray(
np.random.rand(len(xt), len(yt), len(t)),
coords=[("xu", xu), ("yu", yu), ("time", t)],
)
u_c = xr.DataArray(
np.random.rand(len(xt), len(yt), len(t)),
coords=[("xu", xu), ("yt", yt), ("time", t)],
)
v_c = xr.DataArray(
np.random.rand(len(xt), len(yt), len(t)),
coords=[("xt", xt), ("yu", yu), ("time", t)],
)
# maybe also add some other combo of x,t y,t arrays....
timeseries = xr.DataArray(np.random.rand(len(t)), coords=[("time", t)])
# northeast distance
dx = 0.3
dy = 2
dx_ne = xr.DataArray(np.ones([4, 4]) * dx - 0.1, coords=[("xu", xu), ("yu", yu)])
dx_n = xr.DataArray(np.ones([4, 4]) * dx - 0.2, coords=[("xt", xt), ("yu", yu)])
dx_e = xr.DataArray(np.ones([4, 4]) * dx - 0.3, coords=[("xu", xu), ("yt", yt)])
dx_t = xr.DataArray(np.ones([4, 4]) * dx - 0.4, coords=[("xt", xt), ("yt", yt)])
dy_ne = xr.DataArray(np.ones([4, 4]) * dy + 0.1, coords=[("xu", xu), ("yu", yu)])
dy_n = xr.DataArray(np.ones([4, 4]) * dy + 0.2, coords=[("xt", xt), ("yu", yu)])
dy_e = xr.DataArray(np.ones([4, 4]) * dy + 0.3, coords=[("xu", xu), ("yt", yt)])
dy_t = xr.DataArray(np.ones([4, 4]) * dy + 0.4, coords=[("xt", xt), ("yt", yt)])
area_ne = dx_ne * dy_ne
area_n = dx_n * dy_n
area_e = dx_e * dy_e
area_t = dx_t * dy_t
def _add_metrics(obj):
obj = obj.copy()
for name, data in zip(
[
"dx_ne",
"dx_n",
"dx_e",
"dx_t",
"dy_ne",
"dy_n",
"dy_e",
"dy_t",
"area_ne",
"area_n",
"area_e",
"area_t",
],
[
dx_ne,
dx_n,
dx_e,
dx_t,
dy_ne,
dy_n,
dy_e,
dy_t,
area_ne,
area_n,
area_e,
area_t,
],
):
obj.coords[name] = data
# add xgcm attrs
for ii in ["xu", "xt"]:
obj[ii].attrs["axis"] = "X"
for ii in ["yu", "yt"]:
obj[ii].attrs["axis"] = "Y"
for ii in ["xu", "yu"]:
obj[ii].attrs["c_grid_axis_shift"] = 0.5
return obj
coords = {
"X": {"center": "xt", "right": "xu"},
"Y": {"center": "yt", "right": "yu"},
}
coords_outer = {
"X": {"center": "xt", "outer": "xu"},
"Y": {"center": "yt", "outer": "yu"},
}
ds_b = _add_metrics(
xr.Dataset({"u": u_b, "v": v_b, "tracer": tr, "timeseries": timeseries})
)
ds_c = _add_metrics(
xr.Dataset({"u": u_c, "v": v_c, "tracer": tr, "timeseries": timeseries})
)
ds_fail = _add_metrics(
xr.Dataset({"u": u_b, "v": v_c, "tracer": tr, "timeseries": timeseries})
)
ds_fail2 = _add_metrics(
xr.Dataset({"u": u_b, "v": v_c, "tracer": tr, "timeseries": timeseries})
)
return {
"B": ds_b,
"C": ds_c,
"fail_gridtype": ds_fail,
"fail_dimtype": ds_fail2,
"coords": coords,
"fail_coords": coords_outer,
}
def test_find_metric():
datadict = datasets()
ds = datadict["C"]
metric_list = ["dx_n", "dx_e", "dx_t", "dx_ne"]
fail_metric_list = ["dx_n", "dy_n"]
assert _find_metric(ds["tracer"], metric_list) == "dx_t"
assert _find_metric(ds["u"], metric_list) == "dx_e"
assert _find_metric(ds["v"], metric_list) == "dx_n"
assert _find_metric(ds["u"].drop("dx_e"), metric_list) is None
with pytest.raises(ValueError):
_find_metric(ds["v"], fail_metric_list)
def test_find_dim():
datadict = datasets()
ds = datadict["C"]
grid = Grid(ds)
assert _find_dim(grid, ds, "X") == ["xt", "xu"]
assert _find_dim(grid, ds, "Z") is None
assert _find_dim(grid, ds["timeseries"], "X") is None
assert _find_dim(grid, ds["timeseries"], "X") is None
assert _find_dim(grid, ds["tracer"], "X") == ["xt"]
assert _find_dim(grid, ds["u"], "X") == ["xu"]
def test_get_name():
datadict = datasets()
ds = datadict["C"]
assert _get_name(ds.xt) == "xt"
def test_get_axis_pos():
datadict = datasets()
ds = datadict["C"]
coords = datadict["coords"]
grid = Grid(ds, coords=coords)
assert _get_axis_pos(grid, "X", ds.u) == "right"
assert _get_axis_pos(grid, "X", ds.tracer) == "center"
assert _get_axis_pos(grid, "Z", ds.u) is None
def test_infer_gridtype():
datadict = datasets()
coords = datadict["coords"]
ds_b = datadict["B"]
grid_b = Grid(ds_b, coords=coords)
ds_c = datadict["C"]
grid_c = Grid(ds_c, coords=coords)
# This should fail(unkown gridtype)
ds_fail = datadict["fail_gridtype"]
grid_fail = Grid(ds_fail, coords=coords)
# This is not supported yet ('inner' and 'outer' dims)
coords2 = datadict["fail_coords"]
ds_fail2 = datadict["fail_dimtype"]
grid_fail2 = Grid(ds_fail2, coords=coords2)
assert _infer_gridtype(grid_b, ds_b.u, ds_b.v) == "B"
assert _infer_gridtype(grid_c, ds_c.u, ds_c.v) == "C"
with pytest.raises(RuntimeError, match=r"Gridtype not recognized *"):
_infer_gridtype(grid_fail, ds_fail.u, ds_fail.v)
with pytest.raises(RuntimeError): # , match=r'`inner` or `outer` *'
_infer_gridtype(grid_fail2, ds_fail2.u, ds_fail2.v)
def test_check_dims():
datadict = datasets()
ds = datadict["C"]
assert _check_dims(ds.u, ds.u, "dummy")
with pytest.raises(RuntimeError):
_check_dims(ds.u, ds.v, "dummy")
@pytest.mark.parametrize(
"axis, metric_list",
[
("X", ["dx_t", "dx_e", "dx_n", "dx_ne"]),
("X", ["dy_t", "dy_e", "dy_n", "dy_ne"]),
],
)
@pytest.mark.parametrize("gridtype", ["B", "C"])
def test_w_mean(axis, metric_list, gridtype):
fail_metric_list = ["fail"]
ds = datasets()[gridtype]
grid = Grid(ds)
for var in ds.data_vars:
metric = _find_metric(ds[var], metric_list)
dim = _find_dim(grid, ds[var], axis)
a = w_mean(grid, ds[var], axis, metric_list, verbose=True)
if dim is None: # no dimension found, return the input arrays
b = ds[var]
else:
b = weighted_mean(ds[var], ds[metric], dim=dim)
assert_allclose(a, b)
# original array should be returned if a non matching metric list
# is supplied
a_fail = w_mean(grid, ds[var], axis, fail_metric_list)
assert_allclose(a_fail, ds[var])
@pytest.mark.parametrize(
"axis, metric_list",
[
("X", ["dx_t", "dx_e", "dx_n", "dx_ne"]),
("X", ["dy_t", "dy_e", "dy_n", "dy_ne"]),
],
)
@pytest.mark.parametrize("gridtype", ["B", "C"])
def test_xgcm_weighted_mean(axis, metric_list, gridtype):
ds = datasets()[gridtype]
grid = Grid(ds)
a = xgcm_weighted_mean(grid, ds, axis, metric_list)
for var in ["tracer", "u", "v"]:
b = w_mean(grid, ds[var], axis, metric_list)
c = xgcm_weighted_mean(grid, ds[var], axis, metric_list)
assert_allclose(a[var], b)
assert_allclose(b, c)
for var in ["timeseries"]:
b = ds[var]
c = xgcm_weighted_mean(grid, ds[var], axis, metric_list)
assert_allclose(a[var], b)
assert_allclose(b, c)
def test_calculate_rel_vorticity():
datadict = datasets()
coords = datadict["coords"]
ds_b = datadict["B"]
grid_b = Grid(ds_b, coords=coords)
ds_c = datadict["C"]
grid_c = Grid(ds_c, coords=coords)
test_b = (
grid_b.diff(grid_b.interp(ds_b.v * ds_b.dy_ne, "Y"), "X")
- grid_b.diff(grid_b.interp(ds_b.u * ds_b.dx_ne, "X"), "Y")
) / ds_b.area_t
zeta_b = calculate_rel_vorticity(
grid_b, ds_b.u, ds_b.v, ds_b.dx_ne, ds_b.dy_ne, ds_b.area_t, gridtype=None
)
test_c = (
grid_c.diff(ds_c.v * ds_c.dy_n, "X") - grid_c.diff(ds_c.u * ds_c.dx_e, "Y")
) / ds_c.area_ne
zeta_c = calculate_rel_vorticity(
grid_c, ds_c.u, ds_c.v, ds_c.dx_e, ds_c.dy_n, ds_c.area_ne, gridtype=None
)
assert_allclose(test_b, zeta_b)
assert_allclose(test_c, zeta_c)
with pytest.raises(RuntimeError):
zeta_c = calculate_rel_vorticity(
grid_b,
ds_c.u,
ds_c.v,
ds_c.dx_n, # wrong coordinate
ds_c.dy_n,
ds_c.area_ne,
gridtype=None,
)
def test_interp_all():
datadict = datasets()
coords = datadict["coords"]
ds_b = datadict["B"]
grid_b = Grid(ds_b, coords=coords)
ds_c = datadict["C"]
grid_c = Grid(ds_c, coords=coords)
for var in ["u", "v", "tracer"]:
for ds, grid in zip([ds_b, ds_c], [grid_b, grid_c]):
for target, control_dims in zip(
["center", "right"], [["xt", "yt", "time"], ["xu", "yu", "time"]]
):
print(ds)
print(grid)
ds_interp = interp_all(grid, ds, target=target)
assert set(ds_interp[var].dims) == set(control_dims)
assert set(ds_interp.coords) == set(ds.coords)
ds_interp_nocoords = interp_all(
grid, ds, target=target, keep_coords=False
)
assert set(ds_interp_nocoords.coords) != set(ds.coords)
def test_dll_dist():
lon = np.arange(-180, 180, 10)
lat = np.arange(-90, 90, 10)
llon, llat = np.meshgrid(lon, lat)
dlon = np.diff(llon, axis=1)
dlat = np.diff(llat, axis=0)
# lon = lon[1:]
# lat = lat[1:]
# llon = llon[1:, 1:]
# llat = llat[1:, 1:]
# dlon = dlon[1:, :]
# dlat = dlat[:, 1:]
lon = lon[:-1]
lat = lat[:-1]
llon = llon[:-1, :-1]
llat = llat[:-1, :-1]
dlon = dlon[:-1, :]
dlat = dlat[:, :-1]
# convert to xarrays
da_lon = xr.DataArray(lon, coords=[("lon", lon)])
da_lat = xr.DataArray(lat, coords=[("lat", lat)])
print(dlon.shape)
print(lon.shape)
print(lat.shape)
da_dlon = xr.DataArray(dlon, coords=[lat, lon], dims=["lat", "lon"])
da_dlat = xr.DataArray(dlat, coords=[lat, lon], dims=["lat", "lon"])
d_raw = 111000.0 # represents the diatance of 1 deg on the Eq in m
dx_test = dlon * np.cos(np.deg2rad(llat)) * d_raw
dy_test = dlat * d_raw
dy_test = dy_test.T
dx, dy = dll_dist(da_dlon, da_dlat, da_lon, da_lat)
np.testing.assert_allclose(dx.data, dx_test)
np.testing.assert_allclose(dx_test[:, 0], dx.data[:, 0])
np.testing.assert_allclose(dy.data, dy_test)
| 30.101036
| 85
| 0.545056
|
143d73863421fa15ad7e90eff6c07f6112b24793
| 3,596
|
py
|
Python
|
customer/views.py
|
rishav142k/deliver-app
|
89905c7341fdf17d124aec5042e2ff90ddd455e8
|
[
"Unlicense",
"MIT"
] | null | null | null |
customer/views.py
|
rishav142k/deliver-app
|
89905c7341fdf17d124aec5042e2ff90ddd455e8
|
[
"Unlicense",
"MIT"
] | null | null | null |
customer/views.py
|
rishav142k/deliver-app
|
89905c7341fdf17d124aec5042e2ff90ddd455e8
|
[
"Unlicense",
"MIT"
] | null | null | null |
import json
from django.shortcuts import render, redirect
from django.views import View
from django.core.mail import send_mail
from .models import MenuItem, Category, OrderModel
from django.http import JsonResponse
class Index(View):
def get(self, request, *args, **kwargs):
return render(request, 'customer/index.html')
class About(View):
def get(self, request, *args, **kwargs):
return render(request, 'customer/about.html')
class Order(View):
def get(self, request, *args, **kwargs):
# get every item from each category
starters = MenuItem.objects.filter(category__name__contains='Starter')
desserts = MenuItem.objects.filter(category__name__contains='Dessert')
main_courses = MenuItem.objects.filter(category__name__contains='Main Course')
entres = MenuItem.objects.filter(category__name__contains='Entre')
# pass into context
context = {
'starters' : starters,
'entres': entres,
'desserts': desserts,
'main_courses' : main_courses,
}
# render the template
return render(request, 'customer/order.html', context)
def post(self, request, *args, **kwargs):
name = request.POST.get('name')
email = request.POST.get('email')
street = request.POST.get('street')
city = request.POST.get('city')
state = request.POST.get('state')
zip_code = request.POST.get('zip')
order_items = {
'items': []
}
items = request.POST.getlist('items[]')
for item in items:
menu_item = MenuItem.objects.get(pk__contains=int(item))
item_data = {
'id': menu_item.pk,
'name': menu_item.name,
'price': menu_item.price
}
order_items['items'].append(item_data)
price = 0
item_ids = []
for item in order_items['items']:
price += item['price']
item_ids.append(item['id'])
order = OrderModel.objects.create(
price=price,
name=name,
email=email,
street=street,
city=city,
state=state,
zip_code=zip_code
)
order.items.add(*item_ids)
# context = {
# 'items': order_items['items'],
# 'price': price
# }
return redirect('order-confirmation', pk=order.pk)
class OrderConfirmation(View):
def get(self, request, pk, *args, **kwargs):
order = OrderModel.objects.get(pk=pk)
context = {
'pk': order.pk,
'items': order.items,
'price': order.price,
}
return render(request, 'customer/order_confirmation.html', context)
def post(self, request, pk, *args, **kwargs):
# print(request.body)
data = json.loads(request.body)
        if data['isPaid']:
order = OrderModel.objects.get(pk = pk)
order.is_paid = True
order.save()
return redirect('order-complete')
class Order_complete(View):
def get(self, request,*args, **kwargs):
return render(request, 'customer/order_confirmed.html')
class Menu(View):
def get(self, request, *args, **kwargs):
menu_items= MenuItem.objects.all()
context = {
'menu_items' : menu_items,
}
return render(request, 'customer/menu.html', context)
class MenuSearch(View):
def get(self, request, *args, **kwargs):
pass
| 27.875969
| 86
| 0.574805
|
0c8e9030eaba66b6453044714527016615b723ff
| 457
|
py
|
Python
|
stravaviz/tracks_drawer.py
|
matoous/stravaviz
|
350f8b7ac086d3bd84dfd66a6165b5407f562a32
|
[
"MIT"
] | null | null | null |
stravaviz/tracks_drawer.py
|
matoous/stravaviz
|
350f8b7ac086d3bd84dfd66a6165b5407f562a32
|
[
"MIT"
] | null | null | null |
stravaviz/tracks_drawer.py
|
matoous/stravaviz
|
350f8b7ac086d3bd84dfd66a6165b5407f562a32
|
[
"MIT"
] | null | null | null |
import argparse
import typing
import pint # type: ignore
import svgwrite # type: ignore
from stravaviz.track import Track
from stravaviz.xy import XY
class TracksDrawer:
"""Base class that other drawer classes inherit from."""
def __init__(self, tracks: typing.List[Track], _: argparse.Namespace):
self.tracks = tracks
def draw(self, dr: svgwrite.Drawing, g: svgwrite.container.Group, size: XY, offset: XY) -> None:
pass
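# Illustrative sketch (not part of stravaviz): a minimal subclass showing how a concrete
# drawer could override draw(). It assumes the XY helper exposes x/y attributes; the
# fixed circle radius is an arbitrary choice for demonstration.
class _ExampleDotDrawer(TracksDrawer):
    def draw(self, dr: svgwrite.Drawing, g: svgwrite.container.Group, size: XY, offset: XY) -> None:
        # draw one dot per track at the drawing offset (purely illustrative)
        for _ in self.tracks:
            g.add(dr.circle(center=(offset.x, offset.y), r=1.0))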
| 24.052632
| 100
| 0.708972
|
c573de493c228519f92f57649f7b590d382b2740
| 58,111
|
py
|
Python
|
sdk/storage/azure-storage-blob/azure/storage/blob/_models.py
|
rlfagan/azure-sdk-for-python
|
cc2e21d273e8fccfe1cfe020dc456b24cbddc23d
|
[
"MIT"
] | null | null | null |
sdk/storage/azure-storage-blob/azure/storage/blob/_models.py
|
rlfagan/azure-sdk-for-python
|
cc2e21d273e8fccfe1cfe020dc456b24cbddc23d
|
[
"MIT"
] | null | null | null |
sdk/storage/azure-storage-blob/azure/storage/blob/_models.py
|
rlfagan/azure-sdk-for-python
|
cc2e21d273e8fccfe1cfe020dc456b24cbddc23d
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=too-few-public-methods, too-many-instance-attributes
# pylint: disable=super-init-not-called, too-many-lines
from enum import Enum
from azure.core.paging import PageIterator, ItemPaged
from azure.storage.blob._generated.models import FilterBlobItem
from ._shared import decode_base64_to_text
from ._shared.response_handlers import return_context_and_deserialized, process_storage_error
from ._shared.models import DictMixin, get_enum_value
from ._generated.models import Logging as GeneratedLogging
from ._generated.models import Metrics as GeneratedMetrics
from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy
from ._generated.models import StaticWebsite as GeneratedStaticWebsite
from ._generated.models import CorsRule as GeneratedCorsRule
from ._generated.models import AccessPolicy as GenAccessPolicy
from ._generated.models import StorageErrorException
from ._generated.models import BlobPrefix as GenBlobPrefix
from ._generated.models import BlobItemInternal
class BlobType(str, Enum):
BlockBlob = "BlockBlob"
PageBlob = "PageBlob"
AppendBlob = "AppendBlob"
class BlockState(str, Enum):
"""Block blob block types."""
Committed = 'Committed' #: Committed blocks.
Latest = 'Latest' #: Latest blocks.
Uncommitted = 'Uncommitted' #: Uncommitted blocks.
class StandardBlobTier(str, Enum):
"""
Specifies the blob tier to set the blob to. This is only applicable for
block blobs on standard storage accounts.
"""
Archive = 'Archive' #: Archive
Cool = 'Cool' #: Cool
Hot = 'Hot' #: Hot
class PremiumPageBlobTier(str, Enum):
"""
Specifies the page blob tier to set the blob to. This is only applicable to page
blobs on premium storage accounts. Please take a look at:
https://docs.microsoft.com/en-us/azure/storage/storage-premium-storage#scalability-and-performance-targets
for detailed information on the corresponding IOPS and throughput per PageBlobTier.
"""
P4 = 'P4' #: P4 Tier
P6 = 'P6' #: P6 Tier
P10 = 'P10' #: P10 Tier
P20 = 'P20' #: P20 Tier
P30 = 'P30' #: P30 Tier
P40 = 'P40' #: P40 Tier
P50 = 'P50' #: P50 Tier
P60 = 'P60' #: P60 Tier
class SequenceNumberAction(str, Enum):
"""Sequence number actions."""
Increment = 'increment'
"""
Increments the value of the sequence number by 1. If specifying this option,
do not include the x-ms-blob-sequence-number header.
"""
Max = 'max'
"""
Sets the sequence number to be the higher of the value included with the
request and the value currently stored for the blob.
"""
Update = 'update'
"""Sets the sequence number to the value included with the request."""
class PublicAccess(str, Enum):
"""
Specifies whether data in the container may be accessed publicly and the level of access.
"""
OFF = 'off'
"""
Specifies that there is no public read access for both the container and blobs within the container.
Clients cannot enumerate the containers within the storage account, nor the blobs within the container.
"""
Blob = 'blob'
"""
Specifies public read access for blobs. Blob data within this container can be read
via anonymous request, but container data is not available. Clients cannot enumerate
blobs within the container via anonymous request.
"""
Container = 'container'
"""
Specifies full public read access for container and blob data. Clients can enumerate
blobs within the container via anonymous request, but cannot enumerate containers
within the storage account.
"""
class BlobAnalyticsLogging(GeneratedLogging):
"""Azure Analytics Logging settings.
:keyword str version:
The version of Storage Analytics to configure. The default value is 1.0.
:keyword bool delete:
Indicates whether all delete requests should be logged. The default value is `False`.
:keyword bool read:
Indicates whether all read requests should be logged. The default value is `False`.
:keyword bool write:
Indicates whether all write requests should be logged. The default value is `False`.
:keyword ~azure.storage.blob.RetentionPolicy retention_policy:
Determines how long the associated data should persist. If not specified the retention
policy will be disabled by default.
"""
def __init__(self, **kwargs):
self.version = kwargs.get('version', u'1.0')
self.delete = kwargs.get('delete', False)
self.read = kwargs.get('read', False)
self.write = kwargs.get('write', False)
self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy()
@classmethod
def _from_generated(cls, generated):
if not generated:
return cls()
return cls(
version=generated.version,
delete=generated.delete,
read=generated.read,
write=generated.write,
retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access
)
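# Illustrative sketch (editor's addition): composing an analytics-logging configuration
# from the classes defined in this module. The flags and the 5-day retention window are
# arbitrary example values, not recommended settings. RetentionPolicy is defined further
# down in this module; the name resolves when the function is called.
def _example_analytics_logging():
    return BlobAnalyticsLogging(
        read=True,
        write=True,
        delete=True,
        retention_policy=RetentionPolicy(enabled=True, days=5),
    )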
class Metrics(GeneratedMetrics):
"""A summary of request statistics grouped by API in hour or minute aggregates
for blobs.
:keyword str version:
The version of Storage Analytics to configure. The default value is 1.0.
:keyword bool enabled:
Indicates whether metrics are enabled for the Blob service.
The default value is `False`.
:keyword bool include_apis:
Indicates whether metrics should generate summary statistics for called API operations.
:keyword ~azure.storage.blob.RetentionPolicy retention_policy:
Determines how long the associated data should persist. If not specified the retention
policy will be disabled by default.
"""
def __init__(self, **kwargs):
self.version = kwargs.get('version', u'1.0')
self.enabled = kwargs.get('enabled', False)
self.include_apis = kwargs.get('include_apis')
self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy()
@classmethod
def _from_generated(cls, generated):
if not generated:
return cls()
return cls(
version=generated.version,
enabled=generated.enabled,
include_apis=generated.include_apis,
retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access
)
class RetentionPolicy(GeneratedRetentionPolicy):
"""The retention policy which determines how long the associated data should
persist.
:param bool enabled:
Indicates whether a retention policy is enabled for the storage service.
The default value is False.
:param int days:
Indicates the number of days that metrics or logging or
soft-deleted data should be retained. All data older than this value will
be deleted. If enabled=True, the number of days must be specified.
"""
def __init__(self, enabled=False, days=None):
self.enabled = enabled
self.days = days
if self.enabled and (self.days is None):
raise ValueError("If policy is enabled, 'days' must be specified.")
@classmethod
def _from_generated(cls, generated):
if not generated:
return cls()
return cls(
enabled=generated.enabled,
days=generated.days,
)
class StaticWebsite(GeneratedStaticWebsite):
"""The properties that enable an account to host a static website.
:keyword bool enabled:
Indicates whether this account is hosting a static website.
The default value is `False`.
:keyword str index_document:
The default name of the index page under each directory.
:keyword str error_document404_path:
The absolute path of the custom 404 page.
"""
def __init__(self, **kwargs):
self.enabled = kwargs.get('enabled', False)
if self.enabled:
self.index_document = kwargs.get('index_document')
self.error_document404_path = kwargs.get('error_document404_path')
else:
self.index_document = None
self.error_document404_path = None
@classmethod
def _from_generated(cls, generated):
if not generated:
return cls()
return cls(
enabled=generated.enabled,
index_document=generated.index_document,
error_document404_path=generated.error_document404_path,
)
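# Illustrative sketch (editor's addition): enabling the static-website properties
# described above. The document names are placeholder examples.
def _example_static_website():
    return StaticWebsite(
        enabled=True,
        index_document="index.html",
        error_document404_path="error/404.html",
    )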
class CorsRule(GeneratedCorsRule):
"""CORS is an HTTP feature that enables a web application running under one
domain to access resources in another domain. Web browsers implement a
security restriction known as same-origin policy that prevents a web page
from calling APIs in a different domain; CORS provides a secure way to
allow one domain (the origin domain) to call APIs in another domain.
:param list(str) allowed_origins:
A list of origin domains that will be allowed via CORS, or "*" to allow
all domains. The list must contain at least one entry. Limited to 64
origin domains. Each allowed origin can have up to 256 characters.
:param list(str) allowed_methods:
A list of HTTP methods that are allowed to be executed by the origin.
The list must contain at least one entry. For Azure Storage,
permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT.
:keyword list(str) allowed_headers:
Defaults to an empty list. A list of headers allowed to be part of
the cross-origin request. Limited to 64 defined headers and 2 prefixed
headers. Each header can be up to 256 characters.
:keyword list(str) exposed_headers:
Defaults to an empty list. A list of response headers to expose to CORS
clients. Limited to 64 defined headers and two prefixed headers. Each
header can be up to 256 characters.
:keyword int max_age_in_seconds:
The number of seconds that the client/browser should cache a
preflight response.
"""
def __init__(self, allowed_origins, allowed_methods, **kwargs):
self.allowed_origins = ','.join(allowed_origins)
self.allowed_methods = ','.join(allowed_methods)
self.allowed_headers = ','.join(kwargs.get('allowed_headers', []))
self.exposed_headers = ','.join(kwargs.get('exposed_headers', []))
self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0)
@classmethod
def _from_generated(cls, generated):
return cls(
[generated.allowed_origins],
[generated.allowed_methods],
allowed_headers=[generated.allowed_headers],
exposed_headers=[generated.exposed_headers],
max_age_in_seconds=generated.max_age_in_seconds,
)
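# Illustrative sketch (editor's addition): a CORS rule permitting GET and HEAD requests
# from a single origin, matching the parameter descriptions above. The origin, header
# names and max age are placeholders chosen for the example.
def _example_cors_rule():
    return CorsRule(
        ["https://www.example.com"],
        ["GET", "HEAD"],
        allowed_headers=["x-ms-meta-*"],
        exposed_headers=["x-ms-meta-*"],
        max_age_in_seconds=3600,
    )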
class ContainerProperties(DictMixin):
"""Blob container's properties class.
Returned ``ContainerProperties`` instances expose these values through a
dictionary interface, for example: ``container_props["last_modified"]``.
Additionally, the container name is available as ``container_props["name"]``.
:ivar ~datetime.datetime last_modified:
A datetime object representing the last time the container was modified.
:ivar str etag:
The ETag contains a value that you can use to perform operations
conditionally.
:ivar ~azure.storage.blob.LeaseProperties lease:
Stores all the lease information for the container.
:ivar str public_access: Specifies whether data in the container may be accessed
publicly and the level of access.
:ivar bool has_immutability_policy:
Represents whether the container has an immutability policy.
:ivar bool has_legal_hold:
Represents whether the container has a legal hold.
:ivar dict metadata: A dict with name-value pairs to associate with the
container as metadata.
:ivar ~azure.storage.blob.ContainerEncryptionScope encryption_scope:
The default encryption scope configuration for the container.
"""
def __init__(self, **kwargs):
self.name = None
self.last_modified = kwargs.get('Last-Modified')
self.etag = kwargs.get('ETag')
self.lease = LeaseProperties(**kwargs)
self.public_access = kwargs.get('x-ms-blob-public-access')
self.has_immutability_policy = kwargs.get('x-ms-has-immutability-policy')
self.deleted = None
self.version = None
self.has_legal_hold = kwargs.get('x-ms-has-legal-hold')
self.metadata = kwargs.get('metadata')
self.encryption_scope = None
default_encryption_scope = kwargs.get('x-ms-default-encryption-scope')
if default_encryption_scope:
self.encryption_scope = ContainerEncryptionScope(
default_encryption_scope=default_encryption_scope,
prevent_encryption_scope_override=kwargs.get('x-ms-deny-encryption-scope-override', False)
)
@classmethod
def _from_generated(cls, generated):
props = cls()
props.name = generated.name
props.last_modified = generated.properties.last_modified
props.etag = generated.properties.etag
props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access
props.public_access = generated.properties.public_access
props.has_immutability_policy = generated.properties.has_immutability_policy
props.deleted = generated.deleted
props.version = generated.version
props.has_legal_hold = generated.properties.has_legal_hold
props.metadata = generated.metadata
props.encryption_scope = ContainerEncryptionScope._from_generated(generated) #pylint: disable=protected-access
return props
class ContainerPropertiesPaged(PageIterator):
"""An Iterable of Container properties.
:ivar str service_endpoint: The service URL.
:ivar str prefix: A container name prefix being used to filter the list.
:ivar str marker: The continuation token of the current page of results.
:ivar int results_per_page: The maximum number of results retrieved per API call.
:ivar str continuation_token: The continuation token to retrieve the next page of results.
:ivar str location_mode: The location mode being used to list results. The available
options include "primary" and "secondary".
:ivar current_page: The current page of listed results.
:vartype current_page: list(~azure.storage.blob.ContainerProperties)
:param callable command: Function to retrieve the next page of items.
:param str prefix: Filters the results to return only containers whose names
begin with the specified prefix.
:param int results_per_page: The maximum number of container names to retrieve per
call.
:param str continuation_token: An opaque continuation token.
"""
def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None):
super(ContainerPropertiesPaged, self).__init__(
get_next=self._get_next_cb,
extract_data=self._extract_data_cb,
continuation_token=continuation_token or ""
)
self._command = command
self.service_endpoint = None
self.prefix = prefix
self.marker = None
self.results_per_page = results_per_page
self.location_mode = None
self.current_page = []
def _get_next_cb(self, continuation_token):
try:
return self._command(
marker=continuation_token or None,
maxresults=self.results_per_page,
cls=return_context_and_deserialized,
use_location=self.location_mode)
except StorageErrorException as error:
process_storage_error(error)
def _extract_data_cb(self, get_next_return):
self.location_mode, self._response = get_next_return
self.service_endpoint = self._response.service_endpoint
self.prefix = self._response.prefix
self.marker = self._response.marker
self.results_per_page = self._response.max_results
self.current_page = [self._build_item(item) for item in self._response.container_items]
return self._response.next_marker or None, self.current_page
@staticmethod
def _build_item(item):
return ContainerProperties._from_generated(item) # pylint: disable=protected-access
class BlobProperties(DictMixin):
"""
Blob Properties.
:ivar str name:
The name of the blob.
:ivar str container:
The container in which the blob resides.
:ivar str snapshot:
Datetime value that uniquely identifies the blob snapshot.
:ivar ~azure.storage.blob.BlobType blob_type:
String indicating this blob's type.
:ivar dict metadata:
Name-value pairs associated with the blob as metadata.
:ivar ~datetime.datetime last_modified:
A datetime object representing the last time the blob was modified.
:ivar str etag:
The ETag contains a value that you can use to perform operations
conditionally.
:ivar int size:
The size of the content returned. If the entire blob was requested,
the length of blob in bytes. If a subset of the blob was requested, the
length of the returned subset.
:ivar str content_range:
Indicates the range of bytes returned in the event that the client
requested a subset of the blob.
:ivar int append_blob_committed_block_count:
(For Append Blobs) Number of committed blocks in the blob.
:ivar int page_blob_sequence_number:
(For Page Blobs) Sequence number for page blob used for coordinating
concurrent writes.
:ivar bool server_encrypted:
Set to true if the blob is encrypted on the server.
:ivar ~azure.storage.blob.CopyProperties copy:
Stores all the copy properties for the blob.
:ivar ~azure.storage.blob.ContentSettings content_settings:
Stores all the content settings for the blob.
:ivar ~azure.storage.blob.LeaseProperties lease:
Stores all the lease information for the blob.
:ivar ~azure.storage.blob.StandardBlobTier blob_tier:
Indicates the access tier of the blob. The hot tier is optimized
for storing data that is accessed frequently. The cool storage tier
is optimized for storing data that is infrequently accessed and stored
for at least a month. The archive tier is optimized for storing
data that is rarely accessed and stored for at least six months
with flexible latency requirements.
:ivar ~datetime.datetime blob_tier_change_time:
Indicates when the access tier was last changed.
:ivar bool blob_tier_inferred:
Indicates whether the access tier was inferred by the service.
If false, it indicates that the tier was set explicitly.
:ivar bool deleted:
Whether this blob was deleted.
:ivar ~datetime.datetime deleted_time:
A datetime object representing the time at which the blob was deleted.
:ivar int remaining_retention_days:
The number of days that the blob will be retained before being permanently deleted by the service.
:ivar ~datetime.datetime creation_time:
Indicates when the blob was created, in UTC.
:ivar str archive_status:
Archive status of blob.
:ivar str encryption_key_sha256:
The SHA-256 hash of the provided encryption key.
:ivar str encryption_scope:
A predefined encryption scope used to encrypt the data on the service. An encryption
scope can be created using the Management API and referenced here by name. If a default
encryption scope has been defined at the container, this value will override it if the
container-level scope is configured to allow overrides. Otherwise an error will be raised.
:ivar bool request_server_encrypted:
Whether this blob is encrypted.
:ivar list(~azure.storage.blob.ObjectReplicationPolicy) object_replication_source_properties:
Only present for blobs that have policy ids and rule ids applied to them.
:ivar str object_replication_destination_policy:
Represents the Object Replication Policy Id that created this blob.
:ivar int tag_count:
Tags count on this blob.
:ivar dict(str, str) tags:
Key value pair of tags on this blob.
"""
def __init__(self, **kwargs):
self.name = kwargs.get('name')
self.container = None
self.snapshot = kwargs.get('x-ms-snapshot')
self.version_id = kwargs.get('x-ms-version-id')
self.is_current_version = kwargs.get('x-ms-is-current-version')
self.blob_type = BlobType(kwargs['x-ms-blob-type']) if kwargs.get('x-ms-blob-type') else None
self.metadata = kwargs.get('metadata')
self.encrypted_metadata = kwargs.get('encrypted_metadata')
self.last_modified = kwargs.get('Last-Modified')
self.etag = kwargs.get('ETag')
self.size = kwargs.get('Content-Length')
self.content_range = kwargs.get('Content-Range')
self.append_blob_committed_block_count = kwargs.get('x-ms-blob-committed-block-count')
self.page_blob_sequence_number = kwargs.get('x-ms-blob-sequence-number')
self.server_encrypted = kwargs.get('x-ms-server-encrypted')
self.copy = CopyProperties(**kwargs)
self.content_settings = ContentSettings(**kwargs)
self.lease = LeaseProperties(**kwargs)
self.blob_tier = kwargs.get('x-ms-access-tier')
self.blob_tier_change_time = kwargs.get('x-ms-access-tier-change-time')
self.blob_tier_inferred = kwargs.get('x-ms-access-tier-inferred')
self.deleted = False
self.deleted_time = None
self.remaining_retention_days = None
self.creation_time = kwargs.get('x-ms-creation-time')
self.archive_status = kwargs.get('x-ms-archive-status')
self.encryption_key_sha256 = kwargs.get('x-ms-encryption-key-sha256')
self.encryption_scope = kwargs.get('x-ms-encryption-scope')
self.request_server_encrypted = kwargs.get('x-ms-server-encrypted')
self.object_replication_source_properties = kwargs.get('object_replication_source_properties')
self.object_replication_destination_policy = kwargs.get('x-ms-or-policy-id')
self.tag_count = kwargs.get('x-ms-tag-count')
self.tags = None
@classmethod
def _from_generated(cls, generated):
blob = BlobProperties()
blob.name = generated.name
blob_type = get_enum_value(generated.properties.blob_type)
blob.blob_type = BlobType(blob_type) if blob_type else None
blob.etag = generated.properties.etag
blob.deleted = generated.deleted
blob.snapshot = generated.snapshot
blob.metadata = generated.metadata.additional_properties if generated.metadata else {}
blob.encrypted_metadata = generated.metadata.encrypted if generated.metadata else None
blob.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access
blob.copy = CopyProperties._from_generated(generated) # pylint: disable=protected-access
blob.last_modified = generated.properties.last_modified
blob.creation_time = generated.properties.creation_time
blob.content_settings = ContentSettings._from_generated(generated) # pylint: disable=protected-access
blob.size = generated.properties.content_length
blob.page_blob_sequence_number = generated.properties.blob_sequence_number
blob.server_encrypted = generated.properties.server_encrypted
blob.encryption_scope = generated.properties.encryption_scope
blob.deleted_time = generated.properties.deleted_time
blob.remaining_retention_days = generated.properties.remaining_retention_days
blob.blob_tier = generated.properties.access_tier
blob.blob_tier_inferred = generated.properties.access_tier_inferred
blob.archive_status = generated.properties.archive_status
blob.blob_tier_change_time = generated.properties.access_tier_change_time
blob.version_id = generated.version_id
blob.is_current_version = generated.is_current_version
blob.tag_count = generated.properties.tag_count
blob.tags = blob._parse_tags(generated.blob_tags) # pylint: disable=protected-access
return blob
@staticmethod
def _parse_tags(generated_tags):
# type: (Optional[List[BlobTag]]) -> Union[Dict[str, str], None]
"""Deserialize a list of BlobTag objects into a dict.
"""
if generated_tags:
tag_dict = {t.key: t.value for t in generated_tags.blob_tag_set}
return tag_dict
return None
class BlobPropertiesPaged(PageIterator):
"""An Iterable of Blob properties.
:ivar str service_endpoint: The service URL.
:ivar str prefix: A blob name prefix being used to filter the list.
:ivar str marker: The continuation token of the current page of results.
:ivar int results_per_page: The maximum number of results retrieved per API call.
:ivar str continuation_token: The continuation token to retrieve the next page of results.
:ivar str location_mode: The location mode being used to list results. The available
options include "primary" and "secondary".
:ivar current_page: The current page of listed results.
:vartype current_page: list(~azure.storage.blob.BlobProperties)
:ivar str container: The container that the blobs are listed from.
:ivar str delimiter: A delimiting character used for hierarchy listing.
:param callable command: Function to retrieve the next page of items.
:param str container: The name of the container.
:param str prefix: Filters the results to return only blobs whose names
begin with the specified prefix.
:param int results_per_page: The maximum number of blobs to retrieve per
call.
:param str continuation_token: An opaque continuation token.
:param str delimiter:
Used to capture blobs whose names begin with the same substring up to
the appearance of the delimiter character. The delimiter may be a single
character or a string.
:param location_mode: Specifies the location the request should be sent to.
This mode only applies for RA-GRS accounts which allow secondary read access.
Options include 'primary' or 'secondary'.
"""
def __init__(
self, command,
container=None,
prefix=None,
results_per_page=None,
continuation_token=None,
delimiter=None,
location_mode=None):
super(BlobPropertiesPaged, self).__init__(
get_next=self._get_next_cb,
extract_data=self._extract_data_cb,
continuation_token=continuation_token or ""
)
self._command = command
self.service_endpoint = None
self.prefix = prefix
self.marker = None
self.results_per_page = results_per_page
self.container = container
self.delimiter = delimiter
self.current_page = None
self.location_mode = location_mode
def _get_next_cb(self, continuation_token):
try:
return self._command(
prefix=self.prefix,
marker=continuation_token or None,
maxresults=self.results_per_page,
cls=return_context_and_deserialized,
use_location=self.location_mode)
except StorageErrorException as error:
process_storage_error(error)
def _extract_data_cb(self, get_next_return):
self.location_mode, self._response = get_next_return
self.service_endpoint = self._response.service_endpoint
self.prefix = self._response.prefix
self.marker = self._response.marker
self.results_per_page = self._response.max_results
self.container = self._response.container_name
self.current_page = [self._build_item(item) for item in self._response.segment.blob_items]
return self._response.next_marker or None, self.current_page
def _build_item(self, item):
if isinstance(item, BlobProperties):
return item
if isinstance(item, BlobItemInternal):
blob = BlobProperties._from_generated(item) # pylint: disable=protected-access
blob.container = self.container
return blob
return item
class FilteredBlob(DictMixin):
"""Blob info from a Filter Blobs API call.
:ivar name: Blob name
:type name: str
:ivar container_name: Container name.
:type container_name: str
"""
def __init__(self, **kwargs):
self.name = kwargs.get('name', None)
self.container_name = kwargs.get('container_name', None)
class FilteredBlobPaged(PageIterator):
"""An Iterable of Blob properties.
:ivar str service_endpoint: The service URL.
:ivar str prefix: A blob name prefix being used to filter the list.
:ivar str marker: The continuation token of the current page of results.
:ivar int results_per_page: The maximum number of results retrieved per API call.
:ivar str continuation_token: The continuation token to retrieve the next page of results.
:ivar str location_mode: The location mode being used to list results. The available
options include "primary" and "secondary".
:ivar current_page: The current page of listed results.
:vartype current_page: list(~azure.storage.blob.FilteredBlob)
:ivar str container: The container that the blobs are listed from.
:param callable command: Function to retrieve the next page of items.
:param str container: The name of the container.
:param int results_per_page: The maximum number of blobs to retrieve per
call.
:param str continuation_token: An opaque continuation token.
:param location_mode: Specifies the location the request should be sent to.
This mode only applies for RA-GRS accounts which allow secondary read access.
Options include 'primary' or 'secondary'.
"""
def __init__(
self, command,
container=None,
results_per_page=None,
continuation_token=None,
location_mode=None):
super(FilteredBlobPaged, self).__init__(
get_next=self._get_next_cb,
extract_data=self._extract_data_cb,
continuation_token=continuation_token or ""
)
self._command = command
self.service_endpoint = None
self.marker = continuation_token
self.results_per_page = results_per_page
self.container = container
self.current_page = None
self.location_mode = location_mode
def _get_next_cb(self, continuation_token):
try:
return self._command(
marker=continuation_token or None,
maxresults=self.results_per_page,
cls=return_context_and_deserialized,
use_location=self.location_mode)
except StorageErrorException as error:
process_storage_error(error)
def _extract_data_cb(self, get_next_return):
self.location_mode, self._response = get_next_return
self.service_endpoint = self._response.service_endpoint
self.marker = self._response.next_marker
self.current_page = [self._build_item(item) for item in self._response.blobs]
return self._response.next_marker or None, self.current_page
@staticmethod
def _build_item(item):
if isinstance(item, FilterBlobItem):
blob = FilteredBlob(name=item.name, container_name=item.container_name) # pylint: disable=protected-access
return blob
return item
class BlobPrefix(ItemPaged, DictMixin):
"""An Iterable of Blob properties.
Returned from walk_blobs when a delimiter is used.
Can be thought of as a virtual blob directory.
:ivar str name: The prefix, or "directory name" of the blob.
:ivar str service_endpoint: The service URL.
:ivar str prefix: A blob name prefix being used to filter the list.
:ivar str marker: The continuation token of the current page of results.
:ivar int results_per_page: The maximum number of results retrieved per API call.
:ivar str next_marker: The continuation token to retrieve the next page of results.
:ivar str location_mode: The location mode being used to list results. The available
options include "primary" and "secondary".
:ivar current_page: The current page of listed results.
:vartype current_page: list(~azure.storage.blob.BlobProperties)
:ivar str container: The container that the blobs are listed from.
:ivar str delimiter: A delimiting character used for hierarchy listing.
:param callable command: Function to retrieve the next page of items.
:param str prefix: Filters the results to return only blobs whose names
begin with the specified prefix.
:param int results_per_page: The maximum number of blobs to retrieve per
call.
:param str marker: An opaque continuation token.
:param str delimiter:
Used to capture blobs whose names begin with the same substring up to
the appearance of the delimiter character. The delimiter may be a single
character or a string.
:param location_mode: Specifies the location the request should be sent to.
This mode only applies for RA-GRS accounts which allow secondary read access.
Options include 'primary' or 'secondary'.
"""
def __init__(self, *args, **kwargs):
super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs)
self.name = kwargs.get('prefix')
self.prefix = kwargs.get('prefix')
self.results_per_page = kwargs.get('results_per_page')
self.container = kwargs.get('container')
self.delimiter = kwargs.get('delimiter')
self.location_mode = kwargs.get('location_mode')
class BlobPrefixPaged(BlobPropertiesPaged):
def __init__(self, *args, **kwargs):
super(BlobPrefixPaged, self).__init__(*args, **kwargs)
self.name = self.prefix
def _extract_data_cb(self, get_next_return):
continuation_token, _ = super(BlobPrefixPaged, self)._extract_data_cb(get_next_return)
self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items
self.current_page = [self._build_item(item) for item in self.current_page]
self.delimiter = self._response.delimiter
return continuation_token, self.current_page
def _build_item(self, item):
item = super(BlobPrefixPaged, self)._build_item(item)
if isinstance(item, GenBlobPrefix):
return BlobPrefix(
self._command,
container=self.container,
prefix=item.name,
results_per_page=self.results_per_page,
location_mode=self.location_mode)
return item
class LeaseProperties(DictMixin):
"""Blob Lease Properties.
:ivar str status:
The lease status of the blob. Possible values: locked|unlocked
:ivar str state:
Lease state of the blob. Possible values: available|leased|expired|breaking|broken
:ivar str duration:
When a blob is leased, specifies whether the lease is of infinite or fixed duration.
"""
def __init__(self, **kwargs):
self.status = get_enum_value(kwargs.get('x-ms-lease-status'))
self.state = get_enum_value(kwargs.get('x-ms-lease-state'))
self.duration = get_enum_value(kwargs.get('x-ms-lease-duration'))
@classmethod
def _from_generated(cls, generated):
lease = cls()
lease.status = get_enum_value(generated.properties.lease_status)
lease.state = get_enum_value(generated.properties.lease_state)
lease.duration = get_enum_value(generated.properties.lease_duration)
return lease
class ContentSettings(DictMixin):
"""The content settings of a blob.
:param str content_type:
The content type specified for the blob. If no content type was
specified, the default content type is application/octet-stream.
:param str content_encoding:
If the content_encoding has previously been set
for the blob, that value is stored.
:param str content_language:
If the content_language has previously been set
for the blob, that value is stored.
:param str content_disposition:
content_disposition conveys additional information about how to
process the response payload, and also can be used to attach
additional metadata. If content_disposition has previously been set
for the blob, that value is stored.
:param str cache_control:
If the cache_control has previously been set for
the blob, that value is stored.
:param str content_md5:
If the content_md5 has been set for the blob, this response
header is stored so that the client can check for message content
integrity.
"""
def __init__(
self, content_type=None, content_encoding=None,
content_language=None, content_disposition=None,
cache_control=None, content_md5=None, **kwargs):
self.content_type = content_type or kwargs.get('Content-Type')
self.content_encoding = content_encoding or kwargs.get('Content-Encoding')
self.content_language = content_language or kwargs.get('Content-Language')
self.content_md5 = content_md5 or kwargs.get('Content-MD5')
self.content_disposition = content_disposition or kwargs.get('Content-Disposition')
self.cache_control = cache_control or kwargs.get('Cache-Control')
@classmethod
def _from_generated(cls, generated):
settings = cls()
settings.content_type = generated.properties.content_type or None
settings.content_encoding = generated.properties.content_encoding or None
settings.content_language = generated.properties.content_language or None
settings.content_md5 = generated.properties.content_md5 or None
settings.content_disposition = generated.properties.content_disposition or None
settings.cache_control = generated.properties.cache_control or None
return settings
class CopyProperties(DictMixin):
"""Blob Copy Properties.
These properties will be `None` if this blob has never been the destination
in a Copy Blob operation, or if this blob has been modified after a concluded
Copy Blob operation, for example, using Set Blob Properties, Upload Blob, or Commit Block List.
:ivar str id:
String identifier for the last attempted Copy Blob operation where this blob
was the destination blob.
:ivar str source:
URL up to 2 KB in length that specifies the source blob used in the last attempted
Copy Blob operation where this blob was the destination blob.
:ivar str status:
State of the copy operation identified by Copy ID, with these values:
success:
Copy completed successfully.
pending:
Copy is in progress. Check copy_status_description if intermittent,
non-fatal errors impede copy progress but don't cause failure.
aborted:
Copy was ended by Abort Copy Blob.
failed:
Copy failed. See copy_status_description for failure details.
:ivar str progress:
Contains the number of bytes copied and the total bytes in the source in the last
attempted Copy Blob operation where this blob was the destination blob. Can show
between 0 and Content-Length bytes copied.
:ivar ~datetime.datetime completion_time:
Conclusion time of the last attempted Copy Blob operation where this blob was the
destination blob. This value can specify the time of a completed, aborted, or
failed copy attempt.
:ivar str status_description:
Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal
or non-fatal copy operation failure.
:ivar bool incremental_copy:
Copies the snapshot of the source page blob to a destination page blob.
The snapshot is copied such that only the differential changes from
the previously copied snapshot are transferred to the destination.
:ivar ~datetime.datetime destination_snapshot:
Included if the blob is incremental copy blob or incremental copy snapshot,
if x-ms-copy-status is success. Snapshot time of the last successful
incremental copy snapshot for this blob.
"""
def __init__(self, **kwargs):
self.id = kwargs.get('x-ms-copy-id')
self.source = kwargs.get('x-ms-copy-source')
self.status = get_enum_value(kwargs.get('x-ms-copy-status'))
self.progress = kwargs.get('x-ms-copy-progress')
self.completion_time = kwargs.get('x-ms-copy-completion_time')
self.status_description = kwargs.get('x-ms-copy-status-description')
self.incremental_copy = kwargs.get('x-ms-incremental-copy')
self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot')
@classmethod
def _from_generated(cls, generated):
copy = cls()
copy.id = generated.properties.copy_id or None
copy.status = get_enum_value(generated.properties.copy_status) or None
copy.source = generated.properties.copy_source or None
copy.progress = generated.properties.copy_progress or None
copy.completion_time = generated.properties.copy_completion_time or None
copy.status_description = generated.properties.copy_status_description or None
copy.incremental_copy = generated.properties.incremental_copy or None
copy.destination_snapshot = generated.properties.destination_snapshot or None
return copy
class BlobBlock(DictMixin):
"""BlockBlob Block class.
:param str block_id:
Block id.
:param str state:
Block state. Possible values: committed|uncommitted
:ivar int size:
Block size in bytes.
"""
def __init__(self, block_id, state=BlockState.Latest):
self.id = block_id
self.state = state
self.size = None
@classmethod
def _from_generated(cls, generated):
block = cls(decode_base64_to_text(generated.name))
block.size = generated.size
return block
class PageRange(DictMixin):
"""Page Range for page blob.
:param int start:
Start of page range in bytes.
:param int end:
End of page range in bytes.
"""
def __init__(self, start=None, end=None):
self.start = start
self.end = end
class AccessPolicy(GenAccessPolicy):
"""Access Policy class used by the set and get access policy methods in each service.
A stored access policy can specify the start time, expiry time, and
permissions for the Shared Access Signatures with which it's associated.
Depending on how you want to control access to your resource, you can
specify all of these parameters within the stored access policy, and omit
them from the URL for the Shared Access Signature. Doing so permits you to
modify the associated signature's behavior at any time, as well as to revoke
it. Or you can specify one or more of the access policy parameters within
the stored access policy, and the others on the URL. Finally, you can
specify all of the parameters on the URL. In this case, you can use the
stored access policy to revoke the signature, but not to modify its behavior.
Together the Shared Access Signature and the stored access policy must
include all fields required to authenticate the signature. If any required
fields are missing, the request will fail. Likewise, if a field is specified
both in the Shared Access Signature URL and in the stored access policy, the
request will fail with status code 400 (Bad Request).
:param permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:type permission: str or ~azure.storage.blob.ContainerSasPermissions
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: ~datetime.datetime or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: ~datetime.datetime or str
"""
def __init__(self, permission=None, expiry=None, start=None):
self.start = start
self.expiry = expiry
self.permission = permission
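# Illustrative sketch (editor's addition): a stored access policy granting read and list
# access for one day, using the raw permission-string form that the docstring above
# allows. The one-day window is an arbitrary example; naive datetimes are treated as UTC
# per the docstring.
def _example_access_policy():
    from datetime import datetime, timedelta
    start = datetime.utcnow()
    return AccessPolicy(permission="rl", expiry=start + timedelta(days=1), start=start)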
class ContainerSasPermissions(object):
"""ContainerSasPermissions class to be used with the
:func:`~azure.storage.blob.generate_container_sas` function and
for the AccessPolicies used with
:func:`~azure.storage.blob.ContainerClient.set_container_access_policy`.
:param bool read:
Read the content, properties, metadata or block list of any blob in the
container. Use any blob in the container as the source of a copy operation.
:param bool write:
For any blob in the container, create or write content, properties,
metadata, or block list. Snapshot or lease the blob. Resize the blob
(page blob only). Use the blob as the destination of a copy operation
within the same account. Note: You cannot grant permissions to read or
write container properties or metadata, nor to lease a container, with
a container SAS. Use an account SAS instead.
:param bool delete:
Delete any blob in the container. Note: You cannot grant permissions to
delete a container with a container SAS. Use an account SAS instead.
:param bool delete_previous_version:
Delete the previous blob version on versioning-enabled storage accounts.
:param bool list:
List blobs in the container.
:param bool tag:
Set or get tags on the blobs in the container.
"""
def __init__(self, read=False, write=False, delete=False, list=False, delete_previous_version=False, tag=False): # pylint: disable=redefined-builtin
self.read = read
self.write = write
self.delete = delete
self.list = list
self.delete_previous_version = delete_previous_version
self.tag = tag
self._str = (('r' if self.read else '') +
('w' if self.write else '') +
('d' if self.delete else '') +
('x' if self.delete_previous_version else '') +
('l' if self.list else '') +
('t' if self.tag else ''))
def __str__(self):
return self._str
@classmethod
def from_string(cls, permission):
"""Create a ContainerSasPermissions from a string.
To specify read, write, delete, or list permissions you need only to
include the first letter of the word in the string. E.g. For read and
write permissions, you would provide a string "rw".
:param str permission: The string which dictates the read, write, delete,
and list permissions.
:return: A ContainerSasPermissions object
:rtype: ~azure.storage.blob.ContainerSasPermissions
"""
p_read = 'r' in permission
p_write = 'w' in permission
p_delete = 'd' in permission
p_list = 'l' in permission
p_delete_previous_version = 'x' in permission
p_tag = 't' in permission
parsed = cls(read=p_read, write=p_write, delete=p_delete, list=p_list,
delete_previous_version=p_delete_previous_version, tag=p_tag)
parsed._str = permission # pylint: disable = protected-access
return parsed
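# Illustrative sketch (editor's addition): the two equivalent ways of expressing
# container SAS permissions described above, keyword flags and the single-letter
# permission string.
def _example_container_sas_permissions():
    from_flags = ContainerSasPermissions(read=True, list=True)
    from_letters = ContainerSasPermissions.from_string("rl")
    return str(from_flags) == str(from_letters) == "rl"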
class BlobSasPermissions(object):
"""BlobSasPermissions class to be used with the
:func:`~azure.storage.blob.generate_blob_sas` function.
:param bool read:
Read the content, properties, metadata and block list. Use the blob as
the source of a copy operation.
:param bool add:
Add a block to an append blob.
:param bool create:
Write a new blob, snapshot a blob, or copy a blob to a new blob.
:param bool write:
Create or write content, properties, metadata, or block list. Snapshot
or lease the blob. Resize the blob (page blob only). Use the blob as the
destination of a copy operation within the same account.
:param bool delete:
Delete the blob.
:param bool delete_previous_version:
Delete the previous blob version on versioning-enabled storage accounts.
:param bool tag:
Set or get tags on the blob.
"""
def __init__(self, read=False, add=False, create=False, write=False,
delete=False, delete_previous_version=False, tag=True):
self.read = read
self.add = add
self.create = create
self.write = write
self.delete = delete
self.delete_previous_version = delete_previous_version
self.tag = tag
self._str = (('r' if self.read else '') +
('a' if self.add else '') +
('c' if self.create else '') +
('w' if self.write else '') +
('d' if self.delete else '') +
('x' if self.delete_previous_version else '') +
('t' if self.tag else ''))
def __str__(self):
return self._str
@classmethod
def from_string(cls, permission):
"""Create a BlobSasPermissions from a string.
To specify read, add, create, write, or delete permissions you need only to
include the first letter of the word in the string. E.g. For read and
write permissions, you would provide a string "rw".
:param str permission: The string which dictates the read, add, create,
write, or delete permissions.
:return: A BlobSasPermissions object
:rtype: ~azure.storage.blob.BlobSasPermissions
"""
p_read = 'r' in permission
p_add = 'a' in permission
p_create = 'c' in permission
p_write = 'w' in permission
p_delete = 'd' in permission
p_delete_previous_version = 'x' in permission
p_tag = 't' in permission
parsed = cls(read=p_read, add=p_add, create=p_create, write=p_write, delete=p_delete,
delete_previous_version=p_delete_previous_version, tag=p_tag)
parsed._str = permission # pylint: disable = protected-access
return parsed
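# Illustrative sketch (editor's addition): parsing a blob SAS permission string as
# described in the docstring above. Note that the keyword constructor defaults ``tag``
# to True, so the string form is the simpler way to request an exact permission set.
def _example_blob_sas_permissions():
    perms = BlobSasPermissions.from_string("racwd")
    return perms.read and perms.add and perms.create and perms.write and perms.delete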
class CustomerProvidedEncryptionKey(object):
"""
All data in Azure Storage is encrypted at-rest using an account-level encryption key.
In versions 2018-06-17 and newer, you can manage the key used to encrypt blob contents
and application metadata per-blob by providing an AES-256 encryption key in requests to the storage service.
When you use a customer-provided key, Azure Storage does not manage or persist your key.
When writing data to a blob, the provided key is used to encrypt your data before writing it to disk.
A SHA-256 hash of the encryption key is written alongside the blob contents,
and is used to verify that all subsequent operations against the blob use the same encryption key.
This hash cannot be used to retrieve the encryption key or decrypt the contents of the blob.
When reading a blob, the provided key is used to decrypt your data after reading it from disk.
In both cases, the provided encryption key is securely discarded
as soon as the encryption or decryption process completes.
:param str key_value:
Base64-encoded AES-256 encryption key value.
:param str key_hash:
Base64-encoded SHA256 of the encryption key.
:ivar str algorithm:
Specifies the algorithm to use when encrypting data using the given key. Must be AES256.
"""
def __init__(self, key_value, key_hash):
self.key_value = key_value
self.key_hash = key_hash
self.algorithm = 'AES256'
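# Illustrative sketch (editor's addition): building a customer-provided key. The key
# bytes are randomly generated for the example; a real caller supplies the base64-encoded
# AES-256 key and the base64-encoded SHA-256 of that key, as the docstring above states.
def _example_customer_provided_key():
    import base64
    import hashlib
    import os
    key = os.urandom(32)
    return CustomerProvidedEncryptionKey(
        key_value=base64.b64encode(key).decode(),
        key_hash=base64.b64encode(hashlib.sha256(key).digest()).decode(),
    )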
class ContainerEncryptionScope(object):
"""The default encryption scope configuration for a container.
This scope is used implicitly for all future writes within the container,
but can be overridden per blob operation.
.. versionadded:: 12.2.0
:param str default_encryption_scope:
Specifies the default encryption scope to set on the container and use for
all future writes.
:param bool prevent_encryption_scope_override:
If true, prevents any request from specifying a different encryption scope than the scope
set on the container. Default value is false.
"""
def __init__(self, default_encryption_scope, **kwargs):
self.default_encryption_scope = default_encryption_scope
self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', False)
@classmethod
def _from_generated(cls, generated):
if generated.properties.default_encryption_scope:
scope = cls(
generated.properties.default_encryption_scope,
prevent_encryption_scope_override=generated.properties.prevent_encryption_scope_override or False
)
return scope
return None
class DelimitedJSON(object):
"""Defines the input or output JSON serialization for a blob data query.
:keyword str delimiter: The line separator character, default value is '\n'
"""
def __init__(self, **kwargs):
self.delimiter = kwargs.pop('delimiter', '\n')
class DelimitedTextDialect(object):
"""Defines the input or output delimited (CSV) serialization for a blob query request.
:keyword str delimiter:
Column separator, defaults to ','.
:keyword str quotechar:
Field quote, defaults to '"'.
:keyword str lineterminator:
Record separator, defaults to '\n'.
:keyword str escapechar:
Escape char, defaults to empty.
:keyword bool has_header:
Whether the blob data includes headers in the first line. The default value is False, meaning that the
data will be returned inclusive of the first line. If set to True, the data will be returned exclusive
of the first line.
"""
def __init__(self, **kwargs):
self.delimiter = kwargs.pop('delimiter', ',')
self.quotechar = kwargs.pop('quotechar', '"')
self.lineterminator = kwargs.pop('lineterminator', '\n')
self.escapechar = kwargs.pop('escapechar', "")
self.has_header = kwargs.pop('has_header', False)
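# Illustrative sketch (editor's addition): serialization settings for a blob query over
# semicolon-separated input with a header row, returning JSON records. The delimiter
# choices are arbitrary examples.
def _example_query_dialects():
    input_format = DelimitedTextDialect(delimiter=";", has_header=True)
    output_format = DelimitedJSON(delimiter="\n")
    return input_format, output_format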
class ObjectReplicationPolicy(DictMixin):
"""Policy id and rule ids applied to a blob.
:ivar str policy_id:
Policy id for the blob. A replication policy gets created (policy id) when creating a source/destination pair.
:ivar list(~azure.storage.blob.ObjectReplicationRule) rules:
Within each policy there may be multiple replication rules.
e.g. rule 1= src/container/.pdf to dst/container2/; rule2 = src/container1/.jpg to dst/container3
"""
def __init__(self, **kwargs):
self.policy_id = kwargs.pop('policy_id', None)
self.rules = kwargs.pop('rules', None)
class ObjectReplicationRule(DictMixin):
"""Policy id and rule ids applied to a blob.
:ivar str rule_id:
Rule id.
:ivar str status:
The status of the rule. It could be "Complete" or "Failed".
"""
def __init__(self, **kwargs):
self.rule_id = kwargs.pop('rule_id', None)
self.status = kwargs.pop('status', None)
class BlobQueryError(object):
"""The error happened during quick query operation.
:ivar str error:
The name of the error.
:ivar bool is_fatal:
If true, this error prevents further query processing. More result data may be returned,
but there is no guarantee that all of the original data will be processed.
If false, this error does not prevent further query processing.
:ivar str description:
A description of the error.
:ivar int position:
The blob offset at which the error occurred.
"""
def __init__(self, error=None, is_fatal=False, description=None, position=None):
self.error = error
self.is_fatal = is_fatal
self.description = description
self.position = position
| 44.190875
| 153
| 0.690936
|
4067a363f6f11f05bbac72af5870ff4d364e7b62
| 9,678
|
py
|
Python
|
python/dnsspec.py
|
burakbudanur/dnsbox
|
13ae8b76c54bd45a7bd7902aa88097be783415d4
|
[
"MIT"
] | 1
|
2022-02-18T08:11:15.000Z
|
2022-02-18T08:11:15.000Z
|
python/dnsspec.py
|
burakbudanur/dnsbox
|
13ae8b76c54bd45a7bd7902aa88097be783415d4
|
[
"MIT"
] | 2
|
2021-07-20T12:19:11.000Z
|
2021-07-21T20:29:59.000Z
|
python/dnsspec.py
|
burakbudanur/dnsbox
|
13ae8b76c54bd45a7bd7902aa88097be783415d4
|
[
"MIT"
] | 4
|
2020-12-12T08:18:37.000Z
|
2022-03-03T14:52:53.000Z
|
#!/usr/bin/env python3
import argparse
from pathlib import Path
import numpy as np
from matplotlib import pyplot as plt
from tqdm import tqdm
import dns
def main():
parser = argparse.ArgumentParser(description="Computes direction-dependent spectra.")
parser.add_argument(
"statePath",
type=str,
help="path to the state (or a folder containing states if average) of interest.",
)
parser.add_argument(
"--tex", action="store_true", dest="tex", help="use LaTeX to render text."
)
parser.add_argument(
"--noshow", action="store_true", dest="noshow", help="do not display the plots."
)
parser.add_argument(
"--harmonics",
action="store_true",
dest="harmonics",
help="plot in terms of harmonics.",
)
parser.add_argument(
"--iso",
action="store_true",
dest="iso",
help="compute isotropic spectrum as well.",
)
parser.add_argument(
"--savetxt",
action="store_true",
dest="savetxt",
help="save results as text files",
)
parser.add_argument(
"--average",
action="store_true",
dest="average",
help="average multiple states.",
)
parser.add_argument(
"--si", type=int, help="initial state to average.",
)
parser.add_argument(
"--sf", type=int, help="final state to average.",
)
args = vars(parser.parse_args())
statePath = Path(args["statePath"])
tex = args["tex"]
noshow = args["noshow"]
harmonics = args["harmonics"]
iso = args["iso"]
savetxt = args["savetxt"]
average = args["average"]
si = args["si"]
sf = args["sf"]
if not average:
state, header = dns.readState(statePath)
(spec_x, spec_y, spec_z, Lx, Lz, spec_iso, dissipation,) = dnsspec(
state, header, iso=iso, compute_dissipation=False
)
else:
nstates = sf - si + 1
print("Finding the average state.")
state_avg = None
for i in tqdm(range(si, sf + 1)):
state_i = statePath / f"state.{str(i).zfill(6)}"
state, header = dns.readState(state_i)
if state_avg is None:
state_avg = state
else:
state_avg += state
state_avg /= nstates
print("Averaging spectra of fluctuations.")
specs_x = None
specs_y = None
specs_z = None
dissipations = None
if iso:
specs_iso = None
for i in tqdm(range(si, sf + 1)):
state_i = statePath / f"state.{str(i).zfill(6)}"
state, header = dns.readState(state_i)
(spec_x, spec_y, spec_z, Lx, Lz, spec_iso, dissipation,) = dnsspec(
state - state_avg, header, iso=iso, compute_dissipation=True
)
if specs_x is None:
specs_x = spec_x
else:
specs_x += spec_x
if specs_y is None:
specs_y = spec_y
else:
specs_y += spec_y
if specs_z is None:
specs_z = spec_z
else:
specs_z += spec_z
if dissipations is None:
dissipations = dissipation
else:
dissipations += dissipation
if iso:
if specs_iso is None:
specs_iso = spec_iso
else:
specs_iso += spec_iso
spec_x = specs_x / nstates
spec_y = specs_y / nstates
spec_z = specs_z / nstates
dissipation = dissipations / nstates
if iso:
spec_iso = specs_iso / nstates
if not harmonics:
wavenums_x = np.arange(spec_x.shape[0]) * (2 * np.pi / Lx)
wavenums_y = np.arange(spec_y.shape[0]) * (2 * np.pi / dns.Ly)
wavenums_z = np.arange(spec_z.shape[0]) * (2 * np.pi / Lz)
else:
wavenums_x = np.arange(spec_x.shape[0])
wavenums_y = np.arange(spec_y.shape[0])
wavenums_z = np.arange(spec_z.shape[0])
dns.setPlotDefaults(tex=tex)
figuresDir = dns.createFiguresDir(statePath.parent)
if savetxt and average:
np.savetxt(
figuresDir / f"{statePath.name}_dissipation.dat", np.array([dissipation]),
)
# log log versions
fig, ax = plt.subplots()
ax.plot(wavenums_x[1:], np.sum(spec_x[1:, :], axis=1))
ax.grid(True, which="both")
if not harmonics:
ax.set_xlabel(f"$k_x$")
ax.set_ylabel(f"$E_{{k_x}}$")
else:
ax.set_xlabel(f"$n_x$")
ax.xaxis.get_major_locator().set_params(integer=True)
ax.set_ylabel(f"$E_{{n_x}}$")
ax.set_xscale("log")
ax.set_yscale("log")
fig.savefig(figuresDir / f"{statePath.name}_spec_x_log.png")
k_spec_x = np.zeros((spec_x.shape[0], 4))
k_spec_x[:, 0] = wavenums_x
k_spec_x[:, 1:] = spec_x
if savetxt:
np.savetxt(figuresDir / f"{statePath.name}_spec_x.dat", k_spec_x)
fig, ax = plt.subplots()
ax.plot(wavenums_y[1:], np.sum(spec_y[1:, :], axis=1))
ax.grid(True, which="both")
if not harmonics:
ax.set_xlabel(f"$k_y$")
ax.set_ylabel(f"$E_{{k_y}}$")
else:
ax.set_xlabel(f"$n_y$")
ax.xaxis.get_major_locator().set_params(integer=True)
ax.set_ylabel(f"$E_{{n_y}}$")
ax.set_xscale("log")
ax.set_yscale("log")
fig.savefig(figuresDir / f"{statePath.name}_spec_y_log.png")
k_spec_y = np.zeros((spec_y.shape[0], 4))
k_spec_y[:, 0] = wavenums_y
k_spec_y[:, 1:] = spec_y
if savetxt:
np.savetxt(figuresDir / f"{statePath.name}_spec_y.dat", k_spec_y)
fig, ax = plt.subplots()
ax.plot(wavenums_z[1:], np.sum(spec_z[1:, :], axis=1))
ax.grid(True, which="both")
if not harmonics:
ax.set_xlabel(f"$k_z$")
ax.set_ylabel(f"$E_{{k_z}}$")
else:
ax.set_xlabel(f"$n_z$")
ax.xaxis.get_major_locator().set_params(integer=True)
ax.set_ylabel(f"$E_{{n_z}}$")
ax.set_xscale("log")
ax.set_yscale("log")
fig.savefig(figuresDir / f"{statePath.name}_spec_z_log.png")
k_spec_z = np.zeros((spec_z.shape[0], 4))
k_spec_z[:, 0] = wavenums_z
k_spec_z[:, 1:] = spec_z
if savetxt:
np.savetxt(figuresDir / f"{statePath.name}_spec_z.dat", k_spec_z)
if iso:
fig, ax = plt.subplots()
ax.plot(spec_iso[:, 0], np.sum(spec_iso[:, 1:], axis=1))
if average:
ax.plot(
spec_iso[:, 0],
1.5 * (dissipation ** (2 / 3)) * np.power(spec_iso[:, 0], -5 / 3),
color="k",
)
ax.grid(True, which="both")
if not harmonics:
ax.set_xlabel(f"$k$")
ax.set_ylabel(f"$E_k$")
else:
ax.set_xlabel(f"$n$")
ax.xaxis.get_major_locator().set_params(integer=True)
ax.set_ylabel(f"$E_n$")
ax.set_xscale("log")
ax.set_yscale("log")
fig.savefig(figuresDir / f"{statePath.name}_spec_iso.png")
if savetxt:
np.savetxt(figuresDir / f"{statePath.name}_spec_iso.dat", spec_iso)
if not noshow:
plt.show()
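# Illustrative invocations of this script (editor's addition; the paths and state index
# ranges below are placeholders, not files from the original repository):
#
#   python dnsspec.py runs/state.000100 --iso --savetxt
#   python dnsspec.py runs/ --average --si 100 --sf 200 --iso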
def dnsspec(state, header, iso=False, compute_dissipation=False):
forcing, nx, ny, nz, Lx, Lz, Re, tilt_angle, dt, itime, time = header
nxp, nyp, nzp = nx // 2 - 1, ny // 2 - 1, nz // 2 - 1
spec_x = np.zeros((nxp + 1, 3))
spec_y = np.zeros((nyp + 1, 3))
spec_z = np.zeros((nzp + 1, 3))
if iso:
spec_iso = {}
_kx = 2 * np.pi / Lx
_ky = 2 * np.pi / dns.Ly
_kz = 2 * np.pi / Lz
_kmin = min(_kx, _ky)
_kmin = min(_kmin, _kz)
for i in range(0, nx):
kx = i
if kx > nx // 2:
kx = kx - nx
kx = np.abs(kx)
# x keeps one zeroed mode (the largest)
if kx == nxp + 1:
continue
for j in range(0, nyp + 1):
ky = j
for k in range(0, nz):
kz = k
if kz > nz // 2:
kz = kz - nz
kz = np.abs(kz)
if kz == nzp + 1:
continue
part = (np.conj(state[i, j, k, :]) * state[i, j, k, :]).real
# Correct for double counting (Hermiticity)
if ky == 0:
part = part / 2
spec_x[kx, :] += part
spec_y[ky, :] += part
spec_z[kz, :] += part
# isotropic spectrum
if iso and not (kx == 0 and ky == 0 and kz == 0):
k_iso = int(
round(
np.sqrt((_kx * kx) ** 2 + (_ky * ky) ** 2 + (_kz * kz) ** 2)
/ _kmin
)
)
if k_iso in spec_iso:
spec_iso[k_iso] += part
else:
spec_iso[k_iso] = part
if compute_dissipation:
dissipation = dns.dissipation(state, Lx, Lz, Re)
else:
dissipation = None
if iso:
spec_iso_ = np.zeros((len(spec_iso), 4), dtype=np.float64)
i = 0
for key, value in spec_iso.items():
spec_iso_[i, 0] = key * _kmin
spec_iso_[i, 1:] = value
i += 1
sorter = np.argsort(spec_iso_[:, 0])
spec_iso = spec_iso_[sorter]
else:
spec_iso = None
return (
spec_x,
spec_y,
spec_z,
Lx,
Lz,
spec_iso,
dissipation,
)
if __name__ == "__main__":
main()
| 29.416413
| 89
| 0.516326
|
bb7ffd291218ad571bbec1ac891eab48d07abb52
| 4,103
|
py
|
Python
|
packages/python/plotly/plotly/graph_objs/histogram2d/_stream.py
|
eranws/plotly.py
|
5b0e8d3ccab55fe1a6e4ba123cfc9d718a9ffc5a
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/graph_objs/histogram2d/_stream.py
|
eranws/plotly.py
|
5b0e8d3ccab55fe1a6e4ba123cfc9d718a9ffc5a
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/graph_objs/histogram2d/_stream.py
|
eranws/plotly.py
|
5b0e8d3ccab55fe1a6e4ba123cfc9d718a9ffc5a
|
[
"MIT"
] | null | null | null |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "histogram2d"
_path_str = "histogram2d.stream"
_valid_props = {"maxpoints", "token"}
# maxpoints
# ---------
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
# token
# -----
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram2d.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super(Stream, self).__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.histogram2d.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram2d.Stream`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("maxpoints", None)
_v = maxpoints if maxpoints is not None else _v
if _v is not None:
self["maxpoints"] = _v
_v = arg.pop("token", None)
_v = token if token is not None else _v
if _v is not None:
self["token"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
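# Hedged usage sketch (editor's addition, not part of the original generated module):
# constructing the Stream defined above and attaching it to a histogram2d trace.
# The token value is a placeholder; real tokens come from
# https://chart-studio.plotly.com/settings.
if __name__ == "__main__":
    import plotly.graph_objects as go

    stream = go.histogram2d.Stream(maxpoints=50, token="example-stream-token")
    trace = go.Histogram2d(x=[1, 2, 2, 3], y=[1, 1, 2, 3], stream=stream)
    print(trace.stream.maxpoints)  # -> 50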
| 29.099291
| 82
| 0.539605
|
aa0a8e9b66ea38f871f15be0cb51d0f056ab7c9a
| 4,907
|
py
|
Python
|
camp/commands.py
|
pedrovelho/camp
|
98105c9054b8db3377cb6a06e7b5451b97c6c285
|
[
"MIT"
] | null | null | null |
camp/commands.py
|
pedrovelho/camp
|
98105c9054b8db3377cb6a06e7b5451b97c6c285
|
[
"MIT"
] | null | null | null |
camp/commands.py
|
pedrovelho/camp
|
98105c9054b8db3377cb6a06e7b5451b97c6c285
|
[
"MIT"
] | 1
|
2019-02-05T08:49:41.000Z
|
2019-02-05T08:49:41.000Z
|
#
# CAMP
#
# Copyright (C) 2017, 2018 SINTEF Digital
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
#
from argparse import ArgumentParser
from camp import About
class Command(object):
@staticmethod
def extract_from(command_line):
parser = ArgumentParser(prog=About.PROGRAM,
description=About.DESCRIPTION)
subparsers = parser.add_subparsers(dest="command")
generate = subparsers.add_parser(
"generate",
help="Generate new test configurations")
generate.add_argument(
"-d",
"--directory",
dest="working_directory",
help="the directory that contains input files")
generate.add_argument(
"-a",
"--all",
action="store_false",
dest="coverage",
help="Generate all possibles configurations")
generate.add_argument(
"-c",
"--coverage",
action="store_true",
dest="coverage",
help="Generate only enough configurations to cover every single variations")
realize = subparsers.add_parser(
"realize",
help="Realize the variables in the test configurations")
realize.add_argument(
"-d",
"--directory",
dest="working_directory",
help="the directory that describes that contains the input files")
realize.add_argument(
"-o",
"--output",
dest="output_directory",
help="the directory that contains the generated configurations")
execute = subparsers.add_parser(
"execute",
help="Execute the test configurations generated")
execute.add_argument(
"-c",
"--config",
dest="configuration_file",
help="The INI file that describes which configurations to execute")
values = parser.parse_args(command_line)
return Command.from_namespace(values)
@staticmethod
def from_namespace(namespace):
if namespace.command == "generate":
return Generate(namespace.working_directory,
namespace.coverage)
elif namespace.command == "realize":
return Realize(namespace.working_directory,
namespace.output_directory)
elif namespace.command == "execute":
return Execute(namespace.configuration_file)
else:
message = "The command '%s' is not yet implemented." % namespace.command
raise NotImplementedError(message)
def send_to(self, camp):
message = "The method '{}.Command#send_to' should have been implemented!"
raise NotImplementedError(message.format(__name__))
class Generate(Command):
"""
Encapsulate calls to 'camp generate ...'
"""
DEFAULT_WORKING_DIRECTORY = "temp/xwiki"
DEFAULT_COVERAGE = True
def __init__(self, working_directory=None, coverage=None):
super(Generate, self).__init__()
self._working_directory = working_directory or \
self.DEFAULT_WORKING_DIRECTORY
self._coverage = coverage \
if coverage is not None else self.DEFAULT_COVERAGE
@property
def working_directory(self):
return self._working_directory
@property
def only_coverage(self):
return self._coverage
def send_to(self, camp):
camp.generate(self)
class Realize(Command):
"""
Encapsulate calls to 'camp realize ...'
"""
def __init__(self, working_directory, output_directory):
super(Realize, self).__init__()
self._working_directory = working_directory or \
self.DEFAULT_WORKING_DIRECTORY
self._output_directory = output_directory or \
self.DEFAULT_OUTPUT_DIRECTORY
DEFAULT_WORKING_DIRECTORY = "/temp/"
DEFAULT_OUTPUT_DIRECTORY = "/temp/out"
@property
def working_directory(self):
return self._working_directory
@property
def output_directory(self):
return self._output_directory
def send_to(self, camp):
camp.realize(self)
class Execute(Command):
"""
Encapsulate calls to 'camp execute ...'
"""
DEFAULT_CONFIGURATION_FILE = "config.ini"
def __init__(self, configuration_file):
super(Execute, self).__init__()
self._configuration_file = configuration_file \
or self.DEFAULT_CONFIGURATION_FILE
@property
def configuration_file(self):
return self._configuration_file
def send_to(self, camp):
camp.execute(self)
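# Hedged usage sketch (editor's addition, not part of the original file): parsing a
# command line into one of the Command subclasses defined above. The directory path
# is a placeholder value.
if __name__ == "__main__":
    command = Command.extract_from(
        ["generate", "--directory", "temp/xwiki", "--coverage"])
    print(type(command).__name__)     # -> Generate
    print(command.working_directory)  # -> temp/xwiki
    print(command.only_coverage)      # -> True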
| 26.668478
| 88
| 0.605258
|
d6d862db21946143c1c3d446b2ddce12f44636f1
| 99
|
py
|
Python
|
utils/__init__.py
|
jdstmporter/wav2mp3
|
9b41620b24c53f4d4941c32d074b33a5b594f033
|
[
"BSD-3-Clause"
] | 1
|
2021-08-14T07:01:15.000Z
|
2021-08-14T07:01:15.000Z
|
utils/__init__.py
|
jdstmporter/pcm2mp3
|
1d99558578fdea7dcf2c7db4afe6ad7704a9f140
|
[
"BSD-3-Clause"
] | null | null | null |
utils/__init__.py
|
jdstmporter/pcm2mp3
|
1d99558578fdea7dcf2c7db4afe6ad7704a9f140
|
[
"BSD-3-Clause"
] | null | null | null |
from .checkForLibs import libs, gcc, CheckLibrary, CheckCompiler
from .clean import Cleaner
| 16.5
| 64
| 0.767677
|
bae5bc3947a5fe116736893d8379abceb03aa5d3
| 3,969
|
py
|
Python
|
src/conv.py
|
toomanyjoes/mrperfcs386m
|
0f04649a31cc658ef0947945211a65f7b23b6f5e
|
[
"MIT"
] | null | null | null |
src/conv.py
|
toomanyjoes/mrperfcs386m
|
0f04649a31cc658ef0947945211a65f7b23b6f5e
|
[
"MIT"
] | 1
|
2015-11-16T16:53:10.000Z
|
2015-11-16T16:53:10.000Z
|
test/conv.py
|
toomanyjoes/mrperfcs386m
|
0f04649a31cc658ef0947945211a65f7b23b6f5e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
"""Hadoop Simulator
This simulator takes three configuration files, topology.xml, metadata.xml,
and job.xml, describing a Hadoop job and the topology it will run on.
Two tcl files, topology.tcl and events.tcl, will be generated as input
for ns-2 for further simulation.
"""
#import xml.dom
import xml.dom.minidom
import sys
from optparse import OptionParser
import random
from gen import *
#import getopt
def convert(topo_xml):
root = xml.dom.minidom.parse(topo_xml)
#topo = root.getElementsByTagName(u"topo")[0]
topo = xml_children(root, u'topo')[0]
racks = {}
#lines = ["proc create-topology { } {\n", "\tglobal ns opt\n"]
for rack_group in topo.getElementsByTagName(u"rack_group"):
numrack = len(rack_group.getElementsByTagName(u"rack_index"))
namenode = rack_group.getElementsByTagName(u'name')[0]
name = str(namenode.childNodes[0].nodeValue)
node_group = rack_group.getElementsByTagName(u'compute_node_group')[0]
numnode = len(node_group.getElementsByTagName(u'node_index'))
numswitch = len(rack_group.getElementsByTagName(u'switch_index'))
#lines.append("\tglobal %s\n" % (" ".join([name+'_'+str(i) for i in range(numrack)])))
lines.append("\tfor {set i 0} {$i < %d} {incr i} {\n" % (numrack))
lines.append("\t\tcreate-nodes %s_$i %d\n" % (name, numnode))
lines.append("\t}\n")
if name in racks.keys():
print "error: rack_group name \"%s\" conflict\n" % (name)
connect = [[0]*numswitch for i in range(numrack)]
racks[name] = [name, numrack, numnode, numswitch, connect]
for router in topo.getElementsByTagName(u'router'):
router_name = str(router.getElementsByTagName(u'name')[0].childNodes[0].nodeValue)
lines.append("\tset %s [$ns node]\n" % (router_name))
for group in router.getElementsByTagName(u'connect_to_group'):
switch_index = int(group.getElementsByTagName(u'switch_index')[0].childNodes[0].nodeValue)
rgname = str(group.getElementsByTagName(u'rack_group_name')[0].childNodes[0].nodeValue)
if rgname not in racks.keys():
print "error: rack group name %s not defined\n" % (rgname)
p = racks[rgname]
numrack = p[1]
numnode = p[2]
numswitch = p[3]
connect = p[4]
for i in range(numrack):
rack = connect[i]
                if rack[switch_index] != 0:
                    print("error: a switch (rack %s[%d] switch %d = %s) connected to multiple routers\n" % (rgname, i, switch_index, repr(rack[switch_index])))
rack[switch_index] = 1 #to indicate it's already written to tcl
#LAN with router
lines.append("\tfor {set i 0} {$i < %s} {incr i} {\n" % (numrack))
lines.append("\t\tcreate-lan $%s %s_$i %d\n" % (router_name, rgname, numnode))
lines.append("\t}\n")
for connect_to in router.getElementsByTagName(u'connect_to'):
print "hello"
lines.append('}\n')
f = open("hadoop.topo.tcl", "w")
f.writelines(lines)
f.close()
#print lines
def main():
usage = "usage: %prog options"
parser = OptionParser(usage)
parser.add_option("-v", "--verbose", default=False,
action="store_true", dest="verbose")
parser.add_option("-t", "--topology", dest="topo_xml",
help="topology configuration xml")
parser.add_option("-m", "--metadata", dest="meta_xml",
help="metadata configuration xml")
parser.add_option("-j", "--job", dest="job_xml",
help="job configuration xml")
parser.add_option("-T", "--topoout", dest="topo_tcl",
help="output tcl file describing topology",
default="hadoop.topo.tcl")
parser.add_option("-J", "--jobout", dest="job_tcl",
help="output tcl file describing job",
default="hadoop.job.tcl")
(options, args) = parser.parse_args()
if None in (options.topo_xml, options.meta_xml, options.job_xml):
        print('xmls not defined')
parser.print_help()
sys.exit()
topo = topology_t(options.topo_xml)
job = job_t(options.job_xml)
topo.totcl(options.topo_tcl)
topo.totcl2('mapnodes.tcl')
'''
f = open(options.job_tcl, 'w')
f.write(job.tcl)
f.close()
'''
if __name__ == "__main__":
main()
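# Hedged usage note (editor's addition, not part of the original script): with the
# options defined in main() above, a typical invocation would look like the line
# below; the XML file names are placeholders for the user's own configuration files.
#
#   python conv.py -t topology.xml -m metadata.xml -j job.xml -T hadoop.topo.tcl -J hadoop.job.tcl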
| 33.923077
| 143
| 0.689342
|
aeb2a2f2f18200c7e07a9116b509ef6cf62f73f4
| 2,979
|
py
|
Python
|
lib/tests/streamlit/show_test.py
|
sourcery-ai-bot/streamlit
|
cbfa69c8ec310a839148cfa4bac5697e6f392a79
|
[
"Apache-2.0"
] | null | null | null |
lib/tests/streamlit/show_test.py
|
sourcery-ai-bot/streamlit
|
cbfa69c8ec310a839148cfa4bac5697e6f392a79
|
[
"Apache-2.0"
] | null | null | null |
lib/tests/streamlit/show_test.py
|
sourcery-ai-bot/streamlit
|
cbfa69c8ec310a839148cfa4bac5697e6f392a79
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018-2022 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import patch
import unittest
from parameterized import parameterized
import streamlit as st
from streamlit import code_util
class ShowTest(unittest.TestCase):
"""Test helper methods of `show()` in Streamlit.__init__.py."""
def test_st_show(self):
"""Test st.experimental_show.
Ideally we could test the order and content of the deltas.
        But it is not possible to inject a shared queue in `streamlit._with_dg()`
Improvements:
- verify markdown is escaped on write delta
"""
thing = "something"
with patch("streamlit.write") as write:
with patch("streamlit.markdown") as markdown:
st.experimental_show(thing)
write.assert_called_once()
markdown.assert_called_once()
foo_show_bar = "baz"
with patch("streamlit.write") as write:
with patch("streamlit.markdown") as markdown:
st.experimental_show(foo_show_bar)
write.assert_called_once()
markdown.assert_called_once()
@parameterized.expand([("simple", "(a, b, c)", range(3), ["a", "b", "c"]), ("complex", "(a, foo(c))", range(2), ["a", "foo(c)"]), ("tricky", "get(a, foo(c)) trash", range(2), ["a", "foo(c)"])])
def test_get_method_args_from_code(self, name, input, args, expected):
"""Parse method arguments from a string"""
parsed = code_util.get_method_args_from_code(args, input)
self.assertEqual(parsed, expected)
@parameterized.expand([("fails", '(a, ")b", c)', range(3), ["a", '")b"', "c"])])
def test_failed_get_args_from_code(self, name, input, args, expected):
"""Fail to parse method arguments from a string
The inclusion of `,` or `)` in a string with multiple args causes error
"""
with self.assertRaises(AssertionError):
code_util.get_method_args_from_code(args, input)
@parameterized.expand(
[
("simple", "(a, b, c)", ["a, b, c"]),
("complex", "(a, foo(c))", ["a, foo(c)"]),
("tricky", "pickup(a, foo(c)) my(trash)", ["a, foo(c)", "trash"]),
]
)
def test_extract_args(self, name, input, expected):
"""Parse contents of outer parentheses from a string"""
parsed = code_util.extract_args(input)
self.assertEqual(parsed, expected)
| 37.2375
| 197
| 0.63377
|
449be7ee4852a4cada910a7046a8f5d95851163f
| 1,015
|
py
|
Python
|
Modules/reid/default_config.py
|
NikAbba/video_tracking
|
c624a9d3596befa4a941e4ff4092b9545bfdd28d
|
[
"Apache-2.0"
] | null | null | null |
Modules/reid/default_config.py
|
NikAbba/video_tracking
|
c624a9d3596befa4a941e4ff4092b9545bfdd28d
|
[
"Apache-2.0"
] | null | null | null |
Modules/reid/default_config.py
|
NikAbba/video_tracking
|
c624a9d3596befa4a941e4ff4092b9545bfdd28d
|
[
"Apache-2.0"
] | 1
|
2021-04-23T19:12:44.000Z
|
2021-04-23T19:12:44.000Z
|
from yacs.config import CfgNode
def get_default_config():
cfg = CfgNode()
# reid model
cfg.model = CfgNode()
cfg.model.name = 'osnet_x1_0'
cfg.model.pretrained = True # automatically load pretrained model weights if available
# cfg.model.load_weights = './torchreid/trained_models/osnet_x0_25_imagenet.pth' # path to model weights
# cfg.model.load_weights = './torchreid/trained_models/osnet_x1_0_market_256x128_amsgrad_ep150_stp60_lr0.0015_b64_fb10_softmax_labelsmooth_flip.pth' # path to model weights
cfg.model.load_weights = './torchreid/trained_models/osnet_x1_0_msmt17_256x128_amsgrad_ep150_stp60_lr0.0015_b64_fb10_softmax_labelsmooth_flip.pth' # path to model weights
# cfg.model.load_weights = './torchreid/trained_models/GANOSNetGLOBAL.pth' # path to model weights
cfg.model.old = True
cfg.model.num_classes = 6
cfg.model.loss = 'softmax'
cfg.model.threshold = 0.25
cfg.model.refresh_threshold = 1000
cfg.model.maxlen = 60
return cfg
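# Hedged usage sketch (editor's addition, not part of the original file): overriding
# a field of the default config through the standard yacs interface. The threshold
# value below is an arbitrary illustration, not a recommended setting.
if __name__ == "__main__":
    cfg = get_default_config()
    cfg.merge_from_list(["model.threshold", 0.3])  # override a single key
    cfg.freeze()                                   # make the config read-only
    print(cfg.model.name, cfg.model.threshold)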
| 44.130435
| 177
| 0.75665
|
f462196c4f678dc61a70a620edab19fd959b1f2c
| 7,337
|
py
|
Python
|
external/netdef_models/lmbspecialops/test/test_FlowToDepth2.py
|
zhuhu00/MOTSFusion_modify
|
190224a7c3fbded69fedf9645a0ebbf08227fb6d
|
[
"MIT"
] | null | null | null |
external/netdef_models/lmbspecialops/test/test_FlowToDepth2.py
|
zhuhu00/MOTSFusion_modify
|
190224a7c3fbded69fedf9645a0ebbf08227fb6d
|
[
"MIT"
] | null | null | null |
external/netdef_models/lmbspecialops/test/test_FlowToDepth2.py
|
zhuhu00/MOTSFusion_modify
|
190224a7c3fbded69fedf9645a0ebbf08227fb6d
|
[
"MIT"
] | null | null | null |
#
# lmbspecialops - a collection of tensorflow ops
# Copyright (C) 2017 Benjamin Ummenhofer, Huizhong Zhou
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import tensorflow as tf
import numpy as np
import sys
print(sys.path)
sys.path.insert(0,'../python')
import lmbspecialops as ops
from helper import *
np.set_printoptions(linewidth=160)
USE_GPUS = sorted(set((False, tf.test.is_gpu_available())))
TYPES = (np.float32, np.float64)
#USE_GPUS = [False]
#TYPES = (np.float32, )
class FlowToDepth2Test(tf.test.TestCase):
def _test_random_data(self, dtype, inverse_depth, normalize_flow):
# random depth map and camera pose
depth = np.random.uniform(5,10, (1,1,6,12)).astype(dtype)
if inverse_depth:
depth = 1/depth
rotation = np.random.uniform(0.0,0.05, (1,3)).astype(dtype)
translation = (np.array([[1,0,0]]) + np.random.uniform(-0.2,0.2, (1,3))).astype(dtype)
intrinsics = np.array([[1,1,0.5,0.5]]).astype(dtype)
flow = ops.depth_to_flow(
depth=depth,
intrinsics=intrinsics,
rotation=rotation,
translation=translation,
inverse_depth=inverse_depth,
normalize_flow=normalize_flow,).eval()
# rotation = angleaxis_to_rotation_matrix(rotation[0])[np.newaxis,:,:]
rotation = angleaxis_to_quaternion(rotation[0])[np.newaxis,:]
computed_depth = ops.flow_to_depth2(
flow=flow,
intrinsics=intrinsics,
rotation=rotation,
translation=translation,
inverse_depth=inverse_depth,
normalized_flow=normalize_flow,
rotation_format='quaternion').eval()
print('depth\n',depth)
print('computed_depth\n',computed_depth)
self.assertAllClose(depth, computed_depth, rtol=1e-4, atol=1e-4)
def test_random_data(self):
for use_gpu in USE_GPUS:
for dtype in TYPES:
print(use_gpu, dtype)
with self.test_session(use_gpu=use_gpu, force_gpu=use_gpu):
for inverse_depth in (False, True):
for normalize_flow in (False, True):
self._test_random_data(dtype, inverse_depth, normalize_flow)
def _test_rotation_formats(self, dtype, inverse_depth, normalize_flow):
# random depth map and camera pose
depth = np.random.uniform(5,10, (1,1,6,12)).astype(dtype)
if inverse_depth:
depth = 1/depth
rotation = np.random.uniform(0.0,0.05, (1,3)).astype(dtype)
translation = (np.array([[1,0,0]]) + np.random.uniform(-0.2,0.2, (1,3))).astype(dtype)
intrinsics = np.array([[1,1,0.5,0.5]]).astype(dtype)
flow = ops.depth_to_flow(
depth=depth,
intrinsics=intrinsics,
rotation=rotation,
translation=translation,
inverse_depth=inverse_depth,
normalize_flow=normalize_flow,).eval()
rotation_aa = rotation
rotation_R = angleaxis_to_rotation_matrix(rotation[0])[np.newaxis,:,:]
rotation_q = angleaxis_to_quaternion(rotation[0])[np.newaxis,:]
computed_depth_aa = ops.flow_to_depth2(
flow=flow,
intrinsics=intrinsics,
rotation=rotation_aa,
translation=translation,
inverse_depth=inverse_depth,
normalized_flow=normalize_flow,
rotation_format='angleaxis3').eval()
computed_depth_R = ops.flow_to_depth2(
flow=flow,
intrinsics=intrinsics,
rotation=rotation_R,
translation=translation,
inverse_depth=inverse_depth,
normalized_flow=normalize_flow,
rotation_format='matrix').eval()
computed_depth_q = ops.flow_to_depth2(
flow=flow,
intrinsics=intrinsics,
rotation=rotation_q,
translation=translation,
inverse_depth=inverse_depth,
normalized_flow=normalize_flow,
rotation_format='quaternion').eval()
self.assertAllClose(computed_depth_aa, computed_depth_R, rtol=1e-4, atol=1e-4)
self.assertAllClose(depth, computed_depth_q, rtol=1e-4, atol=1e-4)
def test_rotation_formats(self):
for use_gpu in USE_GPUS:
for dtype in TYPES:
print(use_gpu, dtype)
with self.test_session(use_gpu=use_gpu, force_gpu=use_gpu):
for inverse_depth in (False, True):
for normalize_flow in (False, True):
self._test_rotation_formats(dtype, inverse_depth, normalize_flow)
def test_shape_no_batch_dimension(self):
dtype = np.float32
flow = np.zeros((2,6,12)).astype(dtype)
rotation = np.zeros((3,)).astype(dtype)
translation = np.array([1,0,0]).astype(dtype)
intrinsics = np.array([1,1,0.5,0.5]).astype(dtype)
depth = np.zeros((1,1,6,12)).astype(dtype)
computed_depth = ops.flow_to_depth2(
flow=flow,
intrinsics=intrinsics,
rotation=rotation,
translation=translation,)
self.assertShapeEqual(depth, computed_depth)
def test_shape_batch(self):
dtype = np.float32
batch = 7
flow = np.zeros((batch,2,6,12)).astype(dtype)
rotation = np.zeros((batch,3)).astype(dtype)
translation = np.zeros((batch,3)).astype(dtype)
intrinsics = np.zeros((batch,4)).astype(dtype)
depth = np.zeros((batch,1,6,12)).astype(dtype)
computed_depth = ops.flow_to_depth2(
flow=flow,
intrinsics=intrinsics,
rotation=rotation,
translation=translation,)
self.assertShapeEqual(depth, computed_depth)
def test_shape_batch_mismatch(self):
dtype = np.float32
batch = np.array([7,7,7,5],dtype=np.int32)
for i in range(4):
batch = np.roll(batch,1)
print(batch)
flow = np.zeros((batch[0],2,6,12)).astype(dtype)
rotation = np.zeros((batch[1],3)).astype(dtype)
translation = np.zeros((batch[2],3)).astype(dtype)
intrinsics = np.zeros((batch[3],4)).astype(dtype)
with self.assertRaises(ValueError) as cm:
computed_depth = ops.flow_to_depth2(
flow=flow,
intrinsics=intrinsics,
rotation=rotation,
translation=translation,)
self.assertStartsWith(str(cm.exception), 'Dimensions must be equal')
if __name__ == '__main__':
tf.test.main()
| 35.274038
| 94
| 0.611967
|
5d1abb06264521cd4e78034d7788260d98b5dfe3
| 2,990
|
py
|
Python
|
server/app/models/Person.py
|
abhijithvijayan/corona-screening-portal
|
fc2e17926131fdc76ff62f850e1cfce4e05b50dc
|
[
"MIT"
] | 1
|
2020-03-15T16:54:42.000Z
|
2020-03-15T16:54:42.000Z
|
server/app/models/Person.py
|
abhijithvijayan/corona-screening-portal
|
fc2e17926131fdc76ff62f850e1cfce4e05b50dc
|
[
"MIT"
] | 5
|
2020-03-15T17:06:51.000Z
|
2020-03-15T17:18:13.000Z
|
server/app/models/Person.py
|
abhijithvijayan/corona-screening-portal
|
fc2e17926131fdc76ff62f850e1cfce4e05b50dc
|
[
"MIT"
] | null | null | null |
from sqlalchemy import text
from sqlalchemy.sql import func
from sqlalchemy.orm import relationship
from sqlalchemy.dialects.postgresql import UUID, JSON
from sqlalchemy.ext.associationproxy import association_proxy
from .Association import Association
from app import db
class Person(db.Model):
"""Main Table"""
__tablename__ = 'corona__person'
uuid = db.Column(UUID(as_uuid=True),
unique=True,
server_default=text("uuid_generate_v4()"))
id = db.Column(db.Integer, index=True, primary_key=True)
# ---- Basic Person details ---- #
name = db.Column(db.String(64), index=True, nullable=False)
gender = db.Column(db.String(10), index=True, nullable=False)
age = db.Column(db.Integer, nullable=False)
address = db.Column(db.String(128), nullable=False)
town = db.Column(db.String(40), nullable=False)
phone = db.Column(db.Unicode(20), nullable=False)
location = db.Column(db.String(64), nullable=False)
coordinates = db.Column(JSON, nullable=False)
type_of_person = db.Column(db.String(15), index=True, nullable=False)
# ---- Meta data ---- #
created_at = db.Column(db.DateTime, index=True, server_default=func.now())
updated_at = db.Column(db.DateTime, index=True,
                           server_default=func.now())  # TODO: make this column update automatically on modification
# ---- Relationships ---- #
interaction_from = relationship(
'Association',
backref='suspect__interaction',
primaryjoin=(id == Association.suspect_id)
)
interaction_to = relationship(
'Association',
backref='patient__interaction',
primaryjoin=(id == Association.patient_id)
)
def to_json(self):
json_person = {
'id': 'Pta / cov / {}'.format(self.id),
'name': self.name,
'created_at': self.created_at,
'updated_at': self.updated_at
}
return json_person
def complete_json(self):
json_person = {
'id': 'Pta / cov / {}'.format(self.id),
'name': self.name,
'gender': self.gender,
'age': self.age,
'address': self.address,
'town': self.town,
'phone': self.phone,
'location': {
'value': self.location,
'coordinates': self.coordinates
},
'type_of_person': self.type_of_person,
'created_at': self.created_at,
'updated_at': self.updated_at
}
if self.type_of_person == 'suspect' and len(self.interaction_from) != 0:
"""self.interaction_from is an array"""
json_person['category_of_suspect'] = self.interaction_from[0].category_of_suspect
json_person['severity'] = self.interaction_from[0].severity
return json_person
# method tells Python how to print objects of this class
def __repr__(self):
return '<Person {}>'.format(self.id)
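# Hedged usage sketch (editor's addition, not part of the original file): creating a
# Person inside an application context. All field values are invented for
# illustration, and `flask_app` is assumed to be the Flask app that configured `db`.
def _example_create_person(flask_app):
    with flask_app.app_context():
        person = Person(
            name="Jane Doe", gender="female", age=34,
            address="12 Example Street", town="Sampletown", phone="+10000000000",
            location="Sampletown PHC", coordinates={"lat": 0.0, "lng": 0.0},
            type_of_person="suspect",
        )
        db.session.add(person)
        db.session.commit()
        return person.complete_json()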
| 36.463415
| 93
| 0.610702
|
bce6b471c5deafd857df027eb9597b5347560972
| 6,615
|
py
|
Python
|
lib/axis/tb/axis_demux/test_axis_demux.py
|
1847123212/verilog-ethernet
|
84004c720dd1a873db96632c9c766badf1de59be
|
[
"MIT"
] | 1
|
2022-01-24T04:54:00.000Z
|
2022-01-24T04:54:00.000Z
|
fpga/lib/eth/lib/axis/tb/axis_demux/test_axis_demux.py
|
linjw16/corundum
|
65ad32421bacc497823ca939b0b9f0801063c4ea
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
fpga/lib/eth/lib/axis/tb/axis_demux/test_axis_demux.py
|
linjw16/corundum
|
65ad32421bacc497823ca939b0b9f0801063c4ea
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2022-03-01T13:36:39.000Z
|
2022-03-01T13:36:39.000Z
|
#!/usr/bin/env python
"""
Copyright (c) 2021 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import itertools
import logging
import os
import subprocess
import cocotb_test.simulator
import pytest
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge
from cocotb.regression import TestFactory
from cocotbext.axi import AxiStreamBus, AxiStreamFrame, AxiStreamSource, AxiStreamSink
class TB(object):
def __init__(self, dut):
self.dut = dut
ports = len(dut.axis_demux_inst.m_axis_tvalid)
self.log = logging.getLogger("cocotb.tb")
self.log.setLevel(logging.DEBUG)
cocotb.start_soon(Clock(dut.clk, 10, units="ns").start())
self.source = AxiStreamSource(AxiStreamBus.from_prefix(dut, "s_axis"), dut.clk, dut.rst)
self.sink = [AxiStreamSink(AxiStreamBus.from_prefix(dut, f"m{k:02d}_axis"), dut.clk, dut.rst) for k in range(ports)]
dut.enable.setimmediatevalue(0)
dut.drop.setimmediatevalue(0)
dut.select.setimmediatevalue(0)
def set_idle_generator(self, generator=None):
if generator:
self.source.set_pause_generator(generator())
def set_backpressure_generator(self, generator=None):
if generator:
for sink in self.sink:
sink.set_pause_generator(generator())
async def reset(self):
self.dut.rst.setimmediatevalue(0)
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
self.dut.rst.value = 1
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
self.dut.rst.value = 0
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
async def run_test(dut, payload_lengths=None, payload_data=None, idle_inserter=None, backpressure_inserter=None, port=0):
tb = TB(dut)
id_width = len(tb.source.bus.tid)
id_count = 2**id_width
id_mask = id_count-1
    dest_width = len(tb.sink[0].bus.tdest)  # width of tdest on the demux outputs
dest_count = 2**dest_width
dest_mask = dest_count-1
cur_id = 1
await tb.reset()
tb.set_idle_generator(idle_inserter)
tb.set_backpressure_generator(backpressure_inserter)
test_frames = []
dut.enable.setimmediatevalue(1)
dut.drop.setimmediatevalue(0)
dut.select.setimmediatevalue(port)
for test_data in [payload_data(x) for x in payload_lengths()]:
test_frame = AxiStreamFrame(test_data)
test_frame.tid = cur_id
test_frame.tdest = cur_id | (port << dest_width)
test_frames.append(test_frame)
await tb.source.send(test_frame)
cur_id = (cur_id + 1) % id_count
for test_frame in test_frames:
rx_frame = await tb.sink[port].recv()
assert rx_frame.tdata == test_frame.tdata
assert rx_frame.tid == test_frame.tid
assert rx_frame.tdest == (test_frame.tdest & dest_mask)
assert not rx_frame.tuser
assert tb.sink[port].empty()
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
def cycle_pause():
return itertools.cycle([1, 1, 1, 0])
def size_list():
data_width = len(cocotb.top.s_axis_tdata)
byte_width = data_width // 8
return list(range(1, byte_width*4+1))+[512]+[1]*64
def incrementing_payload(length):
return bytearray(itertools.islice(itertools.cycle(range(256)), length))
if cocotb.SIM_NAME:
ports = len(cocotb.top.axis_demux_inst.m_axis_tvalid)
factory = TestFactory(run_test)
factory.add_option("payload_lengths", [size_list])
factory.add_option("payload_data", [incrementing_payload])
factory.add_option("idle_inserter", [None, cycle_pause])
factory.add_option("backpressure_inserter", [None, cycle_pause])
factory.add_option("port", list(range(ports)))
factory.generate_tests()
# cocotb-test
tests_dir = os.path.dirname(__file__)
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
@pytest.mark.parametrize("tdest_route", [0, 1])
@pytest.mark.parametrize("data_width", [8, 16, 32])
@pytest.mark.parametrize("ports", [4])
def test_axis_demux(request, ports, data_width, tdest_route):
dut = "axis_demux"
wrapper = f"{dut}_wrap_{ports}"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = wrapper
# generate wrapper
wrapper_file = os.path.join(tests_dir, f"{wrapper}.v")
if not os.path.exists(wrapper_file):
subprocess.Popen(
[os.path.join(rtl_dir, f"{dut}_wrap.py"), "-p", f"{ports}"],
cwd=tests_dir
).wait()
verilog_sources = [
wrapper_file,
os.path.join(rtl_dir, f"{dut}.v"),
]
parameters = {}
parameters['DATA_WIDTH'] = data_width
parameters['KEEP_ENABLE'] = int(parameters['DATA_WIDTH'] > 8)
parameters['KEEP_WIDTH'] = (parameters['DATA_WIDTH'] + 7) // 8
parameters['ID_ENABLE'] = 1
parameters['ID_WIDTH'] = 8
parameters['DEST_ENABLE'] = 1
parameters['M_DEST_WIDTH'] = 8
parameters['S_DEST_WIDTH'] = parameters['M_DEST_WIDTH'] + (ports-1).bit_length()
parameters['USER_ENABLE'] = 1
parameters['USER_WIDTH'] = 1
parameters['TDEST_ROUTE'] = tdest_route
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
extra_env['PORTS'] = str(ports)
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
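# Hedged usage note (editor's addition, not part of the original file): the
# cocotb-test entry point above is normally driven through pytest, with the
# simulator selected via the SIM environment variable (cocotb-test convention).
#
#   SIM=icarus pytest -k test_axis_demux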
| 30.767442
| 124
| 0.691308
|
d0cd644868f560a339f5adf43f57c7d726731b0d
| 3,801
|
py
|
Python
|
netbox/dcim/management/commands/trace_paths.py
|
cybarox/netbox
|
ea197eff5f4fe925bb354d1375912decd81752bd
|
[
"Apache-2.0"
] | 1
|
2021-12-09T13:41:46.000Z
|
2021-12-09T13:41:46.000Z
|
netbox/dcim/management/commands/trace_paths.py
|
cybarox/netbox
|
ea197eff5f4fe925bb354d1375912decd81752bd
|
[
"Apache-2.0"
] | null | null | null |
netbox/dcim/management/commands/trace_paths.py
|
cybarox/netbox
|
ea197eff5f4fe925bb354d1375912decd81752bd
|
[
"Apache-2.0"
] | 1
|
2022-02-07T20:36:31.000Z
|
2022-02-07T20:36:31.000Z
|
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.db import connection
from django.db.models import Q
from dcim.models import CablePath, ConsolePort, ConsoleServerPort, Interface, PowerFeed, PowerOutlet, PowerPort
from dcim.signals import create_cablepath
ENDPOINT_MODELS = (
ConsolePort,
ConsoleServerPort,
Interface,
PowerFeed,
PowerOutlet,
PowerPort
)
class Command(BaseCommand):
help = "Generate any missing cable paths among all cable termination objects in NetBox"
def add_arguments(self, parser):
parser.add_argument(
"--force", action='store_true', dest='force',
help="Force recalculation of all existing cable paths"
)
parser.add_argument(
"--no-input", action='store_true', dest='no_input',
help="Do not prompt user for any input/confirmation"
)
def draw_progress_bar(self, percentage):
"""
Draw a simple progress bar 20 increments wide illustrating the specified percentage.
"""
bar_size = int(percentage / 5)
self.stdout.write(f"\r [{'#' * bar_size}{' ' * (20-bar_size)}] {int(percentage)}%", ending='')
def handle(self, *model_names, **options):
# If --force was passed, first delete all existing CablePaths
if options['force']:
cable_paths = CablePath.objects.all()
paths_count = cable_paths.count()
# Prompt the user to confirm recalculation of all paths
if paths_count and not options['no_input']:
self.stdout.write(self.style.ERROR("WARNING: Forcing recalculation of all cable paths."))
self.stdout.write(
f"This will delete and recalculate all {paths_count} existing cable paths. Are you sure?"
)
confirmation = input("Type yes to confirm: ")
if confirmation != 'yes':
self.stdout.write(self.style.SUCCESS("Aborting"))
return
# Delete all existing CablePath instances
self.stdout.write(f"Deleting {paths_count} existing cable paths...")
deleted_count, _ = CablePath.objects.all().delete()
self.stdout.write((self.style.SUCCESS(f' Deleted {deleted_count} paths')))
# Reinitialize the model's PK sequence
self.stdout.write(f'Resetting database sequence for CablePath model')
sequence_sql = connection.ops.sequence_reset_sql(no_style(), [CablePath])
with connection.cursor() as cursor:
for sql in sequence_sql:
cursor.execute(sql)
# Retrace paths
for model in ENDPOINT_MODELS:
params = Q(cable__isnull=False)
if hasattr(model, 'wireless_link'):
params |= Q(wireless_link__isnull=False)
origins = model.objects.filter(params)
if not options['force']:
origins = origins.filter(_path__isnull=True)
origins_count = origins.count()
if not origins_count:
self.stdout.write(f'Found no missing {model._meta.verbose_name} paths; skipping')
continue
self.stdout.write(f'Retracing {origins_count} cabled {model._meta.verbose_name_plural}...')
i = 0
for i, obj in enumerate(origins, start=1):
create_cablepath(obj)
if not i % 100:
self.draw_progress_bar(i * 100 / origins_count)
self.draw_progress_bar(100)
self.stdout.write(self.style.SUCCESS(f'\n Retraced {i} {model._meta.verbose_name_plural}'))
self.stdout.write(self.style.SUCCESS('Finished.'))
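# Hedged usage note (editor's addition, not part of the original file): as a Django
# management command, the class above is invoked through manage.py; the flags shown
# are the ones defined in add_arguments().
#
#   python manage.py trace_paths
#   python manage.py trace_paths --force --no-input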
| 41.769231
| 111
| 0.618521
|
ae3f75d92abee26a59deb7cbafb494d0bf36be07
| 1,524
|
py
|
Python
|
exception/unitTest.py
|
chainren/python-learn
|
5e48e96c4bb212806b9ae0954fdb368abdcf9ba3
|
[
"Apache-2.0"
] | null | null | null |
exception/unitTest.py
|
chainren/python-learn
|
5e48e96c4bb212806b9ae0954fdb368abdcf9ba3
|
[
"Apache-2.0"
] | 16
|
2020-02-12T03:09:30.000Z
|
2022-03-12T00:08:59.000Z
|
exception/unitTest.py
|
chainren/python-learn
|
5e48e96c4bb212806b9ae0954fdb368abdcf9ba3
|
[
"Apache-2.0"
] | null | null | null |
# Unit testing
# To write unit tests, we need to import Python's built-in unittest module
import unittest
from mydict import MyDict
class TestDict(unittest.TestCase):
def test_init(self):
d = MyDict(a=1,b='test')
self.assertEqual(d.a,1)
self.assertEqual(d.b,'test')
self.assertTrue(isinstance(d, dict))
def test_key(self):
d = MyDict()
d['key'] = 'value'
self.assertEqual(d.key,'value')
def test_attr(self):
d = MyDict()
d.key = 'value'
self.assertTrue('key' in d)
self.assertEqual(d['key'],'value')
def test_keyerror(self):
        d = MyDict()
with self.assertRaises(KeyError):
value = d['empty']
def test_attrerror(self):
d = MyDict()
with self.assertRaises(AttributeError):
value = d.empty
    # When writing unit tests, we write a test class that inherits from unittest.TestCase.
    # Methods whose names start with test are test methods; methods that do not start with test are not treated as test methods and are not executed during testing.
    # A test_xxx() method is written for each kind of test. Since unittest.TestCase provides many built-in assertions, we only need to call them to assert that the output is what we expect. The most commonly used assertion is assertEqual().
    # Another important kind of assertion expects an Error of a given type to be raised: for example, accessing a non-existent key via d['empty'] should raise KeyError,
    # while accessing a non-existent key via d.empty should raise AttributeError.
    # setUp and tearDown
    # Two special methods, setUp() and tearDown(), can be defined in a unit test; they run before and after each test method, respectively.
    # What are setUp() and tearDown() for? Suppose your tests need to start a database: you can connect to it in setUp() and close it in tearDown(), so the same code does not have to be repeated in every test method.
def setUp(self):
print('set up...')
def tearDown(self):
print('tear down...')
# Running the unit tests
# The simplest way to run them is to add the two lines below at the end of mydict_test.py
if __name__ == '__main__':
unittest.main()
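# Hedged usage note (editor's addition, not part of the original file): besides
# calling unittest.main() as above, the same tests can be run from the command line;
# the module name below assumes the file is saved as mydict_test.py, as the comments
# above suggest.
#
#   python -m unittest mydict_test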
| 23.446154
| 109
| 0.674541
|
109904bcb6be40f483f6da70ec70977a72e11a15
| 2,886
|
py
|
Python
|
fRunCommand.py
|
SkyLined/rs
|
df13f1c73a80fdb2a5e6626d6d88c5da974db64f
|
[
"CC-BY-4.0"
] | 13
|
2018-03-28T12:05:09.000Z
|
2021-01-30T07:27:05.000Z
|
fRunCommand.py
|
SkyLined/rs
|
df13f1c73a80fdb2a5e6626d6d88c5da974db64f
|
[
"CC-BY-4.0"
] | 2
|
2017-12-31T10:23:35.000Z
|
2017-12-31T10:24:09.000Z
|
fRunCommand.py
|
SkyLined/rs
|
df13f1c73a80fdb2a5e6626d6d88c5da974db64f
|
[
"CC-BY-4.0"
] | null | null | null |
import os, re;
from mWindowsAPI import cConsoleProcess;
sComSpec = os.environ["COMSPEC"];
rSubstitudeTemplate = re.compile(
r"(\\)?"
r"\{"
r"(~?)"
r"("
r"l"
r"|"
r"(n|p)?[0-9]+"
r"|"
r"[fdpnx]+"
r")"
r"\}"
);
def fRunCommand(asCommandTemplate, sFilePath, o0LastNameMatch, o0LastPathMatch, auLineNumbers = []):
asCommandTemplate = [s for s in asCommandTemplate];
sDrivePath, sNameExtension = sFilePath.rsplit("\\", 1);
if ":" in sDrivePath:
sDrive, sPath = sDrivePath.split(":", 1);
if sDrive.startswith("\\\\?\\"):
sDrive = sDrive[4:];
sDrive += ":";
else:
sDrive, sPath = "", sDrivePath;
if "." in sNameExtension:
sName, sExtension = sNameExtension.rsplit(".", 1);
sExtension = "." + sExtension;
else:
sName, sExtension = sNameExtension, "";
def fsSubstitudeTemplate(oMatch):
sEscape, sDoNotQuote, sChars, s0IndexAppliesToNameOrPath = oMatch.groups();
if sEscape:
return "{" + sDoNotQuote + sChars + "}"; # do not replace.
if sChars == "l":
if fsSubstitudeTemplate.uCurrentLineNumberIndex < len(auLineNumbers):
fsSubstitudeTemplate.uCurrentLineNumberIndex += 1;
return "%d" % auLineNumbers[fsSubstitudeTemplate.uCurrentLineNumberIndex - 1];
return "-1";
if sChars[0] in "0123456789":
o0LastNameOrPathMatch = (
o0LastNameMatch if s0IndexAppliesToNameOrPath == "n" else
o0LastPathMatch if s0IndexAppliesToNameOrPath == "p" else
(o0LastNameMatch or o0LastPathMatch)
);
assert o0LastNameOrPathMatch, \
"There is no %s match from which to extract group %s" % (
{"n": "name", "p": "path"}.get(s0IndexAppliesToNameOrPath, "name or path"),
sChars
)
try:
sSubstitute = o0LastNameOrPathMatch.group(int(sChars));
except IndexError:
sSubstitute = "";
else:
sSubstitute = "";
dsReplacements = {
"f": sFilePath,
"d": sDrive or "",
"p": sPath or "",
"n": sName or "",
"x": sExtension or "",
};
sLastChar = "";
for sChar in sChars:
if sChar == "n" and sLastChar == "p":
sSubstitute += "\\"
sSubstitute += dsReplacements[sChar];
sLastChar = sChar;
if sDoNotQuote == "":
sSubstitute = '"%s"' % sSubstitute.replace('"', '"""');
return sSubstitute;
fsSubstitudeTemplate.uCurrentLineNumberIndex = 0;
asCommandLine = [
# match everything "{" replacement "}", and note if "{" is escaped as "\\{"
rSubstitudeTemplate.sub(fsSubstitudeTemplate, sTemplate)
for sTemplate in asCommandTemplate
];
oProcess = cConsoleProcess.foCreateForBinaryPathAndArguments(
sBinaryPath = sComSpec,
asArguments = ["/C"] + asCommandLine,
bRedirectStdOut = False,
bRedirectStdErr = False,
);
oProcess.fbWait();
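# Hedged usage sketch (editor's addition, not part of the original file): the tokens
# handled by rSubstitudeTemplate above expand to parts of the matched file path
# ({f} full path, {d} drive, {p} path, {n} name, {x} extension, {l} line number,
# {<group index>} a group from the last name/path regex match). The call below is
# illustrative only and assumes a Windows-style path.
#
#   fRunCommand(["echo", "{n}{x}"], r"C:\temp\example.txt", None, None);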
| 30.702128
| 100
| 0.608801
|
acb37d7672c988fc4222b4a59896447e51ff7579
| 19,745
|
py
|
Python
|
i3ipc/connection.py
|
vincentbernat/i3ipc-python
|
1d0c214e0013816c413e69ef923fb3fe353cf26f
|
[
"BSD-3-Clause"
] | null | null | null |
i3ipc/connection.py
|
vincentbernat/i3ipc-python
|
1d0c214e0013816c413e69ef923fb3fe353cf26f
|
[
"BSD-3-Clause"
] | null | null | null |
i3ipc/connection.py
|
vincentbernat/i3ipc-python
|
1d0c214e0013816c413e69ef923fb3fe353cf26f
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
from .con import Con
from .replies import (BarConfigReply, CommandReply, ConfigReply, OutputReply, TickReply,
VersionReply, WorkspaceReply, SeatReply, InputReply)
from .events import (IpcBaseEvent, BarconfigUpdateEvent, BindingEvent, OutputEvent, ShutdownEvent,
WindowEvent, TickEvent, ModeEvent, WorkspaceEvent, InputEvent, Event)
from ._private import PubSub, MessageType, EventType, Synchronizer
from typing import List, Optional, Union, Callable
import struct
import json
import socket
import os
from threading import Timer, Lock
import time
import Xlib
import Xlib.display
from Xlib.error import DisplayError
import logging
from subprocess import run, PIPE
logger = logging.getLogger(__name__)
class Connection:
"""A connection to the i3 ipc used for querying window manager state and
listening to events.
The ``Connection`` class is the entry point into all features of the
library.
:Example:
.. code-block:: python3
i3 = Connection()
workspaces = i3.get_workspaces()
i3.command('focus left')
:param socket_path: A path to the i3 ipc socket path to connect to. If not
given, find the socket path through the default search path.
:type socket_path: str
:param auto_reconnect: Whether to attempt to reconnect if the connection to
the socket is broken when i3 restarts.
:type auto_reconnect: bool
:raises Exception: If the connection to i3 cannot be established.
"""
_MAGIC = 'i3-ipc' # safety string for i3-ipc
_chunk_size = 1024 # in bytes
_timeout = 0.5 # in seconds
_struct_header = '=%dsII' % len(_MAGIC.encode('utf-8'))
_struct_header_size = struct.calcsize(_struct_header)
def __init__(self, socket_path=None, auto_reconnect=False):
if socket_path:
logger.info('using user provided socket path: %s', socket_path)
else:
socket_path = self._find_socket_path()
if not socket_path:
raise Exception('Failed to retrieve the i3 or sway IPC socket path')
self.subscriptions = 0
self._pubsub = PubSub(self)
self._socket_path = socket_path
self._cmd_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self._cmd_socket.connect(self._socket_path)
self._cmd_lock = Lock()
self._sub_socket = None
self._sub_lock = Lock()
self._auto_reconnect = auto_reconnect
self._quitting = False
self._synchronizer = None
def _find_socket_path(self):
socket_path = os.environ.get("I3SOCK")
if socket_path:
logger.info('got socket path from I3SOCK env variable: %s', socket_path)
return socket_path
socket_path = os.environ.get("SWAYSOCK")
if socket_path:
logger.info('got socket path from SWAYSOCK env variable: %s', socket_path)
return socket_path
try:
disp = Xlib.display.Display()
root = disp.screen().root
i3atom = disp.intern_atom("I3_SOCKET_PATH")
prop = root.get_full_property(i3atom, Xlib.X.AnyPropertyType)
if prop and prop.value:
socket_path = prop.value.decode()
except DisplayError as e:
logger.info('could not get i3 socket path from root atom', exc_info=e)
if socket_path:
logger.info('got socket path from root atom: %s', socket_path)
return socket_path
for binary in ('i3', 'sway'):
try:
process = run([binary, '--get-socketpath'], stdout=PIPE, stderr=PIPE)
if process.returncode == 0 and process.stdout:
socket_path = process.stdout.decode().strip()
logger.info('got socket path from `%s` binary: %s', binary, socket_path)
return socket_path
else:
logger.info(
'could not get socket path from `%s` binary: returncode=%d, stdout=%s, stderr=%s',
process.returncode, process.stdout, process.stderr)
except Exception as e:
logger.info('could not get i3 socket path from `%s` binary', binary, exc_info=e)
continue
logger.info('could not find i3 socket path')
return None
def _sync(self):
if self._synchronizer is None:
self._synchronizer = Synchronizer()
self._synchronizer.sync()
@property
def socket_path(self) -> str:
"""The path of the socket this ``Connection`` is connected to.
:rtype: str
"""
return self._socket_path
@property
def auto_reconnect(self) -> bool:
"""Whether this ``Connection`` will attempt to reconnect when the
connection to the socket is broken.
:rtype: bool
"""
return self._auto_reconnect
def _pack(self, msg_type, payload):
"""Packs the given message type and payload. Turns the resulting
message into a byte string.
"""
pb = payload.encode('utf-8')
s = struct.pack('=II', len(pb), msg_type.value)
return self._MAGIC.encode('utf-8') + s + pb
def _unpack(self, data):
"""Unpacks the given byte string and parses the result from JSON.
Returns None on failure and saves data into "self.buffer".
"""
msg_magic, msg_length, msg_type = self._unpack_header(data)
msg_size = self._struct_header_size + msg_length
# XXX: Message shouldn't be any longer than the data
payload = data[self._struct_header_size:msg_size]
return payload.decode('utf-8', 'replace')
def _unpack_header(self, data):
"""Unpacks the header of given byte string.
"""
return struct.unpack(self._struct_header, data[:self._struct_header_size])
def _ipc_recv(self, sock):
data = sock.recv(14)
if len(data) == 0:
logger.info('got EOF from ipc socket')
return '', 0
msg_magic, msg_length, msg_type = self._unpack_header(data)
logger.info('reading ipc message: type=%s, length=%s', msg_type, msg_length)
msg_size = self._struct_header_size + msg_length
while len(data) < msg_size:
data += sock.recv(msg_length)
payload = self._unpack(data)
logger.info('message payload: %s', payload)
return payload, msg_type
def _ipc_send(self, sock, message_type, payload):
"""Send and receive a message from the ipc. NOTE: this is not thread
safe
"""
logger.info('sending to ipc socket: type=%s, payload=%s', message_type, payload)
sock.sendall(self._pack(message_type, payload))
data, msg_type = self._ipc_recv(sock)
return data
def _wait_for_socket(self):
# for the auto_reconnect feature only
socket_path_exists = False
for tries in range(0, 500):
socket_path_exists = os.path.exists(self._socket_path)
if socket_path_exists:
break
time.sleep(0.001)
return socket_path_exists
def _message(self, message_type, payload):
try:
self._cmd_lock.acquire()
return self._ipc_send(self._cmd_socket, message_type, payload)
except ConnectionError as e:
if not self.auto_reconnect:
raise e
logger.info('got a connection error, reconnecting', exc_info=e)
# XXX: can the socket path change between restarts?
if not self._wait_for_socket():
logger.info('could not reconnect')
raise e
self._cmd_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self._cmd_socket.connect(self._socket_path)
return self._ipc_send(self._cmd_socket, message_type, payload)
finally:
self._cmd_lock.release()
def command(self, payload: str) -> List[CommandReply]:
"""Sends a command to i3.
.. seealso:: https://i3wm.org/docs/userguide.html#list_of_commands
:param cmd: The command to send to i3.
:type cmd: str
:returns: A list of replies that contain info for the result of each
command given.
:rtype: list(:class:`CommandReply <i3ipc.CommandReply>`)
"""
data = self._message(MessageType.COMMAND, payload)
if data:
data = json.loads(data)
return CommandReply._parse_list(data)
else:
return []
def get_version(self) -> VersionReply:
"""Gets the i3 version.
:returns: The i3 version.
:rtype: :class:`i3ipc.VersionReply`
"""
data = self._message(MessageType.GET_VERSION, '')
data = json.loads(data)
return VersionReply(data)
def get_bar_config(self, bar_id: str = None) -> Optional[BarConfigReply]:
"""Gets the bar configuration specified by the id.
:param bar_id: The bar id to get the configuration for. If not given,
get the configuration for the first bar id.
:type bar_id: str
:returns: The bar configuration for the bar id.
:rtype: :class:`BarConfigReply <i3ipc.BarConfigReply>` or :class:`None`
if no bar configuration is found.
"""
if not bar_id:
bar_config_list = self.get_bar_config_list()
if not bar_config_list:
return None
bar_id = bar_config_list[0]
data = self._message(MessageType.GET_BAR_CONFIG, bar_id)
data = json.loads(data)
return BarConfigReply(data)
def get_bar_config_list(self) -> List[str]:
"""Gets the names of all bar configurations.
:returns: A list of all bar configurations.
:rtype: list(str)
"""
data = self._message(MessageType.GET_BAR_CONFIG, '')
return json.loads(data)
def get_outputs(self) -> List[OutputReply]:
"""Gets the list of current outputs.
:returns: A list of current outputs.
:rtype: list(:class:`i3ipc.OutputReply`)
"""
data = self._message(MessageType.GET_OUTPUTS, '')
data = json.loads(data)
return OutputReply._parse_list(data)
def get_inputs(self) -> List[InputReply]:
"""(sway only) Gets the inputs connected to the compositor.
:returns: The reply to the inputs command
:rtype: list(:class:`i3ipc.InputReply`)
"""
data = self._message(MessageType.GET_INPUTS, '')
data = json.loads(data)
return InputReply._parse_list(data)
def get_seats(self) -> List[SeatReply]:
"""(sway only) Gets the seats configured on the compositor
:returns: The reply to the seats command
:rtype: list(:class:`i3ipc.SeatReply`)
"""
data = self._message(MessageType.GET_SEATS, '')
data = json.loads(data)
return SeatReply._parse_list(data)
def get_workspaces(self) -> List[WorkspaceReply]:
"""Gets the list of current workspaces.
:returns: A list of current workspaces
:rtype: list(:class:`i3ipc.WorkspaceReply`)
"""
data = self._message(MessageType.GET_WORKSPACES, '')
data = json.loads(data)
return WorkspaceReply._parse_list(data)
def get_tree(self) -> Con:
"""Gets the root container of the i3 layout tree.
:returns: The root container of the i3 layout tree.
:rtype: :class:`i3ipc.Con`
"""
data = self._message(MessageType.GET_TREE, '')
return Con(json.loads(data), None, self)
def get_marks(self) -> List[str]:
"""Gets the names of all currently set marks.
:returns: A list of currently set marks.
:rtype: list(str)
"""
data = self._message(MessageType.GET_MARKS, '')
return json.loads(data)
def get_binding_modes(self) -> List[str]:
"""Gets the names of all currently configured binding modes
:returns: A list of binding modes
:rtype: list(str)
"""
data = self._message(MessageType.GET_BINDING_MODES, '')
return json.loads(data)
def get_config(self) -> ConfigReply:
"""Returns the last loaded i3 config.
:returns: A class containing the config.
:rtype: :class:`i3ipc.ConfigReply`
"""
data = self._message(MessageType.GET_CONFIG, '')
data = json.loads(data)
return ConfigReply(data)
def send_tick(self, payload: str = "") -> TickReply:
"""Sends a tick with the specified payload.
:returns: The reply to the tick command
:rtype: :class:`i3ipc.TickReply`
"""
data = self._message(MessageType.SEND_TICK, payload)
data = json.loads(data)
return TickReply(data)
def _subscribe(self, events):
events_obj = []
if events & EventType.WORKSPACE.value:
events_obj.append("workspace")
if events & EventType.OUTPUT.value:
events_obj.append("output")
if events & EventType.MODE.value:
events_obj.append("mode")
if events & EventType.WINDOW.value:
events_obj.append("window")
if events & EventType.BARCONFIG_UPDATE.value:
events_obj.append("barconfig_update")
if events & EventType.BINDING.value:
events_obj.append("binding")
if events & EventType.SHUTDOWN.value:
events_obj.append("shutdown")
if events & EventType.TICK.value:
events_obj.append("tick")
if events & EventType.INPUT.value:
events_obj.append("input")
try:
self._sub_lock.acquire()
data = self._ipc_send(self._sub_socket, MessageType.SUBSCRIBE, json.dumps(events_obj))
finally:
self._sub_lock.release()
data = json.loads(data)
result = CommandReply(data)
self.subscriptions |= events
return result
def off(self, handler: Callable[['Connection', IpcBaseEvent], None]):
"""Unsubscribe the handler from being called on ipc events.
:param handler: The handler that was previously attached with
:func:`on()`.
:type handler: :class:`Callable`
"""
self._pubsub.unsubscribe(handler)
def on(self, event: Union[Event, str], handler: Callable[['Connection', IpcBaseEvent], None]):
"""Subscribe to the event and call the handler when it is emitted by
the i3 ipc.
:param event: The event to subscribe to.
:type event: :class:`Event <i3ipc.Event>` or str
:param handler: The event handler to call.
:type handler: :class:`Callable`
"""
if type(event) is Event:
event = event.value
event = event.replace('-', '_')
if event.count('::') > 0:
[base_event, __] = event.split('::')
else:
base_event = event
# special case: ipc-shutdown is not in the protocol
if event == 'ipc_shutdown':
# TODO deprecate this
self._pubsub.subscribe(event, handler)
return
event_type = 0
if base_event == 'workspace':
event_type = EventType.WORKSPACE
elif base_event == 'output':
event_type = EventType.OUTPUT
elif base_event == 'mode':
event_type = EventType.MODE
elif base_event == 'window':
event_type = EventType.WINDOW
elif base_event == 'barconfig_update':
event_type = EventType.BARCONFIG_UPDATE
elif base_event == 'binding':
event_type = EventType.BINDING
elif base_event == 'shutdown':
event_type = EventType.SHUTDOWN
elif base_event == 'tick':
event_type = EventType.TICK
elif base_event == 'input':
event_type = EventType.INPUT
if not event_type:
raise Exception('event not implemented')
self.subscriptions |= event_type.value
self._pubsub.subscribe(event, handler)
def _event_socket_setup(self):
self._sub_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self._sub_socket.connect(self._socket_path)
self._subscribe(self.subscriptions)
def _event_socket_teardown(self):
if self._sub_socket:
self._sub_socket.shutdown(socket.SHUT_RDWR)
self._sub_socket = None
def _event_socket_poll(self):
if self._sub_socket is None:
return True
logger.info('getting ipc event from subscription socket')
data, msg_type = self._ipc_recv(self._sub_socket)
if len(data) == 0:
logger.info('subscription socket got EOF, shutting down')
self._pubsub.emit('ipc_shutdown', None)
return True
data = json.loads(data)
msg_type = 1 << (msg_type & 0x7f)
event_name = ''
event = None
if msg_type == EventType.WORKSPACE.value:
event_name = 'workspace'
event = WorkspaceEvent(data, self)
elif msg_type == EventType.OUTPUT.value:
event_name = 'output'
event = OutputEvent(data)
elif msg_type == EventType.MODE.value:
event_name = 'mode'
event = ModeEvent(data)
elif msg_type == EventType.WINDOW.value:
event_name = 'window'
event = WindowEvent(data, self)
elif msg_type == EventType.BARCONFIG_UPDATE.value:
event_name = 'barconfig_update'
event = BarconfigUpdateEvent(data)
elif msg_type == EventType.BINDING.value:
event_name = 'binding'
event = BindingEvent(data)
elif msg_type == EventType.SHUTDOWN.value:
event_name = 'shutdown'
event = ShutdownEvent(data)
elif msg_type == EventType.TICK.value:
event_name = 'tick'
event = TickEvent(data)
elif msg_type == EventType.INPUT.value:
event_name = 'input'
event = InputEvent(data)
else:
# we have not implemented this event
return
try:
self._pubsub.emit(event_name, event)
except Exception as e:
print(e)
raise e
def main(self, timeout: float = 0.0):
"""Starts the main loop for this connection to start handling events.
:param timeout: If given, quit the main loop after ``timeout`` seconds.
:type timeout: float
"""
loop_exception = None
self._quitting = False
timer = None
logger.info('starting the main loop')
while True:
try:
self._event_socket_setup()
if timeout:
timer = Timer(timeout, self.main_quit)
timer.start()
while not self._event_socket_poll():
pass
except Exception as e:
loop_exception = e
finally:
if timer:
timer.cancel()
self._event_socket_teardown()
if self._quitting or not self.auto_reconnect:
break
if not self._wait_for_socket():
break
if loop_exception:
raise loop_exception
def main_quit(self):
"""Quits the running main loop for this connection."""
logger.info('shutting down the main loop')
self._quitting = True
self._event_socket_teardown()
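# Hedged usage sketch (editor's addition, not part of the original file): subscribing
# to window-focus events with the Connection defined above. Event.WINDOW_FOCUS and
# the (connection, event) handler signature come from this library's events module,
# imported at the top of the file.
if __name__ == '__main__':
    def _print_focused_window(conn, event):
        # event is a WindowEvent carrying the affected container
        print('focused:', event.container.name)

    i3 = Connection()
    i3.on(Event.WINDOW_FOCUS, _print_focused_window)
    i3.main()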
| 34.640351
| 106
| 0.603343
|
c30798bbfd8f141afe8ca76d7fbd3ba89f79376b
| 5,866
|
py
|
Python
|
user/views.py
|
penagos/scientiapy
|
c9bc7f3e2c9c15e7b8608b7e9b94620416a83bab
|
[
"MIT"
] | 2
|
2020-08-20T19:47:00.000Z
|
2021-08-11T10:31:48.000Z
|
user/views.py
|
penagos/scientiapy
|
c9bc7f3e2c9c15e7b8608b7e9b94620416a83bab
|
[
"MIT"
] | null | null | null |
user/views.py
|
penagos/scientiapy
|
c9bc7f3e2c9c15e7b8608b7e9b94620416a83bab
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import authenticate, logout, login as auth_login
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404, redirect, render
from django.http import HttpResponseRedirect, JsonResponse
from django.urls import reverse
from questions.models import Post
from user.models import Profile, Setting
# Create your views here.
def activate(request):
return JsonResponse({'success': True})
def login(request):
# Handle both POST and GET requests
if request.method == "POST":
user = authenticate(username=request.POST['username'],
password=request.POST['password'])
if user is not None:
# Successful login
auth_login(request, user)
return JsonResponse({'success': True})
else:
# Failure
return JsonResponse({
'success': False,
'message': 'Could not log you in'
})
else:
context = {}
return render(request, 'user/login.html', context)
def join(request):
# If logged in, redirect to index
if request.user.is_authenticated:
return HttpResponseRedirect(reverse('questions:index'))
else:
# Handle post requests for new users
if request.method == "POST":
username = request.POST['username']
password = request.POST['password']
password2 = request.POST['password2']
email = request.POST['email']
# Make sure passwords match
if password != password2:
return JsonResponse({
'success': False,
'message': 'Passwords must match'
})
# Even though the frontend checks this for us, we check again to be
# on the safe side
if email == "":
return JsonResponse({
'success': False,
'message': 'Email cannot be empty'
})
if username == "":
return JsonResponse({
'success': False,
'message': 'Username cannot be empty'
})
if len(password) < 4:
return JsonResponse({
'success':
False,
'message':
'Password must be at least 4 characters long'
})
# Check if username exists
usr = User.objects.filter(username=username)
if usr.exists():
return JsonResponse({
'success': False,
'message': 'Username already taken'
})
else:
try:
user = User.objects.create_user(username, email, password)
user.save()
except Exception:
return JsonResponse({
'success':
False,
'message':
'An unexpected error occurred'
})
user = authenticate(username=username, password=password)
auth_login(request, user)
return JsonResponse({'success': True})
else:
context = {}
return render(request, 'user/join.html', context)
def logoff(request):
logout(request)
return redirect('/')
def forgotpassword(request):
context = {}
return render(request, 'user/forgotpassword.html', context)
def profile(request, uid):
user = get_object_or_404(User, pk=uid)
user.profile = Profile.objects.get_or_create(user=user)[0]
recentQuestions = Post.getRecentQuestionsByUser(uid, 5)
recentAnswers = Post.getRecentAnswersByUser(uid, 5)
context = {
'profileUser': user,
'recentQuestions': recentQuestions,
'recentAnswers': recentAnswers
}
return render(request, 'user/profile.html', context)
def settings(request, uid):
if not request.user.is_authenticated:
raise PermissionDenied
else:
user = get_object_or_404(User, pk=uid)
user.profile = Profile.objects.get_or_create(user=user)[0]
user.setting = Setting.objects.get_or_create(user=user)[0]
if request.method == "POST":
if 'receive_digests' in request.POST:
# Enable
user.setting.receive_digests = True
else:
# Disable
user.setting.receive_digests = False
if 'subscribe_all' in request.POST:
# Enable
user.setting.subscribe_all = True
else:
# Disable
user.setting.subscribe_all = False
user.profile.about = request.POST['about']
user.setting.save()
user.profile.save()
return HttpResponseRedirect(
reverse('user:settings', args=(request.user.id, )) +
'?success=1')
else:
if user.setting.receive_digests:
receive_digests = 'checked'
else:
receive_digests = ''
if user.setting.subscribe_all:
subscribe_all = 'checked'
else:
subscribe_all = ''
context = {
'uid': uid,
'receive_digests': receive_digests,
'subscribe_all': subscribe_all,
'aboutme': user.profile.about,
'email': user.email
}
return render(request, 'user/settings.html', context)
def all(request):
query = request.GET.get('query')
users = User.objects.filter(username__icontains=query)
return JsonResponse([x.username for x in list(users)], safe=False)
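# Hypothetical URL wiring implied by the views above: the 'user' namespace and
# the 'settings' route name match the reverse() calls in settings(); the other
# paths and names are assumptions.
from django.urls import path

app_name = 'user'
urlpatterns = [
    path('login/', login, name='login'),
    path('join/', join, name='join'),
    path('logout/', logoff, name='logoff'),
    path('forgot-password/', forgotpassword, name='forgotpassword'),
    path('profile/<int:uid>/', profile, name='profile'),
    path('settings/<int:uid>/', settings, name='settings'),
    path('all/', all, name='all'),
]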
| 32.40884
| 79
| 0.536652
|
002221126913d97a2111b981d4662ea4a04f9f1c
| 6,162
|
py
|
Python
|
.github/scripts/build_assets/api_handler.py
|
Thomas-Boi/devicon
|
ac5f98152afda508ba2f1217f6b8ca7ef7b6a4f2
|
[
"MIT"
] | null | null | null |
.github/scripts/build_assets/api_handler.py
|
Thomas-Boi/devicon
|
ac5f98152afda508ba2f1217f6b8ca7ef7b6a4f2
|
[
"MIT"
] | 47
|
2021-01-09T06:53:25.000Z
|
2021-12-27T07:53:28.000Z
|
.github/scripts/build_assets/api_handler.py
|
Thomas-Boi/devicon
|
ac5f98152afda508ba2f1217f6b8ca7ef7b6a4f2
|
[
"MIT"
] | 2
|
2021-12-30T11:27:47.000Z
|
2022-01-28T18:20:49.000Z
|
import requests
import sys
import re
from typing import List
# our base url which leads to devicon
# base_url = "https://api.github.com/repos/devicons/devicon/"
# testing url
base_url = "https://api.github.com/repos/Thomas-Boi/devicon/"
def get_merged_pull_reqs_since_last_release(token):
"""
Get all the merged pull requests since the last release.
"""
stopPattern = r"^(r|R)elease v"
pull_reqs = []
found_last_release = False
page = 1
print("Getting PRs since last release.")
while not found_last_release:
data = get_merged_pull_reqs(token, page)
# default to just past the page size (100 PRs per page) in case the
# release PR isn't on this page
last_release_index = 101
for i in range(len(data)):
if re.search(stopPattern, data[i]["title"]):
found_last_release = True
last_release_index = i
break
pull_reqs.extend(data[:last_release_index])
page += 1
# should contain all the PRs since last release
return pull_reqs
def get_merged_pull_reqs(token, page):
"""
Get the merged pull requests based on page. There are
100 results per page. See https://docs.github.com/en/rest/reference/pulls
for more details on the parameters.
:param token, a GitHub API token.
:param page, the page number.
"""
url = base_url + "pulls"
headers = {
"Authorization": f"token {token}"
}
params = {
"accept": "application/vnd.github.v3+json",
"state": "closed",
"per_page": 100,
"page": page
}
print(f"Querying the GitHub API for requests page #{page}")
response = requests.get(url, headers=headers, params=params)
if not response:
print(f"Can't query the GitHub API. Status code is {response.status_code}. Message is {response.text}")
sys.exit(1)
closed_pull_reqs = response.json()
return [merged_pull_req
for merged_pull_req in closed_pull_reqs
if merged_pull_req["merged_at"] is not None]
def is_feature_icon(pull_req_data):
"""
Check whether the pull request is a feature:icon PR.
:param pull_req_data - the data on a specific pull request from GitHub.
:return true if pull_req_data has a label named "feature:icon"
"""
for label in pull_req_data["labels"]:
if label["name"] == "feature:icon":
return True
return False
def find_all_authors(pull_req_data, token):
"""
Find all the authors of a PR based on its commits.
:param pull_req_data - the data on a specific pull request from GitHub.
:param token - a GitHub API token.
"""
headers = {
"Authorization": f"token {token}"
}
response = requests.get(pull_req_data["commits_url"], headers=headers)
if not response:
print(f"Can't query the GitHub API. Status code is {response.status_code}")
print("Response is: ", response.text)
return
commits = response.json()
authors = set() # want unique authors only
for commit in commits:
try:
# this contains the proper, referenceable GitHub username
authors.add(commit["author"]["login"])
except TypeError:
# special case: the commit isn't linked to a GitHub account, fall back to the git author name
authors.add(commit["commit"]["author"]["name"])
print(f"This URL didn't have an `author` attribute: {pull_req_data['commits_url']}")
return ", ".join(["@" + author for author in list(authors)])
def label_issues(token: str, repo: str, issues: List[str], labels: List[str]):
"""
Label the specified issues with the specified labels.
:param token: the GitHub API token.
:param repo: the target repo (unused; the request URL is derived from base_url).
:param issues: the issue numbers (as str) that we are labelling.
:param labels: the labels to apply.
"""
headers = {
"Authorization": f"token {token}",
"accept": "application/vnd.github.v3+json"
}
url = base_url + "issues/{}/labels"
for issue in issues:
body = {
"labels": labels
}
response = requests.post(url.format(issue), headers=headers, json=body)
if not response:
raise Exception(f"Can't label the Issue provided. Issue: {issue}, labels: {labels}, API response: " + response.text)
else:
print(f"Successfully labelled issue {issue}")
def close_issues(token: str, issues: List[str]):
"""
Close issues.
:param token: the GitHub API token.
:param issues: the issue numbers (as str) that we are closing.
"""
headers = {
"Authorization": f"token {token}",
"accept": "application/vnd.github.v3+json"
}
url = base_url + "issues/{}"
body = {
"state": "closed"
}
for issue in issues:
response = requests.patch(url.format(issue), headers=headers, json=body)
if not response:
raise Exception(f"Can't close Issue provided. Issue: {issue}, API response: " + response.text)
else:
print(f"Successfully closed issue {issue}")
def get_issues_by_labels(token: str, labels: List[str]):
"""
Get a list of issues based on their labels.
:param token: the GitHub API token.
:param labels: the labels to filter issues by.
"""
url = base_url + "issues?per_page=100&labels={}&page={}"
headers = {
"Authorization": f"token {token}",
"accept": "application/vnd.github.v3+json"
}
issues = []
done = False
page_num = 1
while not done:
response = requests.get(url.format(",".join(labels), page_num), headers=headers)
if not response:
raise Exception(f"Can't access API. Can't get issues for labels: {labels}, API response: " + response.text)
else:
results = response.json()
if len(results) < 100:
done = True # we are done
else:
page_num += 1 # page is full => might need to check another page
# GitHub API also returns PRs for issues queries => have to check
issues_only = [issue for issue in results if issue.get("pull_request") is None]
issues.extend(issues_only)
return issues
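# Usage sketch: composes the helpers above to draft release-note lines for new
# icons merged since the last release; the output format is illustrative only.
def _draft_new_icon_lines(token):
    lines = []
    for pull_req in get_merged_pull_reqs_since_last_release(token):
        if is_feature_icon(pull_req):
            authors = find_all_authors(pull_req, token)
            lines.append(f"- {pull_req['title']} (by {authors})")
    return lines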
| 33.308108
| 128
| 0.620902
|
b44a978adc34a602aff357c90dcfbc78dc927e2f
| 3,715
|
py
|
Python
|
official/nlp/modeling/layers/on_device_embedding.py
|
caleblu/influence-patterns
|
fa7ba555fdee9561865c88fe59364840f9721841
|
[
"MIT"
] | 3
|
2021-12-08T07:47:41.000Z
|
2022-02-16T23:07:58.000Z
|
official/nlp/modeling/layers/on_device_embedding.py
|
caleblu/influence-patterns
|
fa7ba555fdee9561865c88fe59364840f9721841
|
[
"MIT"
] | 1
|
2022-02-05T18:27:14.000Z
|
2022-02-05T18:27:14.000Z
|
official/nlp/modeling/layers/on_device_embedding.py
|
caleblu/influence-patterns
|
fa7ba555fdee9561865c88fe59364840f9721841
|
[
"MIT"
] | null | null | null |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-based one-hot embedding layer."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import tensorflow as tf
from official.modeling import tf_utils
@tf.keras.utils.register_keras_serializable(package="Text")
class OnDeviceEmbedding(tf.keras.layers.Layer):
"""Performs an embedding lookup suitable for accelerator devices.
This layer uses either tf.gather or tf.one_hot to translate integer indices to
float embeddings.
Arguments:
vocab_size: Number of elements in the vocabulary.
embedding_width: Output size of the embedding layer.
initializer: The initializer to use for the embedding weights. Defaults to
"glorot_uniform".
use_one_hot: Whether to use tf.one_hot over tf.gather for the embedding
lookup. Defaults to False (that is, using tf.gather). Setting this option
to True may improve performance, especially on small vocabulary sizes,
but will generally require more memory.
"""
def __init__(self,
vocab_size,
embedding_width,
initializer="glorot_uniform",
use_one_hot=False,
**kwargs):
# We need to have a default dtype of float32, since the inputs (which Keras
# usually uses to infer the dtype) will always be int32.
if "dtype" not in kwargs:
kwargs["dtype"] = "float32"
super(OnDeviceEmbedding, self).__init__(**kwargs)
self._vocab_size = vocab_size
self._embedding_width = embedding_width
self._initializer = initializer
self._use_one_hot = use_one_hot
def get_config(self):
config = {
"vocab_size": self._vocab_size,
"embedding_width": self._embedding_width,
"initializer": self._initializer,
"use_one_hot": self._use_one_hot,
}
base_config = super(OnDeviceEmbedding, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
self.embeddings = self.add_weight(
"embeddings",
shape=[self._vocab_size, self._embedding_width],
initializer=self._initializer)
super(OnDeviceEmbedding, self).build(input_shape)
def call(self, inputs):
input_shape = tf_utils.get_shape_list(inputs, expected_rank=2)
input_shape.append(self._embedding_width)
flat_inputs = tf.reshape(inputs, [-1])
if self._use_one_hot:
one_hot_data = tf.one_hot(flat_inputs,
depth=self._vocab_size,
dtype=self._dtype)
embeddings = tf.matmul(one_hot_data, self.embeddings)
else:
embeddings = tf.gather(self.embeddings, flat_inputs)
embeddings = tf.reshape(embeddings, input_shape)
return embeddings
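# Usage sketch: embeds a small batch of integer ids; the vocabulary size,
# embedding width, and id values are placeholders.
def _example_embedding_lookup():
    layer = OnDeviceEmbedding(vocab_size=100, embedding_width=16, use_one_hot=False)
    ids = tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.int32)  # shape (2, 3)
    return layer(ids)  # shape (2, 3, 16), dtype float32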
| 39.521277
| 83
| 0.664872
|
c1fe0020f9e65e521a36bffb15619299e145245b
| 46,015
|
py
|
Python
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocolstack/igmpmldrange_050273ee07c40d76e84d29c163a0efbe.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 20
|
2019-05-07T01:59:14.000Z
|
2022-02-11T05:24:47.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocolstack/igmpmldrange_050273ee07c40d76e84d29c163a0efbe.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 60
|
2019-04-03T18:59:35.000Z
|
2022-02-22T12:05:05.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocolstack/igmpmldrange_050273ee07c40d76e84d29c163a0efbe.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 13
|
2019-05-20T10:48:31.000Z
|
2021-10-06T07:45:44.000Z
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class IgmpMldRange(Base):
"""
The IgmpMldRange class encapsulates a list of igmpMldRange resources that are managed by the user.
A list of resources can be retrieved from the server using the IgmpMldRange.find() method.
The list can be managed by using the IgmpMldRange.add() and IgmpMldRange.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'igmpMldRange'
_SDM_ATT_MAP = {
'Enabled': 'enabled',
'GeneralQueryResponseMode': 'generalQueryResponseMode',
'ImmediateResponse': 'immediateResponse',
'JoinLeaveMultiplier': 'joinLeaveMultiplier',
'MeshingMode': 'meshingMode',
'Name': 'name',
'ObjectId': 'objectId',
'ReportFrequency': 'reportFrequency',
'RouterAlert': 'routerAlert',
'SpecificQueryResponseMode': 'specificQueryResponseMode',
'UnsolicitedResponseMode': 'unsolicitedResponseMode',
'Version': 'version',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(IgmpMldRange, self).__init__(parent, list_op)
@property
def JoinLeaveMulticastGroupRange(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.joinleavemulticastgrouprange_895f2e34c0a259f0cbb9a0509f35859e.JoinLeaveMulticastGroupRange): An instance of the JoinLeaveMulticastGroupRange class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.joinleavemulticastgrouprange_895f2e34c0a259f0cbb9a0509f35859e import JoinLeaveMulticastGroupRange
if self._properties.get('JoinLeaveMulticastGroupRange', None) is not None:
return self._properties.get('JoinLeaveMulticastGroupRange')
else:
return JoinLeaveMulticastGroupRange(self)
@property
def MulticastGroupRange(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.multicastgrouprange_97055a203584162212eb3ed86476c6d6.MulticastGroupRange): An instance of the MulticastGroupRange class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.multicastgrouprange_97055a203584162212eb3ed86476c6d6 import MulticastGroupRange
if self._properties.get('MulticastGroupRange', None) is not None:
return self._properties.get('MulticastGroupRange')
else:
return MulticastGroupRange(self)
@property
def Enabled(self):
# type: () -> bool
"""
Returns
-------
- bool: Disabled ranges won't be configured nor validated.
"""
return self._get_attribute(self._SDM_ATT_MAP['Enabled'])
@Enabled.setter
def Enabled(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['Enabled'], value)
@property
def GeneralQueryResponseMode(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, responds to General Query messages.
"""
return self._get_attribute(self._SDM_ATT_MAP['GeneralQueryResponseMode'])
@GeneralQueryResponseMode.setter
def GeneralQueryResponseMode(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['GeneralQueryResponseMode'], value)
@property
def ImmediateResponse(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, it will ignore the value specified in the Maximum Response Delay in the Membership Query message, assume that the Delay is always = 0 seconds and immediately respond to the Query by sending a Report.
"""
return self._get_attribute(self._SDM_ATT_MAP['ImmediateResponse'])
@ImmediateResponse.setter
def ImmediateResponse(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['ImmediateResponse'], value)
@property
def JoinLeaveMultiplier(self):
# type: () -> int
"""
Returns
-------
- number: The number of times a host sends every Join or Leave message.
"""
return self._get_attribute(self._SDM_ATT_MAP['JoinLeaveMultiplier'])
@JoinLeaveMultiplier.setter
def JoinLeaveMultiplier(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['JoinLeaveMultiplier'], value)
@property
def MeshingMode(self):
# type: () -> str
"""
Returns
-------
- str: Defines how the hosts in a range join the selected multicast group ranges.
"""
return self._get_attribute(self._SDM_ATT_MAP['MeshingMode'])
@MeshingMode.setter
def MeshingMode(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['MeshingMode'], value)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of range
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def ObjectId(self):
# type: () -> str
"""
Returns
-------
- str: Unique identifier for this object
"""
return self._get_attribute(self._SDM_ATT_MAP['ObjectId'])
@property
def ReportFrequency(self):
# type: () -> int
"""
Returns
-------
- number: When Send Unsolicited Response is enabled, specifies the frequency, in seconds, with which unsolicited messages are generated.
"""
return self._get_attribute(self._SDM_ATT_MAP['ReportFrequency'])
@ReportFrequency.setter
def ReportFrequency(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['ReportFrequency'], value)
@property
def RouterAlert(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, sets the Send Router Alert bit in the IP header.
"""
return self._get_attribute(self._SDM_ATT_MAP['RouterAlert'])
@RouterAlert.setter
def RouterAlert(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['RouterAlert'], value)
@property
def SpecificQueryResponseMode(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, responds to Group-Specific Query messages.
"""
return self._get_attribute(self._SDM_ATT_MAP['SpecificQueryResponseMode'])
@SpecificQueryResponseMode.setter
def SpecificQueryResponseMode(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['SpecificQueryResponseMode'], value)
@property
def UnsolicitedResponseMode(self):
# type: () -> bool
"""
Returns
-------
- bool: If selected, causes the emulated IGMP host to automatically send full membership messages at regular intervals, without waiting for a query message.
"""
return self._get_attribute(self._SDM_ATT_MAP['UnsolicitedResponseMode'])
@UnsolicitedResponseMode.setter
def UnsolicitedResponseMode(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['UnsolicitedResponseMode'], value)
@property
def Version(self):
# type: () -> str
"""
Returns
-------
- str: IGMP/MLD protocol version.
"""
return self._get_attribute(self._SDM_ATT_MAP['Version'])
@Version.setter
def Version(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Version'], value)
def update(self, Enabled=None, GeneralQueryResponseMode=None, ImmediateResponse=None, JoinLeaveMultiplier=None, MeshingMode=None, Name=None, ReportFrequency=None, RouterAlert=None, SpecificQueryResponseMode=None, UnsolicitedResponseMode=None, Version=None):
# type: (bool, bool, bool, int, str, str, int, bool, bool, bool, str) -> IgmpMldRange
"""Updates igmpMldRange resource on the server.
Args
----
- Enabled (bool): Disabled ranges won't be configured nor validated.
- GeneralQueryResponseMode (bool): If selected, responds to General Query messages.
- ImmediateResponse (bool): If selected, it will ignore the value specified in the Maximum Response Delay in the Membership Query message, assume that the Delay is always = 0 seconds and immediately respond to the Query by sending a Report.
- JoinLeaveMultiplier (number): The number of times a host sends every Join or Leave message.
- MeshingMode (str): Defines how the hosts in a range join the selected multicast group ranges.
- Name (str): Name of range
- ReportFrequency (number): When Send Unsolicited Response is enabled, specifies the frequency, in seconds, with which unsolicited messages are generated.
- RouterAlert (bool): If selected, sets the Send Router Alert bit in the IP header.
- SpecificQueryResponseMode (bool): If selected, responds to Group-Specific Query messages.
- UnsolicitedResponseMode (bool): If selected, causes the emulated IGMP host to automatically send full membership messages at regular intervals, without waiting for a query message.
- Version (str): IGMP/MLD protocol version.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, Enabled=None, GeneralQueryResponseMode=None, ImmediateResponse=None, JoinLeaveMultiplier=None, MeshingMode=None, Name=None, ReportFrequency=None, RouterAlert=None, SpecificQueryResponseMode=None, UnsolicitedResponseMode=None, Version=None):
# type: (bool, bool, bool, int, str, str, int, bool, bool, bool, str) -> IgmpMldRange
"""Adds a new igmpMldRange resource on the server and adds it to the container.
Args
----
- Enabled (bool): Disabled ranges won't be configured nor validated.
- GeneralQueryResponseMode (bool): If selected, responds to General Query messages.
- ImmediateResponse (bool): If selected, it will ignore the value specified in the Maximum Response Delay in the Membership Query message, assume that the Delay is always = 0 seconds and immediately respond to the Query by sending a Report.
- JoinLeaveMultiplier (number): The number of times a host sends every Join or Leave message.
- MeshingMode (str): Defines how the hosts in a range join the selected multicast group ranges.
- Name (str): Name of range
- ReportFrequency (number): When Send Unsolicited Response is enabled, specifies the frequency, in seconds, with which unsolicited messages are generated.
- RouterAlert (bool): If selected, sets the Send Router Alert bit in the IP header.
- SpecificQueryResponseMode (bool): If selected, responds to Group-Specific Query messages.
- UnsolicitedResponseMode (bool): If selected, causes the emulated IGMP host to automatically send full membership messages at regular intervals, without waiting for a query message.
- Version (str): IGMP/MLD protocol version.
Returns
-------
- self: This instance with all currently retrieved igmpMldRange resources using find and the newly added igmpMldRange resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained igmpMldRange resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, Enabled=None, GeneralQueryResponseMode=None, ImmediateResponse=None, JoinLeaveMultiplier=None, MeshingMode=None, Name=None, ObjectId=None, ReportFrequency=None, RouterAlert=None, SpecificQueryResponseMode=None, UnsolicitedResponseMode=None, Version=None):
# type: (bool, bool, bool, int, str, str, str, int, bool, bool, bool, str) -> IgmpMldRange
"""Finds and retrieves igmpMldRange resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve igmpMldRange resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all igmpMldRange resources from the server.
Args
----
- Enabled (bool): Disabled ranges won't be configured nor validated.
- GeneralQueryResponseMode (bool): If selected, responds to General Query messages.
- ImmediateResponse (bool): If selected, it will ignore the value specified in the Maximum Response Delay in the Membership Query message, assume that the Delay is always = 0 seconds and immediately respond to the Query by sending a Report.
- JoinLeaveMultiplier (number): The number of times a host sends every Join or Leave message.
- MeshingMode (str): Defines how the hosts in a range join the selected multicast group ranges.
- Name (str): Name of range
- ObjectId (str): Unique identifier for this object
- ReportFrequency (number): When Send Unsolicited Response is enabled, specifies the frequency, in seconds, with which unsolicited messages are generated.
- RouterAlert (bool): If selected, sets the Send Router Alert bit in the IP header.
- SpecificQueryResponseMode (bool): If selected, responds to Group-Specific Query messages.
- UnsolicitedResponseMode (bool): If selected, causes the emulated IGMP host to automatically send full membership messages at regular intervals, without waiting for a query message.
- Version (str): IGMP/MLD protocol version.
Returns
-------
- self: This instance with matching igmpMldRange resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of igmpMldRange data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the igmpMldRange resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def Apply(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the apply operation on the server.
Apply changes for on the fly configuration support.
apply(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('apply', payload=payload, response_object=None)
def CustomProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the customProtocolStack operation on the server.
Create custom protocol stack under /vport/protocolStack
customProtocolStack(Arg2=list, Arg3=enum, async_operation=bool)
---------------------------------------------------------------
- Arg2 (list(str)): List of plugin types to be added in the new custom stack
- Arg3 (str(kAppend | kMerge | kOverwrite)): Append, merge or overwrite existing protocol stack
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('customProtocolStack', payload=payload, response_object=None)
def DisableProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the disableProtocolStack operation on the server.
Disable a protocol under protocolStack using the class name
disableProtocolStack(Arg2=string, async_operation=bool)string
-------------------------------------------------------------
- Arg2 (str): Protocol class name to disable
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: Status of the exec
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('disableProtocolStack', payload=payload, response_object=None)
def EnableProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the enableProtocolStack operation on the server.
Enable a protocol under protocolStack using the class name
enableProtocolStack(Arg2=string, async_operation=bool)string
------------------------------------------------------------
- Arg2 (str): Protocol class name to enable
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: Status of the exec
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('enableProtocolStack', payload=payload, response_object=None)
def IgmpMldJoin(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the igmpMldJoin operation on the server.
Join IGMP/MLD multicast group ranges on the fly
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
igmpMldJoin(async_operation=bool)
---------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
igmpMldJoin(Arg2=enum, async_operation=bool)
--------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/igmpMld,/vport/protocolStack/atm/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/igmpMld,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/igmpMld,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/igmpMld,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/igmpMld,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ipEndpoint/igmpMld,/vport/protocolStack/atm/ipEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/igmpMld,/vport/protocolStack/atm/pppoxEndpoint/igmpMld,/vport/protocolStack/atm/pppoxEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/dhcpEndpoint/igmpMld,/vport/protocolStack/ethernet/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet
/emulatedRouter/ipEndpoint/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/igmpMld,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ipEndpoint/igmpMld,/vport/protocolStack/ethernet/ipEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/igmpMld,/vport/protocolStack/ethernet/pppoxEndpoint/igmpMld,/vport/protocolStack/ethernet/pppoxEndpoint/range/igmpMldRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('igmpMldJoin', payload=payload, response_object=None)
def IgmpMldLeave(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the igmpMldLeave operation on the server.
Leave IGMP/MLD multicast group ranges on the fly
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
igmpMldLeave(async_operation=bool)
----------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
igmpMldLeave(Arg2=enum, async_operation=bool)
---------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/igmpMld,/vport/protocolStack/atm/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/igmpMld,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/igmpMld,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/igmpMld,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/igmpMld,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ipEndpoint/igmpMld,/vport/protocolStack/atm/ipEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/igmpMld,/vport/protocolStack/atm/pppoxEndpoint/igmpMld,/vport/protocolStack/atm/pppoxEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/dhcpEndpoint/igmpMld,/vport/protocolStack/ethernet/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet
/emulatedRouter/ipEndpoint/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/igmpMld,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ipEndpoint/igmpMld,/vport/protocolStack/ethernet/ipEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/igmpMld,/vport/protocolStack/ethernet/pppoxEndpoint/igmpMld,/vport/protocolStack/ethernet/pppoxEndpoint/range/igmpMldRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('igmpMldLeave', payload=payload, response_object=None)
def IgmpMldStart(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the igmpMldStart operation on the server.
Start IGMP/MLD on selected plugins and ranges
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
igmpMldStart(async_operation=bool)
----------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
igmpMldStart(Arg2=enum, async_operation=bool)
---------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/igmpMld,/vport/protocolStack/atm/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/igmpMld,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/igmpMld,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/igmpMld,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/igmpMld,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ipEndpoint/igmpMld,/vport/protocolStack/atm/ipEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/igmpMld,/vport/protocolStack/atm/pppoxEndpoint/igmpMld,/vport/protocolStack/atm/pppoxEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/dhcpEndpoint/igmpMld,/vport/protocolStack/ethernet/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet
/emulatedRouter/ipEndpoint/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/igmpMld,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ipEndpoint/igmpMld,/vport/protocolStack/ethernet/ipEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/igmpMld,/vport/protocolStack/ethernet/pppoxEndpoint/igmpMld,/vport/protocolStack/ethernet/pppoxEndpoint/range/igmpMldRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('igmpMldStart', payload=payload, response_object=None)
def IgmpMldStop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the igmpMldStop operation on the server.
Stop IGMP/MLD on selected plugins and ranges
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
igmpMldStop(async_operation=bool)
---------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
igmpMldStop(Arg2=enum, async_operation=bool)
--------------------------------------------
- Arg2 (str(async | sync)): kArray[kObjref=/vport/protocolStack/atm/dhcpEndpoint/igmpMld,/vport/protocolStack/atm/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/igmpMld,/vport/protocolStack/atm/emulatedRouter/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/igmpMld,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/igmpMld,/vport/protocolStack/atm/emulatedRouter/ipEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/igmpMld,/vport/protocolStack/atm/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/atm/ipEndpoint/igmpMld,/vport/protocolStack/atm/ipEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/dhcpoPppClientEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/dhcpoPppServerEndpoint/range/igmpMldRange,/vport/protocolStack/atm/pppox/igmpMld,/vport/protocolStack/atm/pppoxEndpoint/igmpMld,/vport/protocolStack/atm/pppoxEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/dhcpEndpoint/igmpMld,/vport/protocolStack/ethernet/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/dhcpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/emulatedRouter/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet
/emulatedRouter/ipEndpoint/igmpMld,/vport/protocolStack/ethernet/emulatedRouter/ipEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpPcrfEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpPcrfS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpS5S8PgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpSgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/egtpUeS5S8SgwEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/igmpMld,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLacEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/l2tp/dhcpoLnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/l2tpEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ip/smDnsEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/ipEndpoint/igmpMld,/vport/protocolStack/ethernet/ipEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/dhcpoPppClientEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/dhcpoPppServerEndpoint/range/igmpMldRange,/vport/protocolStack/ethernet/pppox/igmpMld,/vport/protocolStack/ethernet/pppoxEndpoint/igmpMld,/vport/protocolStack/ethernet/pppoxEndpoint/range/igmpMldRange]
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('igmpMldStop', payload=payload, response_object=None)
| 82.169643
| 4,848
| 0.75358
|
432a50795cff4dcaa0858b46870133ab8308d3bb
| 3,559
|
py
|
Python
|
plugins/wedo_plugin/usb/__init__.py
|
RodPy/TurtleBots.activity-1
|
b976cb817f932f9716504c7bd38700401177cc0c
|
[
"MIT"
] | 1
|
2018-08-27T21:12:52.000Z
|
2018-08-27T21:12:52.000Z
|
usb/__init__.py
|
HiJasper/pyusb
|
2e4679183c94d5733d0e22f82ac9e827637a63ff
|
[
"BSD-3-Clause"
] | null | null | null |
usb/__init__.py
|
HiJasper/pyusb
|
2e4679183c94d5733d0e22f82ac9e827637a63ff
|
[
"BSD-3-Clause"
] | 4
|
2016-05-10T12:20:27.000Z
|
2017-08-12T03:41:42.000Z
|
# Copyright (C) 2009-2014 Wander Lairson Costa
#
# The following terms apply to all files associated
# with the software unless explicitly disclaimed in individual files.
#
# The authors hereby grant permission to use, copy, modify, distribute,
# and license this software and its documentation for any purpose, provided
# that existing copyright notices are retained in all copies and that this
# notice is included verbatim in any distributions. No written agreement,
# license, or royalty fee is required for any of the authorized uses.
# Modifications to this software may be copyrighted by their authors
# and need not follow the licensing terms described here, provided that
# the new terms are clearly indicated on the first page of each file where
# they apply.
#
# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
# MODIFICATIONS.
r"""PyUSB - Easy USB access in Python
This package exports the following modules and subpackages:
core - the main USB implementation
legacy - the compatibility layer with 0.x version
backend - the support for backend implementations.
control - USB standard control requests.
libloader - helper module for backend library loading.
Since version 1.0, main PyUSB implementation lives in the 'usb.core'
module. New applications are encouraged to use it.
"""
import logging
import os
__author__ = 'Wander Lairson Costa'
# Use Semantic Versioning, http://semver.org/
version_info = (1, 0, 0, 'rc1')
__version__ = '%d.%d.%d%s' % version_info
__all__ = ['legacy', 'control', 'core', 'backend', 'util', 'libloader']
def _setup_log():
from usb import _debug
logger = logging.getLogger('usb')
debug_level = os.getenv('PYUSB_DEBUG')
if debug_level is not None:
_debug.enable_tracing(True)
filename = os.getenv('PYUSB_LOG_FILENAME')
LEVELS = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
level = LEVELS.get(debug_level, logging.CRITICAL + 10)
logger.setLevel(level = level)
try:
handler = logging.FileHandler(filename)
except:
handler = logging.StreamHandler()
fmt = logging.Formatter('%(asctime)s %(levelname)s:%(name)s:%(message)s')
handler.setFormatter(fmt)
logger.addHandler(handler)
else:
class NullHandler(logging.Handler):
def emit(self, record):
pass
# We set the log level to avoid delegation to the
# parent log handler (if there is one).
# Thanks to Chris Clark to pointing this out.
logger.setLevel(logging.CRITICAL + 10)
logger.addHandler(NullHandler())
_setup_log()
# We import all 'legacy' module symbols to provide compatibility
# with applications that use 0.x versions.
from usb.legacy import *
| 36.316327
| 81
| 0.70694
|
77b138d6ac2416cf21389f190b778cdeb21b5c28
| 1,318
|
py
|
Python
|
XSum-Topic-ConvS2S/fairseq/criterions/fairseq_criterion.py
|
zsquaredz/XSum
|
10f2fac2e70801e7a3973c864b5a24b61d3f8bfe
|
[
"MIT"
] | 235
|
2018-11-26T16:53:27.000Z
|
2022-03-24T13:04:48.000Z
|
XSum-Topic-ConvS2S/fairseq/criterions/fairseq_criterion.py
|
zsquaredz/XSum
|
10f2fac2e70801e7a3973c864b5a24b61d3f8bfe
|
[
"MIT"
] | 24
|
2018-12-19T01:02:27.000Z
|
2022-01-16T07:47:36.000Z
|
XSum-Topic-ConvS2S/fairseq/criterions/fairseq_criterion.py
|
zsquaredz/XSum
|
10f2fac2e70801e7a3973c864b5a24b61d3f8bfe
|
[
"MIT"
] | 59
|
2018-12-07T18:57:05.000Z
|
2022-03-24T13:34:09.000Z
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from torch.nn.modules.loss import _Loss
class FairseqCriterion(_Loss):
def __init__(self, args, src_dict, dst_dict):
super().__init__()
self.args = args
self.padding_idx = dst_dict.pad()
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
pass
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss, as a Variable
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
raise NotImplementedError
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
raise NotImplementedError
@staticmethod
def grad_denom(sample_sizes):
"""Compute the gradient denominator for a set of sample sizes."""
return sum(sample_sizes)
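# Hedged example (not part of the original fairseq/XSum code): a minimal sketch of how a concrete
# criterion could subclass FairseqCriterion. It assumes the usual fairseq conventions for `sample`
# (keys 'net_input', 'target', 'ntokens') and for the model API (get_normalized_probs), and a
# recent PyTorch; adjust names to the actual interface of this fork before use.
import torch.nn.functional as F
class ExampleCrossEntropyCriterion(FairseqCriterion):
    """Illustrative token-level cross-entropy criterion (sketch only)."""
    def forward(self, model, sample, reduce=True):
        # run the model and normalize its output to log-probabilities
        net_output = model(**sample['net_input'])
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        lprobs = lprobs.view(-1, lprobs.size(-1))
        target = sample['target'].view(-1)
        # padded target positions are ignored via the padding index stored by the base class
        loss = F.nll_loss(lprobs, target, ignore_index=self.padding_idx,
                          reduction='sum' if reduce else 'none')
        sample_size = sample['ntokens']
        logging_output = {'loss': loss.data.sum(), 'ntokens': sample['ntokens'],
                          'sample_size': sample_size}
        return loss, sample_size, logging_output
    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        # average the summed loss over the total number of target tokens
        loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        return {'loss': loss_sum / max(sample_size, 1)}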
| 31.380952
| 78
| 0.682853
|
4dba1499c072995f75d5993d336399b757b5a209
| 4,264
|
py
|
Python
|
optimised_forest.py
|
hcbh96/Random-Forest-FYP
|
4af2c85a4f4d998f616751f9c366329bdc559b13
|
[
"MIT"
] | null | null | null |
optimised_forest.py
|
hcbh96/Random-Forest-FYP
|
4af2c85a4f4d998f616751f9c366329bdc559b13
|
[
"MIT"
] | null | null | null |
optimised_forest.py
|
hcbh96/Random-Forest-FYP
|
4af2c85a4f4d998f616751f9c366329bdc559b13
|
[
"MIT"
] | null | null | null |
"""
In this file I will:
Use a Randomised Search to find optimal RF params
Train an RF with optimised params
Evaluate Accuracy
Evaluate Sensitivity
Evaluate Precision
Evaluate ROC AUC
Determine the features of greatest importance
"""
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split, RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
from confusion_matrix import plot_confusion_matrix
from evaluate_model import evaluate_model, performance_assessor
from sklearn.metrics import confusion_matrix
# Set random seed to ensure reproducible runs
RSEED = 30
dtfm=pd.read_excel('cleaned_data.xlsx', sheet_name='Sheet1')
# Remove columns not to be used in modelling
dtfm = dtfm.drop(columns=['ORDEM','DATA','AMOSTRA','REPLICATA','ANIMAL','PARTIDA','CLIV','CELLS_COUNT'])
print("Describe Output Vars: \n {}".format(dtfm["BLAST_D8"].describe()))
"""
One of the things I need to do is categorise the output data
Where:
- 0 is below-average quality (BLAST_D8 below the mean)
- 1 is above-average quality (BLAST_D8 at or above the mean)
I will use the following statistics to make the decision:
Statistics for each column after outlier removal
CLIV BLAST_D8 CELLS_COUNT
count 313.000000 313.000000 180.000000
mean 72.070374 21.475320 171.115891
std 8.942164 11.093061 42.876076
min 49.350649 0.000000 57.000000
25% 65.079365 12.121212 144.875000
50% 72.151899 20.312500 169.875000
75% 79.487179 29.629630 195.437500
max 90.140845 53.623188 269.000000
For BLAST_D8:
label 0 if BLAST_D8 < 21.475320 (the mean)
label 1 if BLAST_D8 >= 21.475320
"""
# Update Labels in Blast_D8 and CLIV
dtfm['BLAST_D8'] = dtfm['BLAST_D8'].where(dtfm['BLAST_D8'] >= 21.475320, other=0)
dtfm['BLAST_D8'] = dtfm['BLAST_D8'].where(dtfm['BLAST_D8'] < 21.475320, other=1)
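# Note (added for clarity): the two .where() calls above binarise BLAST_D8 at its mean.
# An equivalent alternative to those two lines would have been:
#   dtfm['BLAST_D8'] = (dtfm['BLAST_D8'] >= 21.475320).astype(int)
# (kept as a comment here so the behaviour of the script is unchanged)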
# Check the class balance of the binarised BLAST_D8 labels
print("Blast_D8 value counts:\n {}".format(dtfm['BLAST_D8'].value_counts()))
# Extract the labels
labels = np.array(dtfm.pop('BLAST_D8'))
# 30% examples in test data
train, test, train_labels, test_labels = train_test_split(dtfm, labels, stratify = labels, test_size = 0.3, random_state = RSEED)
#imputation of missing values
train = train.fillna(train.mean())
test = test.fillna(test.mean())
# Features for feature importances
features = list(train.columns)
print("Train Shape: {}".format(train.shape))
print("Test Shape: {}".format(test.shape))
"""
Optimising input params
"""
# Hyperparameter grid
param_grid = {
'n_estimators': np.linspace(10, 200).astype(int),
'max_depth': [None] + list(np.linspace(3, 20).astype(int)),
'max_features': ['auto', 'sqrt', None] + list(np.arange(0.5, 1, 0.1)),
'max_leaf_nodes': [None] + list(np.linspace(10, 50, 500).astype(int)),
'min_samples_split': [2, 5, 10],
'bootstrap': [True, False]
}
# Estimator for use in random search
estimator = RandomForestClassifier(random_state = RSEED)
# Create the random search model
rs = RandomizedSearchCV(estimator, param_grid, n_jobs = -1,
scoring = 'roc_auc', cv = 3,
n_iter = 10, verbose = 1, random_state=RSEED)
# Fit
rs.fit(train, train_labels)
print("Best params:\n{}".format(rs.best_params_))
# Try using the best model
best_model = rs.best_estimator_
train_rf_predictions = best_model.predict(train)
train_rf_probs = best_model.predict_proba(train)[:, 1]
rf_predictions = best_model.predict(test)
rf_probs = best_model.predict_proba(test)[:, 1]
n_nodes = []
max_depths = []
for ind_tree in best_model.estimators_:
n_nodes.append(ind_tree.tree_.node_count)
max_depths.append(ind_tree.tree_.max_depth)
print('Average number of nodes: {}'.format(int(np.mean(n_nodes))))
print('Average maximum depth: {}'.format(int(np.mean(max_depths))))
evaluate_model(rf_predictions, rf_probs, train_rf_predictions, train_rf_probs,test_labels, train_labels, title='Optimised Forest ROC Curve')
# print other metrics
performance_assessor(rf_predictions, rf_probs, train_rf_predictions, train_rf_probs, test_labels, train_labels, logger=True)
# Plot confusion matrix
cm = confusion_matrix(test_labels, rf_predictions)
plot_confusion_matrix(cm, classes = ['Poor Health', 'Good Health'],
title = 'Optimised Forest Confusion Matrix')
| 31.352941
| 140
| 0.720216
|
ad5647cc38318b09d5a7f499fcb9c8bef4d0d8b4
| 69,112
|
py
|
Python
|
interfaces/acados_template/acados_template/acados_ocp_solver.py
|
psiori/acados
|
05398550e9c6f766a2c30356c35cc3a12df747fd
|
[
"BSD-2-Clause"
] | null | null | null |
interfaces/acados_template/acados_template/acados_ocp_solver.py
|
psiori/acados
|
05398550e9c6f766a2c30356c35cc3a12df747fd
|
[
"BSD-2-Clause"
] | 20
|
2019-08-15T15:13:54.000Z
|
2021-04-13T14:43:59.000Z
|
interfaces/acados_template/acados_template/acados_ocp_solver.py
|
psiori/acados
|
05398550e9c6f766a2c30356c35cc3a12df747fd
|
[
"BSD-2-Clause"
] | 2
|
2020-05-29T08:27:11.000Z
|
2020-10-16T10:50:28.000Z
|
# -*- coding: future_fstrings -*-
#
# Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
# Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
# Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
# Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.;
#
import sys, os, json
import numpy as np
from datetime import datetime
from ctypes import *
from copy import deepcopy
from .generate_c_code_explicit_ode import generate_c_code_explicit_ode
from .generate_c_code_implicit_ode import generate_c_code_implicit_ode
from .generate_c_code_gnsf import generate_c_code_gnsf
from .generate_c_code_discrete_dynamics import generate_c_code_discrete_dynamics
from .generate_c_code_constraint import generate_c_code_constraint
from .generate_c_code_nls_cost import generate_c_code_nls_cost
from .generate_c_code_external_cost import generate_c_code_external_cost
from .acados_ocp import AcadosOcp
from .acados_model import acados_model_strip_casadi_symbolics
from .utils import is_column, is_empty, casadi_length, render_template, acados_class2dict,\
format_class_dict, ocp_check_against_layout, np_array_to_list, make_model_consistent,\
set_up_imported_gnsf_model, get_acados_path
def make_ocp_dims_consistent(acados_ocp):
dims = acados_ocp.dims
cost = acados_ocp.cost
constraints = acados_ocp.constraints
model = acados_ocp.model
opts = acados_ocp.solver_options
# nx
if is_column(model.x):
dims.nx = casadi_length(model.x)
else:
raise Exception('model.x should be column vector!')
# nu
if is_empty(model.u):
dims.nu = 0
else:
dims.nu = casadi_length(model.u)
# nz
if is_empty(model.z):
dims.nz = 0
else:
dims.nz = casadi_length(model.z)
# np
if is_empty(model.p):
dims.np = 0
else:
dims.np = casadi_length(model.p)
if acados_ocp.parameter_values.shape[0] != dims.np:
raise Exception('inconsistent dimension np, regarding model.p and parameter_values.' + \
f'\nGot np = {dims.np}, acados_ocp.parameter_values.shape = {acados_ocp.parameter_values.shape[0]}\n')
## cost
# initial stage - if not set, copy fields from path constraints
if cost.cost_type_0 is None:
cost.cost_type_0 = cost.cost_type
cost.W_0 = cost.W
cost.Vx_0 = cost.Vx
cost.Vu_0 = cost.Vu
cost.Vz_0 = cost.Vz
cost.yref_0 = cost.yref
cost.cost_ext_fun_type_0 = cost.cost_ext_fun_type
model.cost_y_expr_0 = model.cost_y_expr
model.cost_expr_ext_cost_0 = model.cost_expr_ext_cost
model.cost_expr_ext_cost_custom_hess_0 = model.cost_expr_ext_cost_custom_hess
if cost.cost_type_0 == 'LINEAR_LS':
ny_0 = cost.W_0.shape[0]
if cost.Vx_0.shape[0] != ny_0 or cost.Vu_0.shape[0] != ny_0:
raise Exception('inconsistent dimension ny_0, regarding W_0, Vx_0, Vu_0.' + \
f'\nGot W_0[{cost.W_0.shape}], Vx_0[{cost.Vx_0.shape}], Vu_0[{cost.Vu_0.shape}]\n')
if dims.nz != 0 and cost.Vz_0.shape[0] != ny_0:
raise Exception('inconsistent dimension ny_0, regarding W_0, Vx_0, Vu_0, Vz_0.' + \
f'\nGot W_0[{cost.W_0.shape}], Vx_0[{cost.Vx_0.shape}], Vu_0[{cost.Vu_0.shape}], Vz_0[{cost.Vz_0.shape}]\n')
if cost.Vx_0.shape[1] != dims.nx and ny_0 != 0:
raise Exception('inconsistent dimension: Vx_0 should have nx columns.')
if cost.Vu_0.shape[1] != dims.nu and ny_0 != 0:
raise Exception('inconsistent dimension: Vu_0 should have nu columns.')
if cost.yref_0.shape[0] != ny_0:
raise Exception('inconsistent dimension: regarding W_0, yref_0.' + \
f'\nGot W_0[{cost.W_0.shape}], yref_0[{cost.yref_0.shape}]\n')
dims.ny_0 = ny_0
elif cost.cost_type_0 == 'NONLINEAR_LS':
ny_0 = cost.W_0.shape[0]
if is_empty(model.cost_y_expr_0) and ny_0 != 0:
raise Exception('inconsistent dimension ny_0: regarding W_0, cost_y_expr.')
elif casadi_length(model.cost_y_expr_0) != ny_0:
raise Exception('inconsistent dimension ny_0: regarding W_0, cost_y_expr.')
if cost.yref_0.shape[0] != ny_0:
raise Exception('inconsistent dimension: regarding W_0, yref_0.' + \
f'\nGot W_0[{cost.W_0.shape}], yref_0[{cost.yref_0.shape}]\n')
dims.ny_0 = ny_0
# path
if cost.cost_type == 'LINEAR_LS':
ny = cost.W.shape[0]
if cost.Vx.shape[0] != ny or cost.Vu.shape[0] != ny:
raise Exception('inconsistent dimension ny, regarding W, Vx, Vu.' + \
f'\nGot W[{cost.W.shape}], Vx[{cost.Vx.shape}], Vu[{cost.Vu.shape}]\n')
if dims.nz != 0 and cost.Vz.shape[0] != ny:
raise Exception('inconsistent dimension ny, regarding W, Vx, Vu, Vz.' + \
f'\nGot W[{cost.W.shape}], Vx[{cost.Vx.shape}], Vu[{cost.Vu.shape}], Vz[{cost.Vz.shape}]\n')
if cost.Vx.shape[1] != dims.nx and ny != 0:
raise Exception('inconsistent dimension: Vx should have nx columns.')
if cost.Vu.shape[1] != dims.nu and ny != 0:
raise Exception('inconsistent dimension: Vu should have nu columns.')
if cost.yref.shape[0] != ny:
raise Exception('inconsistent dimension: regarding W, yref.' + \
f'\nGot W[{cost.W.shape}], yref[{cost.yref.shape}]\n')
dims.ny = ny
elif cost.cost_type == 'NONLINEAR_LS':
ny = cost.W.shape[0]
if is_empty(model.cost_y_expr) and ny != 0:
raise Exception('inconsistent dimension ny: regarding W, cost_y_expr.')
elif casadi_length(model.cost_y_expr) != ny:
raise Exception('inconsistent dimension ny: regarding W, cost_y_expr.')
if cost.yref.shape[0] != ny:
raise Exception('inconsistent dimension: regarding W, yref.' + \
f'\nGot W[{cost.W.shape}], yref[{cost.yref.shape}]\n')
dims.ny = ny
# terminal
if cost.cost_type_e == 'LINEAR_LS':
ny_e = cost.W_e.shape[0]
if cost.Vx_e.shape[0] != ny_e:
raise Exception('inconsistent dimension ny_e: regarding W_e, Vx_e.' + \
f'\nGot W_e[{cost.W_e.shape}], Vx_e[{cost.Vx_e.shape}]')
if cost.Vx_e.shape[1] != dims.nx and ny_e != 0:
raise Exception('inconsistent dimension: Vx_e should have nx columns.')
if cost.yref_e.shape[0] != ny_e:
raise Exception('inconsistent dimension: regarding W_e, yref_e.')
dims.ny_e = ny_e
elif cost.cost_type_e == 'NONLINEAR_LS':
ny_e = cost.W_e.shape[0]
if is_empty(model.cost_y_expr_e) and ny_e != 0:
raise Exception('inconsistent dimension ny_e: regarding W_e, cost_y_expr_e.')
elif casadi_length(model.cost_y_expr_e) != ny_e:
raise Exception('inconsistent dimension ny_e: regarding W_e, cost_y_expr_e.')
if cost.yref_e.shape[0] != ny_e:
raise Exception('inconsistent dimension: regarding W_e, yref_e.')
dims.ny_e = ny_e
## constraints
# initial
if (constraints.lbx_0 == [] and constraints.ubx_0 == []):
dims.nbx_0 = 0
else:
this_shape = constraints.lbx_0.shape
other_shape = constraints.ubx_0.shape
if not this_shape == other_shape:
raise Exception('lbx_0, ubx_0 have different shapes!')
if not is_column(constraints.lbx_0):
raise Exception('lbx_0, ubx_0 must be column vectors!')
dims.nbx_0 = constraints.lbx_0.size
if all(constraints.lbx_0 == constraints.ubx_0) and dims.nbx_0 == dims.nx \
and dims.nbxe_0 is None \
and (constraints.idxbxe_0.shape == constraints.idxbx_0.shape)\
and all(constraints.idxbxe_0 == constraints.idxbx_0):
# case: x0 was set: nbx0 are all equalities.
dims.nbxe_0 = dims.nbx_0
elif dims.nbxe_0 is None:
# case: x0 was not set -> don't assume nbx0 to be equality constraints.
dims.nbxe_0 = 0
# path
nbx = constraints.idxbx.shape[0]
if constraints.ubx.shape[0] != nbx or constraints.lbx.shape[0] != nbx:
raise Exception('inconsistent dimension nbx, regarding idxbx, ubx, lbx.')
else:
dims.nbx = nbx
nbu = constraints.idxbu.shape[0]
if constraints.ubu.shape[0] != nbu or constraints.lbu.shape[0] != nbu:
raise Exception('inconsistent dimension nbu, regarding idxbu, ubu, lbu.')
else:
dims.nbu = nbu
ng = constraints.lg.shape[0]
if constraints.ug.shape[0] != ng or constraints.C.shape[0] != ng \
or constraints.D.shape[0] != ng:
raise Exception('inconsistent dimension ng, regarding lg, ug, C, D.')
else:
dims.ng = ng
if not is_empty(model.con_h_expr):
nh = casadi_length(model.con_h_expr)
else:
nh = 0
if constraints.uh.shape[0] != nh or constraints.lh.shape[0] != nh:
raise Exception('inconsistent dimension nh, regarding lh, uh, con_h_expr.')
else:
dims.nh = nh
if is_empty(model.con_phi_expr):
dims.nphi = 0
dims.nr = 0
else:
dims.nphi = casadi_length(model.con_phi_expr)
if is_empty(model.con_r_expr):
raise Exception('convex over nonlinear constraints: con_phi_expr is nonempty, but con_r_expr is empty')
else:
dims.nr = casadi_length(model.con_r_expr)
# terminal
nbx_e = constraints.idxbx_e.shape[0]
if constraints.ubx_e.shape[0] != nbx_e or constraints.lbx_e.shape[0] != nbx_e:
raise Exception('inconsistent dimension nbx_e, regarding idxbx_e, ubx_e, lbx_e.')
else:
dims.nbx_e = nbx_e
ng_e = constraints.lg_e.shape[0]
if constraints.ug_e.shape[0] != ng_e or constraints.C_e.shape[0] != ng_e:
raise Exception('inconsistent dimension ng_e, regarding lg_e, ug_e, C_e.')
else:
dims.ng_e = ng_e
if not is_empty(model.con_h_expr_e):
nh_e = casadi_length(model.con_h_expr_e)
else:
nh_e = 0
if constraints.uh_e.shape[0] != nh_e or constraints.lh_e.shape[0] != nh_e:
raise Exception('inconsistent dimension nh_e, regarding lh_e, uh_e, con_h_expr_e.')
else:
dims.nh_e = nh_e
if is_empty(model.con_phi_expr_e):
dims.nphi_e = 0
dims.nr_e = 0
else:
dims.nphi_e = casadi_length(model.con_phi_expr_e)
if is_empty(model.con_r_expr_e):
raise Exception('convex over nonlinear constraints: con_phi_expr_e is nonempty, but con_r_expr_e is empty')
else:
dims.nr_e = casadi_length(model.con_r_expr_e)
# Slack dimensions
nsbx = constraints.idxsbx.shape[0]
if is_empty(constraints.lsbx):
constraints.lsbx = np.zeros((nsbx,))
elif constraints.lsbx.shape[0] != nsbx:
raise Exception('inconsistent dimension nsbx, regarding idxsbx, lsbx.')
if is_empty(constraints.usbx):
constraints.usbx = np.zeros((nsbx,))
elif constraints.usbx.shape[0] != nsbx:
raise Exception('inconsistent dimension nsbx, regarding idxsbx, usbx.')
dims.nsbx = nsbx
nsbu = constraints.idxsbu.shape[0]
if is_empty(constraints.lsbu):
constraints.lsbu = np.zeros((nsbu,))
elif constraints.lsbu.shape[0] != nsbu:
raise Exception('inconsistent dimension nsbu, regarding idxsbu, lsbu.')
if is_empty(constraints.usbu):
constraints.usbu = np.zeros((nsbu,))
elif constraints.usbu.shape[0] != nsbu:
raise Exception('inconsistent dimension nsbu, regarding idxsbu, usbu.')
dims.nsbu = nsbu
nsh = constraints.idxsh.shape[0]
if is_empty(constraints.lsh):
constraints.lsh = np.zeros((nsh,))
elif constraints.lsh.shape[0] != nsh:
raise Exception('inconsistent dimension nsh, regarding idxsh, lsh.')
if is_empty(constraints.ush):
constraints.ush = np.zeros((nsh,))
elif constraints.ush.shape[0] != nsh:
raise Exception('inconsistent dimension nsh, regarding idxsh, ush.')
dims.nsh = nsh
nsphi = constraints.idxsphi.shape[0]
if is_empty(constraints.lsphi):
constraints.lsphi = np.zeros((nsphi,))
elif constraints.lsphi.shape[0] != nsphi:
raise Exception('inconsistent dimension nsphi, regarding idxsphi, lsphi.')
if is_empty(constraints.usphi):
constraints.usphi = np.zeros((nsphi,))
elif constraints.usphi.shape[0] != nsphi:
raise Exception('inconsistent dimension nsphi, regarding idxsphi, usphi.')
dims.nsphi = nsphi
nsg = constraints.idxsg.shape[0]
if is_empty(constraints.lsg):
constraints.lsg = np.zeros((nsg,))
elif constraints.lsg.shape[0] != nsg:
raise Exception('inconsistent dimension nsg, regarding idxsg, lsg.')
if is_empty(constraints.usg):
constraints.usg = np.zeros((nsg,))
elif constraints.usg.shape[0] != nsg:
raise Exception('inconsistent dimension nsg, regarding idxsg, usg.')
dims.nsg = nsg
ns = nsbx + nsbu + nsh + nsg + nsphi
wrong_field = ""
if cost.Zl.shape[0] != ns:
wrong_field = "Zl"
dim = cost.Zl.shape[0]
elif cost.Zu.shape[0] != ns:
wrong_field = "Zu"
dim = cost.Zu.shape[0]
elif cost.zl.shape[0] != ns:
wrong_field = "zl"
dim = cost.zl.shape[0]
elif cost.zu.shape[0] != ns:
wrong_field = "zu"
dim = cost.zu.shape[0]
if wrong_field != "":
raise Exception(f'Inconsistent size for field {wrong_field}, with dimension {dim}, \n\t'\
+ f'Detected ns = {ns} = nsbx + nsbu + nsg + nsh + nsphi.\n\t'\
+ f'With nsbx = {nsbx}, nsbu = {nsbu}, nsg = {nsg}, nsh = {nsh}, nsphi = {nsphi}')
dims.ns = ns
nsbx_e = constraints.idxsbx_e.shape[0]
if is_empty(constraints.lsbx_e):
constraints.lsbx_e = np.zeros((nsbx_e,))
elif constraints.lsbx_e.shape[0] != nsbx_e:
raise Exception('inconsistent dimension nsbx_e, regarding idxsbx_e, lsbx_e.')
if is_empty(constraints.usbx_e):
constraints.usbx_e = np.zeros((nsbx_e,))
elif constraints.usbx_e.shape[0] != nsbx_e:
raise Exception('inconsistent dimension nsbx_e, regarding idxsbx_e, usbx_e.')
dims.nsbx_e = nsbx_e
nsh_e = constraints.idxsh_e.shape[0]
if is_empty(constraints.lsh_e):
constraints.lsh_e = np.zeros((nsh_e,))
elif constraints.lsh_e.shape[0] != nsh_e:
raise Exception('inconsistent dimension nsh_e, regarding idxsh_e, lsh_e.')
if is_empty(constraints.ush_e):
constraints.ush_e = np.zeros((nsh_e,))
elif constraints.ush_e.shape[0] != nsh_e:
raise Exception('inconsistent dimension nsh_e, regarding idxsh_e, ush_e.')
dims.nsh_e = nsh_e
nsg_e = constraints.idxsg_e.shape[0]
if is_empty(constraints.lsg_e):
constraints.lsg_e = np.zeros((nsg_e,))
elif constraints.lsg_e.shape[0] != nsg_e:
raise Exception('inconsistent dimension nsg_e, regarding idxsg_e, lsg_e.')
if is_empty(constraints.usg_e):
constraints.usg_e = np.zeros((nsg_e,))
elif constraints.usg_e.shape[0] != nsg_e:
raise Exception('inconsistent dimension nsg_e, regarding idxsg_e, usg_e.')
dims.nsg_e = nsg_e
nsphi_e = constraints.idxsphi_e.shape[0]
if is_empty(constraints.lsphi_e):
constraints.lsphi_e = np.zeros((nsphi_e,))
elif constraints.lsphi_e.shape[0] != nsphi_e:
raise Exception('inconsistent dimension nsphi_e, regarding idxsphi_e, lsphi_e.')
if is_empty(constraints.usphi_e):
constraints.usphi_e = np.zeros((nsphi_e,))
elif constraints.usphi_e.shape[0] != nsphi_e:
raise Exception('inconsistent dimension nsphi_e, regarding idxsphi_e, usphi_e.')
dims.nsphi_e = nsphi_e
# terminal
ns_e = nsbx_e + nsh_e + nsg_e + nsphi_e
wrong_field = ""
if cost.Zl_e.shape[0] != ns_e:
wrong_field = "Zl_e"
dim = cost.Zl_e.shape[0]
elif cost.Zu_e.shape[0] != ns_e:
wrong_field = "Zu_e"
dim = cost.Zu_e.shape[0]
elif cost.zl_e.shape[0] != ns_e:
wrong_field = "zl_e"
dim = cost.zl_e.shape[0]
elif cost.zu_e.shape[0] != ns_e:
wrong_field = "zu_e"
dim = cost.zu_e.shape[0]
if wrong_field != "":
raise Exception(f'Inconsistent size for field {wrong_field}, with dimension {dim}, \n\t'\
+ f'Detected ns_e = {ns_e} = nsbx_e + nsg_e + nsh_e + nsphi_e.\n\t'\
+ f'With nsbx_e = {nsbx_e}, nsg_e = {nsg_e}, nsh_e = {nsh_e}, nsphi_e = {nsphi_e}')
dims.ns_e = ns_e
# discretization
if is_empty(opts.time_steps) and is_empty(opts.shooting_nodes):
# uniform discretization
opts.time_steps = opts.tf / dims.N * np.ones((dims.N,))
elif not is_empty(opts.shooting_nodes):
if np.shape(opts.shooting_nodes)[0] != dims.N+1:
raise Exception('inconsistent dimension N, regarding shooting_nodes.')
time_steps = opts.shooting_nodes[1:] - opts.shooting_nodes[0:-1]
# identify constant time-steps: due to numerical reasons the content of time_steps might vary a bit
delta_time_steps = time_steps[1:] - time_steps[0:-1]
avg_time_steps = np.average(time_steps)
# criterion for constant time-step detection: the min/max difference in values normalized by the average
check_const_time_step = (np.max(delta_time_steps) - np.min(delta_time_steps)) / avg_time_steps
# if the criterion is small, we have a constant time-step
if check_const_time_step < 1e-9:
time_steps[:] = avg_time_steps # if we have a constant time-step: apply the average time-step
opts.time_steps = time_steps
elif (not is_empty(opts.time_steps)) and (not is_empty(opts.shooting_nodes)):
raise Exception('Please provide either time_steps or shooting_nodes for nonuniform discretization')
tf = np.sum(opts.time_steps)
if (tf - opts.tf) / tf > 1e-15:
raise Exception(f'Inconsistent discretization: {opts.tf}'\
f' = tf != sum(opts.time_steps) = {tf}.')
# num_steps
if isinstance(opts.sim_method_num_steps, np.ndarray) and opts.sim_method_num_steps.size == 1:
opts.sim_method_num_steps = opts.sim_method_num_steps.item()
if isinstance(opts.sim_method_num_steps, (int, float)) and opts.sim_method_num_steps % 1 == 0:
opts.sim_method_num_steps = opts.sim_method_num_steps * np.ones((dims.N,), dtype=np.int64)
elif isinstance(opts.sim_method_num_steps, np.ndarray) and opts.sim_method_num_steps.size == dims.N \
and np.all(np.equal(np.mod(opts.sim_method_num_steps, 1), 0)):
opts.sim_method_num_steps = np.reshape(opts.sim_method_num_steps, (dims.N,)).astype(np.int64)
else:
raise Exception("Wrong value for sim_method_num_steps. Should be either int or array of ints of shape (N,).")
# num_stages
if isinstance(opts.sim_method_num_stages, np.ndarray) and opts.sim_method_num_stages.size == 1:
opts.sim_method_num_stages = opts.sim_method_num_stages.item()
if isinstance(opts.sim_method_num_stages, (int, float)) and opts.sim_method_num_stages % 1 == 0:
opts.sim_method_num_stages = opts.sim_method_num_stages * np.ones((dims.N,), dtype=np.int64)
elif isinstance(opts.sim_method_num_stages, np.ndarray) and opts.sim_method_num_stages.size == dims.N \
and np.all(np.equal(np.mod(opts.sim_method_num_stages, 1), 0)):
opts.sim_method_num_stages = np.reshape(opts.sim_method_num_stages, (dims.N,)).astype(np.int64)
else:
raise Exception("Wrong value for sim_method_num_stages. Should be either int or array of ints of shape (N,).")
# jac_reuse
if isinstance(opts.sim_method_jac_reuse, np.ndarray) and opts.sim_method_jac_reuse.size == 1:
opts.sim_method_jac_reuse = opts.sim_method_jac_reuse.item()
if isinstance(opts.sim_method_jac_reuse, (int, float)) and opts.sim_method_jac_reuse % 1 == 0:
opts.sim_method_jac_reuse = opts.sim_method_jac_reuse * np.ones((dims.N,), dtype=np.int64)
elif isinstance(opts.sim_method_jac_reuse, np.ndarray) and opts.sim_method_jac_reuse.size == dims.N \
and np.all(np.equal(np.mod(opts.sim_method_jac_reuse, 1), 0)):
opts.sim_method_jac_reuse = np.reshape(opts.sim_method_jac_reuse, (dims.N,)).astype(np.int64)
else:
raise Exception("Wrong value for sim_method_jac_reuse. Should be either int or array of ints of shape (N,).")
def get_ocp_nlp_layout():
current_module = sys.modules[__name__]
acados_path = os.path.dirname(current_module.__file__)
with open(acados_path + '/acados_layout.json', 'r') as f:
ocp_nlp_layout = json.load(f)
return ocp_nlp_layout
def ocp_formulation_json_dump(acados_ocp, simulink_opts, json_file='acados_ocp_nlp.json'):
# Load acados_ocp_nlp structure description
ocp_layout = get_ocp_nlp_layout()
# Copy input ocp object dictionary
ocp_nlp_dict = dict(deepcopy(acados_ocp).__dict__)
# TODO: maybe make one function with formatting
for acados_struct, v in ocp_layout.items():
# skip non dict attributes
if not isinstance(v, dict): continue
# setattr(ocp_nlp, acados_struct, dict(getattr(acados_ocp, acados_struct).__dict__))
# Copy ocp object attributes dictionaries
ocp_nlp_dict[acados_struct]=dict(getattr(acados_ocp, acados_struct).__dict__)
ocp_nlp_dict = format_class_dict(ocp_nlp_dict)
# strip symbolics
ocp_nlp_dict['model'] = acados_model_strip_casadi_symbolics(ocp_nlp_dict['model'])
# strip shooting_nodes
ocp_nlp_dict['solver_options'].pop('shooting_nodes', None)
dims_dict = acados_class2dict(acados_ocp.dims)
ocp_check_against_layout(ocp_nlp_dict, dims_dict)
# add simulink options
ocp_nlp_dict['simulink_opts'] = simulink_opts
with open(json_file, 'w') as f:
json.dump(ocp_nlp_dict, f, default=np_array_to_list, indent=4, sort_keys=True)
def ocp_formulation_json_load(json_file='acados_ocp_nlp.json'):
# Load acados_ocp_nlp structure description
ocp_layout = get_ocp_nlp_layout()
with open(json_file, 'r') as f:
ocp_nlp_json = json.load(f)
ocp_nlp_dict = json2dict(ocp_nlp_json, ocp_nlp_json['dims'])
# Instantiate AcadosOcp object
acados_ocp = AcadosOcp()
# load class dict
acados_ocp.__dict__ = ocp_nlp_dict
# load class attribute dicts: dims, constraints, etc.
for acados_struct, v in ocp_layout.items():
# skip non dict attributes
if not isinstance(v, dict): continue
acados_attribute = getattr(acados_ocp, acados_struct)
acados_attribute.__dict__ = ocp_nlp_dict[acados_struct]
setattr(acados_ocp, acados_struct, acados_attribute)
return acados_ocp
def ocp_generate_external_functions(acados_ocp, model):
model = make_model_consistent(model)
if acados_ocp.solver_options.hessian_approx == 'EXACT':
opts = dict(generate_hess=1)
else:
opts = dict(generate_hess=0)
code_export_dir = acados_ocp.code_export_directory
opts['code_export_directory'] = code_export_dir
if acados_ocp.model.dyn_ext_fun_type != 'casadi':
raise Exception("ocp_generate_external_functions: dyn_ext_fun_type only supports 'casadi' for now.\
Extending the Python interface with generic function support is welcome.")
if acados_ocp.solver_options.integrator_type == 'ERK':
# explicit model -- generate C code
generate_c_code_explicit_ode(model, opts)
elif acados_ocp.solver_options.integrator_type == 'IRK':
# implicit model -- generate C code
generate_c_code_implicit_ode(model, opts)
elif acados_ocp.solver_options.integrator_type == 'LIFTED_IRK':
generate_c_code_implicit_ode(model, opts)
elif acados_ocp.solver_options.integrator_type == 'GNSF':
generate_c_code_gnsf(model, opts)
elif acados_ocp.solver_options.integrator_type == 'DISCRETE':
generate_c_code_discrete_dynamics(model, opts)
else:
raise Exception("ocp_generate_external_functions: unknown integrator type.")
if acados_ocp.dims.nphi > 0 or acados_ocp.dims.nh > 0:
generate_c_code_constraint(model, model.name, False, opts)
if acados_ocp.dims.nphi_e > 0 or acados_ocp.dims.nh_e > 0:
generate_c_code_constraint(model, model.name, True, opts)
# dummy matrices
if not acados_ocp.cost.cost_type_0 == 'LINEAR_LS':
acados_ocp.cost.Vx_0 = np.zeros((acados_ocp.dims.ny_0, acados_ocp.dims.nx))
acados_ocp.cost.Vu_0 = np.zeros((acados_ocp.dims.ny_0, acados_ocp.dims.nu))
if not acados_ocp.cost.cost_type == 'LINEAR_LS':
acados_ocp.cost.Vx = np.zeros((acados_ocp.dims.ny, acados_ocp.dims.nx))
acados_ocp.cost.Vu = np.zeros((acados_ocp.dims.ny, acados_ocp.dims.nu))
if not acados_ocp.cost.cost_type_e == 'LINEAR_LS':
acados_ocp.cost.Vx_e = np.zeros((acados_ocp.dims.ny_e, acados_ocp.dims.nx))
if acados_ocp.cost.cost_type_0 == 'NONLINEAR_LS':
generate_c_code_nls_cost(model, model.name, 'initial', opts)
elif acados_ocp.cost.cost_type_0 == 'EXTERNAL':
generate_c_code_external_cost(model, 'initial', opts)
if acados_ocp.cost.cost_type == 'NONLINEAR_LS':
generate_c_code_nls_cost(model, model.name, 'path', opts)
elif acados_ocp.cost.cost_type == 'EXTERNAL':
generate_c_code_external_cost(model, 'path', opts)
if acados_ocp.cost.cost_type_e == 'NONLINEAR_LS':
generate_c_code_nls_cost(model, model.name, 'terminal', opts)
elif acados_ocp.cost.cost_type_e == 'EXTERNAL':
generate_c_code_external_cost(model, 'terminal', opts)
def ocp_render_templates(acados_ocp, json_file):
name = acados_ocp.model.name
# setting up loader and environment
json_path = '{cwd}/{json_file}'.format(
cwd=os.getcwd(),
json_file=json_file)
if not os.path.exists(json_path):
raise Exception('{} not found!'.format(json_path))
code_export_dir = acados_ocp.code_export_directory
template_dir = code_export_dir
## Render templates
in_file = 'main.in.c'
out_file = f'main_{name}.c'
render_template(in_file, out_file, template_dir, json_path)
in_file = 'acados_solver.in.c'
out_file = f'acados_solver_{name}.c'
render_template(in_file, out_file, template_dir, json_path)
in_file = 'acados_solver.in.h'
out_file = f'acados_solver_{name}.h'
render_template(in_file, out_file, template_dir, json_path)
in_file = 'Makefile.in'
out_file = 'Makefile'
render_template(in_file, out_file, template_dir, json_path)
in_file = 'acados_solver_sfun.in.c'
out_file = f'acados_solver_sfunction_{name}.c'
render_template(in_file, out_file, template_dir, json_path)
in_file = 'make_sfun.in.m'
out_file = f'make_sfun_{name}.m'
render_template(in_file, out_file, template_dir, json_path)
# sim
in_file = 'acados_sim_solver.in.c'
out_file = f'acados_sim_solver_{name}.c'
render_template(in_file, out_file, template_dir, json_path)
in_file = 'acados_sim_solver.in.h'
out_file = f'acados_sim_solver_{name}.h'
render_template(in_file, out_file, template_dir, json_path)
in_file = 'main_sim.in.c'
out_file = f'main_sim_{name}.c'
render_template(in_file, out_file, template_dir, json_path)
## folder model
template_dir = f'{code_export_dir}/{name}_model/'
in_file = 'model.in.h'
out_file = f'{name}_model.h'
render_template(in_file, out_file, template_dir, json_path)
# constraints on convex over nonlinear function
if acados_ocp.constraints.constr_type == 'BGP' and acados_ocp.dims.nphi > 0:
# constraints on outer function
template_dir = f'{code_export_dir}/{name}_constraints/'
in_file = 'phi_constraint.in.h'
out_file = f'{name}_phi_constraint.h'
render_template(in_file, out_file, template_dir, json_path)
# terminal constraints on convex over nonlinear function
if acados_ocp.constraints.constr_type_e == 'BGP' and acados_ocp.dims.nphi_e > 0:
# terminal constraints on outer function
template_dir = f'{code_export_dir}/{name}_constraints/'
in_file = 'phi_e_constraint.in.h'
out_file = f'{name}_phi_e_constraint.h'
render_template(in_file, out_file, template_dir, json_path)
# nonlinear constraints
if acados_ocp.constraints.constr_type == 'BGH' and acados_ocp.dims.nh > 0:
template_dir = f'{code_export_dir}/{name}_constraints/'
in_file = 'h_constraint.in.h'
out_file = f'{name}_h_constraint.h'
render_template(in_file, out_file, template_dir, json_path)
# terminal nonlinear constraints
if acados_ocp.constraints.constr_type_e == 'BGH' and acados_ocp.dims.nh_e > 0:
template_dir = f'{code_export_dir}/{name}_constraints/'
in_file = 'h_e_constraint.in.h'
out_file = f'{name}_h_e_constraint.h'
render_template(in_file, out_file, template_dir, json_path)
# initial stage Nonlinear LS cost function
if acados_ocp.cost.cost_type_0 == 'NONLINEAR_LS':
template_dir = f'{code_export_dir}/{name}_cost/'
in_file = 'cost_y_0_fun.in.h'
out_file = f'{name}_cost_y_0_fun.h'
render_template(in_file, out_file, template_dir, json_path)
# external cost - terminal
elif acados_ocp.cost.cost_type_0 == 'EXTERNAL':
template_dir = f'{code_export_dir}/{name}_cost/'
in_file = 'external_cost_0.in.h'
out_file = f'{name}_external_cost_0.h'
render_template(in_file, out_file, template_dir, json_path)
# path Nonlinear LS cost function
if acados_ocp.cost.cost_type == 'NONLINEAR_LS':
template_dir = f'{code_export_dir}/{name}_cost/'
in_file = 'cost_y_fun.in.h'
out_file = f'{name}_cost_y_fun.h'
render_template(in_file, out_file, template_dir, json_path)
# terminal Nonlinear LS cost function
if acados_ocp.cost.cost_type_e == 'NONLINEAR_LS':
template_dir = f'{code_export_dir}/{name}_cost/'
in_file = 'cost_y_e_fun.in.h'
out_file = f'{name}_cost_y_e_fun.h'
render_template(in_file, out_file, template_dir, json_path)
# external cost
if acados_ocp.cost.cost_type == 'EXTERNAL':
template_dir = f'{code_export_dir}/{name}_cost/'
in_file = 'external_cost.in.h'
out_file = f'{name}_external_cost.h'
render_template(in_file, out_file, template_dir, json_path)
# external cost - terminal
if acados_ocp.cost.cost_type_e == 'EXTERNAL':
template_dir = f'{code_export_dir}/{name}_cost/'
in_file = 'external_cost_e.in.h'
out_file = f'{name}_external_cost_e.h'
render_template(in_file, out_file, template_dir, json_path)
def remove_x0_elimination(acados_ocp):
acados_ocp.constraints.idxbxe_0 = np.zeros((0,))
acados_ocp.dims.nbxe_0 = 0
class AcadosOcpSolver:
"""
Class to interact with the acados ocp solver C object.
:param acados_ocp: type AcadosOcp - description of the OCP for acados
:param json_file: name for the json file used to render the templated code - default: acados_ocp_nlp.json
:param simulink_opts: Options to configure Simulink S-function blocks, mainly to activate possible Inputs and Outputs
:param build: Option to disable rendering templates and compiling if previously built - default: True
"""
if sys.platform=="win32":
from ctypes import wintypes
dlclose = WinDLL('kernel32', use_last_error=True).FreeLibrary
dlclose.argtypes = [wintypes.HMODULE]
else:
dlclose = CDLL(None).dlclose
dlclose.argtypes = [c_void_p]
def __init__(self, acados_ocp, json_file='acados_ocp_nlp.json', simulink_opts=None, build=True):
self.solver_created = False
self.N = acados_ocp.dims.N
model = acados_ocp.model
if simulink_opts is None:
acados_path = get_acados_path()
json_path = os.path.join(acados_path, 'interfaces/acados_template/acados_template')
json_filename = json_path + '/simulink_default_opts.json'
print(f'using simulink json-file: "{json_filename}"')
with open(json_filename, 'r') as f:
simulink_opts = json.load(f)
# make dims consistent
make_ocp_dims_consistent(acados_ocp)
# module dependent post processing
if acados_ocp.solver_options.integrator_type == 'GNSF':
set_up_imported_gnsf_model(acados_ocp)
if acados_ocp.solver_options.qp_solver == 'PARTIAL_CONDENSING_QPDUNES':
remove_x0_elimination(acados_ocp)
# set integrator time automatically
acados_ocp.solver_options.Tsim = acados_ocp.solver_options.time_steps[0]
# generate external functions
ocp_generate_external_functions(acados_ocp, model)
# dump to json
ocp_formulation_json_dump(acados_ocp, simulink_opts, json_file)
if build:
# render templates
ocp_render_templates(acados_ocp, json_file)
## Compile solver
code_export_dir = acados_ocp.code_export_directory
cwd=os.getcwd()
os.chdir(code_export_dir)
os.system('make clean_ocp_shared_lib')
os.system('make ocp_shared_lib')
os.chdir(cwd)
self.shared_lib_name = f'{code_export_dir}/libacados_ocp_solver_{model.name}.so'
# get shared_lib
self.shared_lib = CDLL(self.shared_lib_name)
# create capsule
getattr(self.shared_lib, f"{model.name}_acados_create_capsule").restype = c_void_p
self.capsule = getattr(self.shared_lib, f"{model.name}_acados_create_capsule")()
# create solver
getattr(self.shared_lib, f"{model.name}_acados_create").argtypes = [c_void_p]
getattr(self.shared_lib, f"{model.name}_acados_create").restype = c_int
assert getattr(self.shared_lib, f"{model.name}_acados_create")(self.capsule)==0
self.solver_created = True
self.acados_ocp = acados_ocp
# get pointers solver
self.__get_pointers_solver()
def __get_pointers_solver(self):
"""
Private function to get the pointers for solver
"""
# get pointers solver
model = self.acados_ocp.model
getattr(self.shared_lib, f"{model.name}_acados_get_nlp_opts").argtypes = [c_void_p]
getattr(self.shared_lib, f"{model.name}_acados_get_nlp_opts").restype = c_void_p
self.nlp_opts = getattr(self.shared_lib, f"{model.name}_acados_get_nlp_opts")(self.capsule)
getattr(self.shared_lib, f"{model.name}_acados_get_nlp_dims").argtypes = [c_void_p]
getattr(self.shared_lib, f"{model.name}_acados_get_nlp_dims").restype = c_void_p
self.nlp_dims = getattr(self.shared_lib, f"{model.name}_acados_get_nlp_dims")(self.capsule)
getattr(self.shared_lib, f"{model.name}_acados_get_nlp_config").argtypes = [c_void_p]
getattr(self.shared_lib, f"{model.name}_acados_get_nlp_config").restype = c_void_p
self.nlp_config = getattr(self.shared_lib, f"{model.name}_acados_get_nlp_config")(self.capsule)
getattr(self.shared_lib, f"{model.name}_acados_get_nlp_out").argtypes = [c_void_p]
getattr(self.shared_lib, f"{model.name}_acados_get_nlp_out").restype = c_void_p
self.nlp_out = getattr(self.shared_lib, f"{model.name}_acados_get_nlp_out")(self.capsule)
getattr(self.shared_lib, f"{model.name}_acados_get_nlp_in").argtypes = [c_void_p]
getattr(self.shared_lib, f"{model.name}_acados_get_nlp_in").restype = c_void_p
self.nlp_in = getattr(self.shared_lib, f"{model.name}_acados_get_nlp_in")(self.capsule)
getattr(self.shared_lib, f"{model.name}_acados_get_nlp_solver").argtypes = [c_void_p]
getattr(self.shared_lib, f"{model.name}_acados_get_nlp_solver").restype = c_void_p
self.nlp_solver = getattr(self.shared_lib, f"{model.name}_acados_get_nlp_solver")(self.capsule)
def solve(self):
"""
Solve the ocp with current input.
"""
model = self.acados_ocp.model
getattr(self.shared_lib, f"{model.name}_acados_solve").argtypes = [c_void_p]
getattr(self.shared_lib, f"{model.name}_acados_solve").restype = c_int
status = getattr(self.shared_lib, f"{model.name}_acados_solve")(self.capsule)
return status
def set_new_time_steps(self, new_time_steps):
"""
Set new time steps before solving. Only reload library without code generation but with new time steps.
:param new_time_steps: vector of new time steps for the solver
.. note:: This allows for different use-cases: either set a new number of time steps, or set a new distribution of
the shooting nodes without changing their number, e.g., to reach a different final time. Neither case
requires a new code export and compilation.
"""
# unlikely but still possible
if not self.solver_created:
raise Exception('Solver was not yet created!')
# check if time steps really changed in value
if np.array_equal(self.acados_ocp.solver_options.time_steps, new_time_steps):
return
N = new_time_steps.size
model = self.acados_ocp.model
new_time_steps_data = cast(new_time_steps.ctypes.data, POINTER(c_double))
# check if recreation of acados is necessary (no need to recreate acados if sizes are identical)
if self.acados_ocp.solver_options.time_steps.size == N:
getattr(self.shared_lib, f"{model.name}_acados_update_time_steps").argtypes = [c_void_p, c_int, c_void_p]
getattr(self.shared_lib, f"{model.name}_acados_update_time_steps").restype = c_int
assert getattr(self.shared_lib, f"{model.name}_acados_update_time_steps")(self.capsule, N, new_time_steps_data) == 0
else: # recreate the solver with the new time steps
self.solver_created = False
# delete old memory (analog to __del__)
getattr(self.shared_lib, f"{model.name}_acados_free").argtypes = [c_void_p]
getattr(self.shared_lib, f"{model.name}_acados_free").restype = c_int
getattr(self.shared_lib, f"{model.name}_acados_free")(self.capsule)
# store N and new time steps
self.N = self.acados_ocp.dims.N = N
self.acados_ocp.solver_options.time_steps = new_time_steps
self.acados_ocp.solver_options.Tsim = self.acados_ocp.solver_options.time_steps[0]
# create solver with new time steps
getattr(self.shared_lib, f"{model.name}_acados_create_with_discretization").argtypes = [c_void_p, c_int, c_void_p]
getattr(self.shared_lib, f"{model.name}_acados_create_with_discretization").restype = c_int
assert getattr(self.shared_lib, f"{model.name}_acados_create_with_discretization")(self.capsule, N, new_time_steps_data) == 0
self.solver_created = True
# get pointers solver
self.__get_pointers_solver()
def get(self, stage_, field_):
"""
Get the last solution of the solver:
:param stage: integer corresponding to shooting node
:param field: string in ['x', 'u', 'z', 'pi', 'lam', 't', 'sl', 'su',]
.. note:: regarding lam, t: \n
the inequalities are internally organized in the following order: \n
[ lbu lbx lg lh lphi ubu ubx ug uh uphi; \n
lsbu lsbx lsg lsh lsphi usbu usbx usg ush usphi]
.. note:: pi: multipliers for dynamics equality constraints \n
lam: multipliers for inequalities \n
t: slack variables corresponding to evaluation of all inequalities (at the solution) \n
sl: slack variables of soft lower inequality constraints \n
su: slack variables of soft upper inequality constraints \n
"""
out_fields = ['x', 'u', 'z', 'pi', 'lam', 't']
mem_fields = ['sl', 'su']
field = field_
field = field.encode('utf-8')
if (field_ not in out_fields + mem_fields):
raise Exception('AcadosOcpSolver.get(): {} is an invalid argument.\
\n Possible values are {}. Exiting.'.format(field_, out_fields + mem_fields))
if not isinstance(stage_, int):
raise Exception('AcadosOcpSolver.get(): stage index must be Integer.')
if stage_ < 0 or stage_ > self.N:
raise Exception('AcadosOcpSolver.get(): stage index must be in [0, {}], got: {}.'.format(self.N, stage_))
if stage_ == self.N and field_ == 'pi':
raise Exception('AcadosOcpSolver.get(): field {} does not exist at final stage {}.'\
.format(field_, stage_))
self.shared_lib.ocp_nlp_dims_get_from_attr.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p]
self.shared_lib.ocp_nlp_dims_get_from_attr.restype = c_int
dims = self.shared_lib.ocp_nlp_dims_get_from_attr(self.nlp_config, \
self.nlp_dims, self.nlp_out, stage_, field)
out = np.ascontiguousarray(np.zeros((dims,)), dtype=np.float64)
out_data = cast(out.ctypes.data, POINTER(c_double))
if (field_ in out_fields):
self.shared_lib.ocp_nlp_out_get.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_out_get(self.nlp_config, \
self.nlp_dims, self.nlp_out, stage_, field, out_data)
elif field_ in mem_fields:
self.shared_lib.ocp_nlp_get_at_stage.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_get_at_stage(self.nlp_config, \
self.nlp_dims, self.nlp_solver, stage_, field, out_data)
return out
def print_statistics(self):
"""
prints statistics of previous solver run as a table:
- iter: iteration number
- res_stat: stationarity residual
- res_eq: residual wrt equality constraints (dynamics)
- res_ineq: residual wrt inequality constraints (constraints)
- res_comp: residual wrt complementarity conditions
- qp_stat: status of QP solver
- qp_iter: number of QP iterations
- qp_res_stat: stationarity residual of the last QP solution
- qp_res_eq: residual wrt equality constraints (dynamics) of the last QP solution
- qp_res_ineq: residual wrt inequality constraints (constraints) of the last QP solution
- qp_res_comp: residual wrt complementarity conditions of the last QP solution
"""
stat = self.get_stats("statistics")
if self.acados_ocp.solver_options.nlp_solver_type == 'SQP':
print('\niter\tres_stat\tres_eq\t\tres_ineq\tres_comp\tqp_stat\tqp_iter')
if stat.shape[0]>7:
print('\tqp_res_stat\tqp_res_eq\tqp_res_ineq\tqp_res_comp')
for jj in range(stat.shape[1]):
print('{:d}\t{:e}\t{:e}\t{:e}\t{:e}\t{:d}\t{:d}'.format( \
int(stat[0][jj]), stat[1][jj], stat[2][jj], \
stat[3][jj], stat[4][jj], int(stat[5][jj]), int(stat[6][jj])))
if stat.shape[0]>7:
print('\t{:e}\t{:e}\t{:e}\t{:e}'.format( \
stat[7][jj], stat[8][jj], stat[9][jj], stat[10][jj]))
print('\n')
elif self.acados_ocp.solver_options.nlp_solver_type == 'SQP_RTI':
print('\niter\tqp_stat\tqp_iter')
if stat.shape[0]>3:
print('\tqp_res_stat\tqp_res_eq\tqp_res_ineq\tqp_res_comp')
for jj in range(stat.shape[1]):
print('{:d}\t{:d}\t{:d}'.format( int(stat[0][jj]), int(stat[1][jj]), int(stat[2][jj])))
if stat.shape[0]>3:
print('\t{:e}\t{:e}\t{:e}\t{:e}'.format( \
stat[3][jj], stat[4][jj], stat[5][jj], stat[6][jj]))
print('\n')
return
def store_iterate(self, filename='', overwrite=False):
"""
Stores the current iterate of the ocp solver in a json file.
:param filename: if not set, use model_name + timestamp + '.json'
:param overwrite: if false and filename exists add timestamp to filename
"""
if filename == '':
filename += self.acados_ocp.model.name + '_' + 'iterate' + '.json'
if not overwrite:
# append timestamp
if os.path.isfile(filename):
filename = filename[:-5]
filename += datetime.utcnow().strftime('%Y-%m-%d-%H:%M:%S.%f') + '.json'
# get iterate:
solution = dict()
for i in range(self.N+1):
solution['x_'+str(i)] = self.get(i,'x')
solution['u_'+str(i)] = self.get(i,'u')
solution['z_'+str(i)] = self.get(i,'z')
solution['lam_'+str(i)] = self.get(i,'lam')
solution['t_'+str(i)] = self.get(i, 't')
solution['sl_'+str(i)] = self.get(i, 'sl')
solution['su_'+str(i)] = self.get(i, 'su')
for i in range(self.N):
solution['pi_'+str(i)] = self.get(i,'pi')
# save
with open(filename, 'w') as f:
json.dump(solution, f, default=np_array_to_list, indent=4, sort_keys=True)
print("stored current iterate in ", os.path.join(os.getcwd(), filename))
def load_iterate(self, filename):
"""
Loads the iterate stored in json file with filename into the ocp solver.
"""
if not os.path.isfile(filename):
raise Exception('load_iterate: failed, file does not exist: ' + os.path.join(os.getcwd(), filename))
with open(filename, 'r') as f:
solution = json.load(f)
for key in solution.keys():
(field, stage) = key.split('_')
self.set(int(stage), field, np.array(solution[key]))
def get_stats(self, field_):
"""
Get the information of the last solver call.
:param field: string in ['statistics', 'time_tot', 'time_lin', 'time_sim', 'time_sim_ad', 'time_sim_la', 'time_qp', 'time_qp_solver_call', 'time_qp_xcond', 'time_glob', 'time_reg', 'sqp_iter', 'qp_iter', 'stat_m', 'stat_n']
"""
fields = ['time_tot', # total cpu time previous call
'time_lin', # cpu time for linearization
'time_sim', # cpu time for integrator
'time_sim_ad', # cpu time for integrator contribution of external function calls
'time_sim_la', # cpu time for integrator contribution of linear algebra
'time_qp', # cpu time qp solution
'time_qp_solver_call', # cpu time inside qp solver (without converting the QP)
'time_qp_xcond',
'time_glob', # cpu time globalization
'time_reg', # cpu time regularization
'sqp_iter', # number of SQP iterations
'qp_iter', # vector of QP iterations for last SQP call
'statistics', # table with info about last iteration
'stat_m',
'stat_n',
]
field = field_
field = field.encode('utf-8')
if (field_ not in fields):
raise Exception('AcadosOcpSolver.get_stats(): {} is not a valid argument.\
\n Possible values are {}. Exiting.'.format(field_, fields))
if field_ in ['sqp_iter', 'stat_m', 'stat_n']:
out = np.ascontiguousarray(np.zeros((1,)), dtype=np.int64)
out_data = cast(out.ctypes.data, POINTER(c_int64))
elif field_ == 'statistics':
sqp_iter = self.get_stats("sqp_iter")
stat_m = self.get_stats("stat_m")
stat_n = self.get_stats("stat_n")
min_size = min([stat_m, sqp_iter+1])
out = np.ascontiguousarray(
np.zeros( (stat_n[0]+1, min_size[0]) ), dtype=np.float64)
out_data = cast(out.ctypes.data, POINTER(c_double))
elif field_ == 'qp_iter':
full_stats = self.get_stats('statistics')
if self.acados_ocp.solver_options.nlp_solver_type == 'SQP':
out = full_stats[6, :]
elif self.acados_ocp.solver_options.nlp_solver_type == 'SQP_RTI':
out = full_stats[2, :]
else:
out = np.ascontiguousarray(np.zeros((1,)), dtype=np.float64)
out_data = cast(out.ctypes.data, POINTER(c_double))
if not field_ == 'qp_iter':
self.shared_lib.ocp_nlp_get.argtypes = [c_void_p, c_void_p, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_get(self.nlp_config, self.nlp_solver, field, out_data)
return out
def get_cost(self):
"""
Returns the cost value of the current solution.
"""
# compute cost internally
self.shared_lib.ocp_nlp_eval_cost.argtypes = [c_void_p, c_void_p, c_void_p]
self.shared_lib.ocp_nlp_eval_cost(self.nlp_solver, self.nlp_in, self.nlp_out)
# create output array
out = np.ascontiguousarray(np.zeros((1,)), dtype=np.float64)
out_data = cast(out.ctypes.data, POINTER(c_double))
# call getter
self.shared_lib.ocp_nlp_get.argtypes = [c_void_p, c_void_p, c_char_p, c_void_p]
field = "cost_value".encode('utf-8')
self.shared_lib.ocp_nlp_get(self.nlp_config, self.nlp_solver, field, out_data)
return out[0]
def get_residuals(self):
"""
Returns an array of the form [res_stat, res_eq, res_ineq, res_comp].
"""
# compute residuals if RTI
if self.acados_ocp.solver_options.nlp_solver_type == 'SQP_RTI':
self.shared_lib.ocp_nlp_eval_residuals.argtypes = [c_void_p, c_void_p, c_void_p]
self.shared_lib.ocp_nlp_eval_residuals(self.nlp_solver, self.nlp_in, self.nlp_out)
# create output array
out = np.ascontiguousarray(np.zeros((4, 1)), dtype=np.float64)
out_data = cast(out.ctypes.data, POINTER(c_double))
# call getters
self.shared_lib.ocp_nlp_get.argtypes = [c_void_p, c_void_p, c_char_p, c_void_p]
field = "res_stat".encode('utf-8')
self.shared_lib.ocp_nlp_get(self.nlp_config, self.nlp_solver, field, out_data)
out_data = cast(out[1].ctypes.data, POINTER(c_double))
field = "res_eq".encode('utf-8')
self.shared_lib.ocp_nlp_get(self.nlp_config, self.nlp_solver, field, out_data)
out_data = cast(out[2].ctypes.data, POINTER(c_double))
field = "res_ineq".encode('utf-8')
self.shared_lib.ocp_nlp_get(self.nlp_config, self.nlp_solver, field, out_data)
out_data = cast(out[3].ctypes.data, POINTER(c_double))
field = "res_comp".encode('utf-8')
self.shared_lib.ocp_nlp_get(self.nlp_config, self.nlp_solver, field, out_data)
return out.flatten()
# Note: this function should not be used anymore, better use cost_set, constraints_set
def set(self, stage_, field_, value_):
"""
Set numerical data inside the solver.
:param stage: integer corresponding to shooting node
:param field: string in ['x', 'u', 'z', 'pi', 'lam', 't', 'sl', 'su', 'p'], or a constraint/cost field such as 'lbx', 'ubx', 'lbu', 'ubu', 'yref'
.. note:: regarding lam, t: \n
the inequalities are internally organized in the following order: \n
[ lbu lbx lg lh lphi ubu ubx ug uh uphi; \n
lsbu lsbx lsg lsh lsphi usbu usbx usg ush usphi]
.. note:: pi: multipliers for dynamics equality constraints \n
lam: multipliers for inequalities \n
t: slack variables corresponding to evaluation of all inequalities (at the solution) \n
sl: slack variables of soft lower inequality constraints \n
su: slack variables of soft upper inequality constraints \n
"""
cost_fields = ['y_ref', 'yref']
constraints_fields = ['lbx', 'ubx', 'lbu', 'ubu']
out_fields = ['x', 'u', 'pi', 'lam', 't', 'z']
mem_fields = ['sl', 'su']
# cast value_ to avoid conversion issues
if isinstance(value_, (float, int)):
value_ = np.array([value_])
value_ = value_.astype(float)
model = self.acados_ocp.model
field = field_
field = field.encode('utf-8')
stage = c_int(stage_)
# treat parameters separately
if field_ == 'p':
getattr(self.shared_lib, f"{model.name}_acados_update_params").argtypes = [c_void_p, c_int, POINTER(c_double)]
getattr(self.shared_lib, f"{model.name}_acados_update_params").restype = c_int
value_data = cast(value_.ctypes.data, POINTER(c_double))
assert getattr(self.shared_lib, f"{model.name}_acados_update_params")(self.capsule, stage, value_data, value_.shape[0])==0
else:
if field_ not in constraints_fields + cost_fields + out_fields + mem_fields:
raise Exception("AcadosOcpSolver.set(): {} is not a valid argument.\
\nPossible values are {}. Exiting.".format(field, \
constraints_fields + cost_fields + out_fields + ['p']))
self.shared_lib.ocp_nlp_dims_get_from_attr.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p]
self.shared_lib.ocp_nlp_dims_get_from_attr.restype = c_int
dims = self.shared_lib.ocp_nlp_dims_get_from_attr(self.nlp_config, \
self.nlp_dims, self.nlp_out, stage_, field)
if value_.shape[0] != dims:
msg = 'AcadosOcpSolver.set(): mismatching dimension for field "{}" '.format(field_)
msg += 'with dimension {} (you have {})'.format(dims, value_.shape[0])
raise Exception(msg)
value_data = cast(value_.ctypes.data, POINTER(c_double))
value_data_p = cast((value_data), c_void_p)
if field_ in constraints_fields:
self.shared_lib.ocp_nlp_constraints_model_set.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_constraints_model_set(self.nlp_config, \
self.nlp_dims, self.nlp_in, stage, field, value_data_p)
elif field_ in cost_fields:
self.shared_lib.ocp_nlp_cost_model_set.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_cost_model_set(self.nlp_config, \
self.nlp_dims, self.nlp_in, stage, field, value_data_p)
elif field_ in out_fields:
self.shared_lib.ocp_nlp_out_set.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_out_set(self.nlp_config, \
self.nlp_dims, self.nlp_out, stage, field, value_data_p)
elif field_ in mem_fields:
self.shared_lib.ocp_nlp_set.argtypes = \
[c_void_p, c_void_p, c_int, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_set(self.nlp_config, \
self.nlp_solver, stage, field, value_data_p)
return
def cost_set(self, stage_, field_, value_, api='warn'):
"""
Set numerical data in the cost module of the solver.
:param stage: integer corresponding to shooting node
:param field: string, e.g. 'yref', 'W', 'ext_cost_num_hess'
:param value: of appropriate size
"""
# cast value_ to avoid conversion issues
if isinstance(value_, (float, int)):
value_ = np.array([value_])
value_ = value_.astype(float)
field = field_
field = field.encode('utf-8')
stage = c_int(stage_)
self.shared_lib.ocp_nlp_cost_dims_get_from_attr.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, POINTER(c_int)]
self.shared_lib.ocp_nlp_cost_dims_get_from_attr.restype = c_int
dims = np.ascontiguousarray(np.zeros((2,)), dtype=np.intc)
dims_data = cast(dims.ctypes.data, POINTER(c_int))
self.shared_lib.ocp_nlp_cost_dims_get_from_attr(self.nlp_config, \
self.nlp_dims, self.nlp_out, stage_, field, dims_data)
value_shape = value_.shape
if len(value_shape) == 1:
value_shape = (value_shape[0], 0)
elif len(value_shape) == 2:
if api=='old':
pass
elif api=='warn':
if not np.all(np.ravel(value_, order='F')==np.ravel(value_, order='K')):
raise Exception("Ambiguity in API detected.\n"
"Are you making an acados model from scrach? Add api='new' to cost_set and carry on.\n"
"Are you seeing this error suddenly in previously running code? Read on.\n"
" You are relying on a now-fixed bug in cost_set for field '{}'.\n".format(field_) +
" acados_template now correctly passes on any matrices to acados in column major format.\n" +
" Two options to fix this error: \n" +
" * Add api='old' to cost_set to restore old incorrect behaviour\n" +
" * Add api='new' to cost_set and remove any unnatural manipulation of the value argument " +
"such as non-mathematical transposes, reshaping, casting to fortran order, etc... " +
"If there is no such manipulation, then you have probably been getting an incorrect solution before.")
# Get elements in column major order
value_ = np.ravel(value_, order='F')
elif api=='new':
# Get elements in column major order
value_ = np.ravel(value_, order='F')
else:
raise Exception("Unknown api: '{}'".format(api))
if value_shape != tuple(dims):
raise Exception('AcadosOcpSolver.cost_set(): mismatching dimension', \
' for field "{}" with dimension {} (you have {})'.format( \
field_, tuple(dims), value_shape))
value_data = cast(value_.ctypes.data, POINTER(c_double))
value_data_p = cast((value_data), c_void_p)
self.shared_lib.ocp_nlp_cost_model_set.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_cost_model_set(self.nlp_config, \
self.nlp_dims, self.nlp_in, stage, field, value_data_p)
return
def constraints_set(self, stage_, field_, value_, api='warn'):
"""
Set numerical data in the constraint module of the solver.
:param stage: integer corresponding to shooting node
:param field: string in ['lbx', 'ubx', 'lbu', 'ubu', 'lg', 'ug', 'lh', 'uh', 'uphi', 'C', 'D']
:param value: of appropriate size
"""
# cast value_ to avoid conversion issues
if isinstance(value_, (float, int)):
value_ = np.array([value_])
value_ = value_.astype(float)
field = field_
field = field.encode('utf-8')
stage = c_int(stage_)
self.shared_lib.ocp_nlp_constraint_dims_get_from_attr.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, POINTER(c_int)]
self.shared_lib.ocp_nlp_constraint_dims_get_from_attr.restype = c_int
dims = np.ascontiguousarray(np.zeros((2,)), dtype=np.intc)
dims_data = cast(dims.ctypes.data, POINTER(c_int))
self.shared_lib.ocp_nlp_constraint_dims_get_from_attr(self.nlp_config, \
self.nlp_dims, self.nlp_out, stage_, field, dims_data)
value_shape = value_.shape
if len(value_shape) == 1:
value_shape = (value_shape[0], 0)
elif len(value_shape) == 2:
if api=='old':
pass
elif api=='warn':
if not np.all(np.ravel(value_, order='F')==np.ravel(value_, order='K')):
raise Exception("Ambiguity in API detected.\n"
"Are you making an acados model from scrach? Add api='new' to constraints_set and carry on.\n"
"Are you seeing this error suddenly in previously running code? Read on.\n"
" You are relying on a now-fixed bug in constraints_set for field '{}'.\n".format(field_) +
" acados_template now correctly passes on any matrices to acados in column major format.\n" +
" Two options to fix this error: \n" +
" * Add api='old' to constraints_set to restore old incorrect behaviour\n" +
" * Add api='new' to constraints_set and remove any unnatural manipulation of the value argument " +
"such as non-mathematical transposes, reshaping, casting to fortran order, etc... " +
"If there is no such manipulation, then you have probably been getting an incorrect solution before.")
# Get elements in column major order
value_ = np.ravel(value_, order='F')
elif api=='new':
# Get elements in column major order
value_ = np.ravel(value_, order='F')
else:
raise Exception("Unknown api: '{}'".format(api))
if value_shape != tuple(dims):
raise Exception('AcadosOcpSolver.constraints_set(): mismatching dimension' \
' for field "{}" with dimension {} (you have {})'.format(field_, tuple(dims), value_shape))
value_data = cast(value_.ctypes.data, POINTER(c_double))
value_data_p = cast((value_data), c_void_p)
self.shared_lib.ocp_nlp_constraints_model_set.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_constraints_model_set(self.nlp_config, \
self.nlp_dims, self.nlp_in, stage, field, value_data_p)
return
def dynamics_get(self, stage_, field_):
"""
Get numerical data from the dynamics module of the solver:
:param stage: integer corresponding to shooting node
:param field: string, e.g. 'A'
"""
field = field_
field = field.encode('utf-8')
stage = c_int(stage_)
# get dims
self.shared_lib.ocp_nlp_dynamics_dims_get_from_attr.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, POINTER(c_int)]
self.shared_lib.ocp_nlp_dynamics_dims_get_from_attr.restype = c_int
dims = np.ascontiguousarray(np.zeros((2,)), dtype=np.intc)
dims_data = cast(dims.ctypes.data, POINTER(c_int))
self.shared_lib.ocp_nlp_dynamics_dims_get_from_attr(self.nlp_config, \
self.nlp_dims, self.nlp_out, stage_, field, dims_data)
# create output data
out = np.ascontiguousarray(np.zeros((np.prod(dims),)), dtype=np.float64)
out = out.reshape(dims[0], dims[1], order='F')
out_data = cast(out.ctypes.data, POINTER(c_double))
out_data_p = cast((out_data), c_void_p)
# call getter
self.shared_lib.ocp_nlp_get_at_stage.argtypes = \
[c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_get_at_stage(self.nlp_config, \
self.nlp_dims, self.nlp_solver, stage, field, out_data_p)
return out
def options_set(self, field_, value_):
"""
Set options of the solver.
:param field: string, e.g. 'print_level', 'rti_phase', 'initialize_t_slacks', 'step_length', 'alpha_min', 'alpha_reduction'
:param value: of type int, float
"""
int_fields = ['print_level', 'rti_phase', 'initialize_t_slacks']
double_fields = ['step_length', 'tol_eq', 'tol_stat', 'tol_ineq', 'tol_comp', 'alpha_min', 'alpha_reduction']
string_fields = ['globalization']
# check field availability and type
if field_ in int_fields:
if not isinstance(value_, int):
raise Exception('solver option {} must be of type int. You have {}.'.format(field_, type(value_)))
else:
value_ctypes = c_int(value_)
elif field_ in double_fields:
if not isinstance(value_, float):
raise Exception('solver option {} must be of type float. You have {}.'.format(field_, type(value_)))
else:
value_ctypes = c_double(value_)
elif field_ in string_fields:
if not isinstance(value_, str):
raise Exception('solver option {} must be of type str. You have {}.'.format(field_, type(value_)))
else:
value_ctypes = value_.encode('utf-8')
else:
raise Exception('AcadosOcpSolver.options_set() does not support field {}.'\
'\n Possible values are {}.'.format(field_, ', '.join(int_fields + double_fields + string_fields)))
if field_ == 'rti_phase':
if value_ < 0 or value_ > 2:
raise Exception('AcadosOcpSolver.solve(): argument \'rti_phase\' can '
'take only values 0, 1, 2 for SQP-RTI-type solvers')
if self.acados_ocp.solver_options.nlp_solver_type != 'SQP_RTI' and value_ > 0:
raise Exception('AcadosOcpSolver.solve(): argument \'rti_phase\' can '
'take only value 0 for SQP-type solvers')
# encode
field = field_
field = field.encode('utf-8')
# call C interface
if field_ in string_fields:
self.shared_lib.ocp_nlp_solver_opts_set.argtypes = \
[c_void_p, c_void_p, c_char_p, c_char_p]
self.shared_lib.ocp_nlp_solver_opts_set(self.nlp_config, \
self.nlp_opts, field, value_ctypes)
else:
self.shared_lib.ocp_nlp_solver_opts_set.argtypes = \
[c_void_p, c_void_p, c_char_p, c_void_p]
self.shared_lib.ocp_nlp_solver_opts_set(self.nlp_config, \
self.nlp_opts, field, byref(value_ctypes))
return
def __del__(self):
model = self.acados_ocp.model
if self.solver_created:
getattr(self.shared_lib, f"{model.name}_acados_free").argtypes = [c_void_p]
getattr(self.shared_lib, f"{model.name}_acados_free").restype = c_int
getattr(self.shared_lib, f"{model.name}_acados_free")(self.capsule)
getattr(self.shared_lib, f"{model.name}_acados_free_capsule").argtypes = [c_void_p]
getattr(self.shared_lib, f"{model.name}_acados_free_capsule").restype = c_int
getattr(self.shared_lib, f"{model.name}_acados_free_capsule")(self.capsule)
try:
self.dlclose(self.shared_lib._handle)
except:
pass
| 44.473616
| 174
| 0.640019
|
bfd25fece7ec87a5f2068842445a60914d5a20fd
| 5,192
|
py
|
Python
|
aalh_iit_parksnature_001/cleanup-subjects-column.py
|
johndewees/iitmigration
|
4dadfbecda719d6e7d60af076a231aedec3c862f
|
[
"Unlicense"
] | null | null | null |
aalh_iit_parksnature_001/cleanup-subjects-column.py
|
johndewees/iitmigration
|
4dadfbecda719d6e7d60af076a231aedec3c862f
|
[
"Unlicense"
] | null | null | null |
aalh_iit_parksnature_001/cleanup-subjects-column.py
|
johndewees/iitmigration
|
4dadfbecda719d6e7d60af076a231aedec3c862f
|
[
"Unlicense"
] | null | null | null |
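# Descriptive summary (comment added for clarity): this script iterates over rows 7-493 of
# the 'Metadata Template' sheet, reads the existing subjects cell (column 9) and the place
# cell (column 13), and rewrites the subjects cell so it ends with the Images in Time
# collection heading that matches the place name; rows with no recognized Lucas County
# place fall back to the generic collection heading.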
from openpyxl import load_workbook
filename = 'aalh_iit_parksnature_001.xlsx'
wb = load_workbook(filename)
ws = wb['Metadata Template']
minimumcol = 8
maximumcol = 8
minimumrow = 7
maximumrow = 493
iterationrow = 7
targetcol = 9
placecol = 13
subjectholland = 'Images in time photographic collection. (Toledo Lucas County Public Library); Holland (Ohio). History. Photographs.'
subjectwaterville = 'Images in time photographic collection. (Toledo Lucas County Public Library); Waterville (Ohio). History. Photographs.'
subjectoregon = 'Images in time photographic collection. (Toledo Lucas County Public Library); Oregon (Ohio). History. Photographs.'
subjectmaumee = 'Images in time photographic collection. (Toledo Lucas County Public Library); Maumee (Ohio). History. Photographs.'
subjectsylvania = 'Images in time photographic collection. (Toledo Lucas County Public Library); Sylvania (Ohio). History. Photographs.'
subjecttoledo = 'Images in time photographic collection. (Toledo Lucas County Public Library); Toledo (Ohio). History. Photographs.'
subjectnonlucascounty = 'Images in time photographic collection. (Toledo Lucas County Public Library)'
semicolonspace = '; '
for row in ws.iter_rows(min_row=minimumrow, min_col=minimumcol, max_row=maximumrow, max_col=maximumcol):
for cell in row:
print(iterationrow)
testvar = ws.cell(row=iterationrow, column=targetcol).value
placevar = ws.cell(row=iterationrow, column=placecol).value
        if testvar is None:
            if placevar is None:
ws.cell(row=iterationrow, column=targetcol).value = subjectnonlucascounty
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Toledo (Ohio)') != -1:
ws.cell(row=iterationrow, column=targetcol).value = subjecttoledo
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Sylvania (Ohio)') != -1:
ws.cell(row=iterationrow, column=targetcol).value = subjectsylvania
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Maumee (Ohio)') != -1:
ws.cell(row=iterationrow, column=targetcol).value = subjectmaumee
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Oregon (Ohio)') != -1:
ws.cell(row=iterationrow, column=targetcol).value = subjectoregon
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Waterville (Ohio)') != -1:
ws.cell(row=iterationrow, column=targetcol).value = subjectwaterville
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Holland (Ohio)') != -1:
ws.cell(row=iterationrow, column=targetcol).value = subjectholland
print(ws.cell(row=iterationrow, column=targetcol).value)
else:
ws.cell(row=iterationrow, column=targetcol).value = subjectnonlucascounty
print(ws.cell(row=iterationrow, column=targetcol).value)
else:
            if placevar is None:
ws.cell(row=iterationrow, column=targetcol).value = testvar + semicolonspace + subjectnonlucascounty
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Toledo') != -1:
ws.cell(row=iterationrow, column=targetcol).value = testvar + semicolonspace + subjecttoledo
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Sylvania (Ohio)') != -1:
ws.cell(row=iterationrow, column=targetcol).value = testvar + semicolonspace + subjectsylvania
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Maumee (Ohio)') != -1:
ws.cell(row=iterationrow, column=targetcol).value = testvar + semicolonspace + subjectmaumee
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Oregon (Ohio)') != -1:
ws.cell(row=iterationrow, column=targetcol).value = testvar + semicolonspace + subjectoregon
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Waterville (Ohio)') != -1:
ws.cell(row=iterationrow, column=targetcol).value = testvar + semicolonspace + subjectwaterville
print(ws.cell(row=iterationrow, column=targetcol).value)
elif placevar.find('Holland (Ohio)') != -1:
ws.cell(row=iterationrow, column=targetcol).value = testvar + semicolonspace + subjectholland
print(ws.cell(row=iterationrow, column=targetcol).value)
else:
ws.cell(row=iterationrow, column=targetcol).value = testvar + semicolonspace + subjectnonlucascounty
print(ws.cell(row=iterationrow, column=targetcol).value)
iterationrow = iterationrow + 1
wb.save('aalh_iit_parksnature_001.xlsx')
| 64.098765
| 141
| 0.655624
|
ee4a93f0b5129423bcd922888084b19fb7042c65
| 896
|
py
|
Python
|
settings.py
|
glebb/eashl
|
1ee0e2a895ee266a7eca103dfa0f2c5ea35768cc
|
[
"MIT"
] | 1
|
2015-12-21T04:44:48.000Z
|
2015-12-21T04:44:48.000Z
|
settings.py
|
glebb/eashl
|
1ee0e2a895ee266a7eca103dfa0f2c5ea35768cc
|
[
"MIT"
] | 1
|
2017-04-24T14:08:55.000Z
|
2017-04-26T05:37:48.000Z
|
settings.py
|
glebb/eashl
|
1ee0e2a895ee266a7eca103dfa0f2c5ea35768cc
|
[
"MIT"
] | 4
|
2017-04-24T14:25:25.000Z
|
2022-02-23T19:00:41.000Z
|
HOME_TEAM = "219"
HOME_TEAM_NAME = "Murohoki"
PLATFORM = "ps4"
FETCH_MATCHES = "5"
MONGODBUSER = "eashl"
MONGODBPWD = "eashl"
DEBUG = False
MIN_GAMES_TO_SHOW_IN_STATS = 1
CLASSES = {"16200": "defensive defender",
"16201": "offensive defender",
"16202": "enforcer defender",
"16203": "two-way defender",
"16204": "grinder",
"16206": "sniper",
"16207": "power forward",
"16205": "playmaker",
"16208": "two-way forward",
"16209": "enforcer",
"16210": "goalie",
"16211": "goalie",
"16212": "goalie",
"16213": "dangler"}
PLAYERDATA = [
"playername",
"totalgp",
"skgoals",
"skgoalsavg",
"skassists",
"skassistsavg",
"skpoints",
"skpointsavg",
"skplusmin",
"skpim",
"skppg",
"skshg",
"skhits",
"skhitsavg",
"skbs",
"skshots",
"skshotsavg",
"skshotpct",
"glRatingOvr",
"glgaa",
"glga",
"glsaves",
"glsavepct",
"glso",
"glsoperiods"
]
POSITIONS = {"0": "G", "1": "D", "3": "LW", "4": "C", "5": "RW"}
| 16
| 64
| 0.639509
|
c82ff1af2c964acf96a49c9ec904d81cd6a8a57a
| 4,269
|
py
|
Python
|
tests/libtest/test_cluster_utils.py
|
tiffanyn108/ocs-ci
|
30350e0958d14100edeadbbc5f3fe557954a76b8
|
[
"MIT"
] | null | null | null |
tests/libtest/test_cluster_utils.py
|
tiffanyn108/ocs-ci
|
30350e0958d14100edeadbbc5f3fe557954a76b8
|
[
"MIT"
] | null | null | null |
tests/libtest/test_cluster_utils.py
|
tiffanyn108/ocs-ci
|
30350e0958d14100edeadbbc5f3fe557954a76b8
|
[
"MIT"
] | null | null | null |
import logging
import pytest
from ocs_ci.framework.testlib import ManageTest, libtest
from ocs_ci.ocs.cluster import CephCluster
from tests import helpers
log = logging.getLogger(__name__)
@pytest.fixture(scope='class')
def test_fixture(request):
"""
Create disks
"""
self = request.node.cls
def finalizer():
teardown(self)
request.addfinalizer(finalizer)
setup(self)
@pytest.fixture
def mon_resource(request):
"""
A fixture to handle mon resource cleanup,
this function brings the mon count to what it was before test started
"""
self = request.node.cls
mon_count = self.cluster_obj.mon_count
log.info(f"Mon count before add = {mon_count}")
self.cluster_obj.scan_cluster()
self.cluster_obj.cluster.reload()
self.cluster_obj.cluster.data['spec']['mon']['allowMultiplePerNode'] = True
self.cluster_obj.cluster.apply(**self.cluster_obj.cluster.data)
yield
self.cluster_obj.mon_change_count(mon_count)
if mon_count != self.cluster_obj.mon_count:
log.error(f"Mon teardown failure")
log.error(
f"Expected: {mon_count}",
f"but found {self.cluster_obj.mon_count}"
)
log.info("Removed mon")
self.cluster_obj.cluster.data['spec']['mon'][
'allowMultiplePerNode'
] = False
self.cluster_obj.cluster.apply(**self.cluster_obj.cluster.data)
@pytest.fixture
def mds_resource(request):
"""
A fixture to handle mds resource cleanup
This function brings mds count to what it was before test started
"""
self = request.node.cls
we_created_fs = False
if not self.cluster_obj.cephfs:
# cephfs doesn't exist , create one for this test
assert helpers.create_cephfilesystem()
self.cluster_obj.scan_cluster()
assert self.cluster_obj.cephfs
we_created_fs = True
mds_count = int(self.cluster_obj.mds_count / 2)
yield
self.cluster_obj.mds_change_count(mds_count)
current_count = int(self.cluster_obj.mds_count / 2)
if mds_count != current_count:
log.error(f"MDS teardown failure")
log.error(f"Expected: {mds_count} but found {current_count}")
if we_created_fs:
self.cluster_obj.cephfs.delete()
self.cluster_obj.cephfs = None
@pytest.fixture
def user_resource(request):
"""
A fixture for creating user for test and cleaning up after test is done
"""
self = request.node.cls
log.info("Creating user")
assert self.cluster_obj.create_user(self.username, self.caps)
yield
del_cmd = f"ceph auth del {self.username}"
log.info("User deleted")
self.cluster_obj.toolbox.exec_ceph_cmd(del_cmd)
def setup(self):
"""
Create CephCluster object to be consumed by tests
"""
self.cluster_obj = CephCluster()
def teardown(self):
"""
Make sure at the end cluster is in HEALTH_OK state
"""
assert self.cluster_obj.cluster_health_check(timeout=1200)
@libtest
@pytest.mark.usefixtures(
test_fixture.__name__,
)
class TestClusterUtils(ManageTest):
# Cluster will be populated in the fixture
username = "client.test"
caps = "mon 'allow r' osd 'allow rwx'"
def test_get_user_key(self, user_resource):
key = self.cluster_obj.get_user_key(self.username)
assert key
logging.info(key)
def test_get_admin_key(self):
"""
By default admin user will be created by rook
"""
key = self.cluster_obj.get_admin_key()
assert key
def test_get_mon_info(self):
for mon in self.cluster_obj.mons:
logging.info(mon.name)
logging.info(mon.port)
def test_add_mon(self, mon_resource):
cur_count = self.cluster_obj.mon_count
logging.info(f"current mon count = {cur_count}")
new_count = cur_count + 1
self.cluster_obj.mon_change_count(new_count)
assert new_count == self.cluster_obj.mon_count
def test_add_mds(self, mds_resource):
cur_count = int(self.cluster_obj.mds_count / 2)
logging.info(f"Current active count = {cur_count}")
new_count = cur_count + 1
self.cluster_obj.mds_change_count(new_count)
assert new_count * 2 == self.cluster_obj.mds_count
| 29.239726
| 79
| 0.678613
|
325bddc968cb0a80d80ecbcde4bc43e81521a9ad
| 3,133
|
py
|
Python
|
Clustering:-Customer-Segmentation/code.py
|
akshatDongre/ga-learner-dsmp-repo
|
6e4d993c6d078b1f559b42f42d37d2c2100f62b8
|
[
"MIT"
] | null | null | null |
Clustering:-Customer-Segmentation/code.py
|
akshatDongre/ga-learner-dsmp-repo
|
6e4d993c6d078b1f559b42f42d37d2c2100f62b8
|
[
"MIT"
] | null | null | null |
Clustering:-Customer-Segmentation/code.py
|
akshatDongre/ga-learner-dsmp-repo
|
6e4d993c6d078b1f559b42f42d37d2c2100f62b8
|
[
"MIT"
] | null | null | null |
# --------------
# import packages
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# Load Offers
offers = pd.read_excel(path, sheet_name=0)
transactions=pd.read_excel(path, sheet_name=1)
transactions['n']=1
df=pd.merge(offers, transactions)
print(df.head())
# Load Transactions
# Merge dataframes
# Look at the first 5 rows
# --------------
# Code starts here
matrix= pd.pivot_table(df,index='Customer Last Name', columns='Offer #', values='n')
matrix.fillna(0, inplace=True)
matrix.reset_index(inplace=True)
print(matrix.head(5))
# create pivot table
# replace missing values with 0
# reindex pivot table
# display first 5 rows
# Code ends here
# --------------
# import packages
from sklearn.cluster import KMeans
# Code starts here
cluster = KMeans(n_clusters=5, init='k-means++', max_iter=300, n_init=10, random_state=0)
matrix['cluster']=cluster.fit_predict(matrix[matrix.columns[1:]])
print(matrix.head())
# initialize KMeans object
# create 'cluster' column
# Code ends here
# --------------
# import packages
from sklearn.decomposition import PCA
# Code starts here
pca = PCA(n_components=2, random_state=0)
# fit PCA once and reuse the same 2-D projection for both coordinate columns
components = pca.fit_transform(matrix[matrix.columns[1:]])
matrix['x'] = components[:, 0]
matrix['y'] = components[:, 1]
clusters =matrix.iloc[:,[0,33,34,35]]
clusters.plot.scatter(x='x',y='y',c='cluster',colormap='viridis')
# initialize pca object with 2 components
# create 'x' and 'y' columns donoting observation locations in decomposed form
# dataframe to visualize clusters by customer names
# visualize clusters
# Code ends here
# --------------
# Code starts here
data = clusters.merge(transactions)
data = offers.merge(data)
data.head()
champagne = {}
for i in range(0,5):
champagne[i]=0
new_df =data[data['cluster']==i]
counts = new_df['Varietal'].value_counts(ascending=False)
# check if 'Champagne' is ordered mostly
print(i)
print(counts.index[0])
if (counts.index[0]=='Champagne'):
champagne[i]=counts[0]
# add it to 'champagne'
print(champagne)
cluster_champagne= max(champagne, key=lambda k: champagne[k])
print(cluster_champagne)
# merge 'clusters' and 'transactions'
# merge `data` and `offers`
# initialzie empty dictionary
# iterate over every cluster
# observation falls in that cluster
# sort cluster according to type of 'Varietal'
# check if 'Champagne' is ordered mostly
# add it to 'champagne'
# get cluster with maximum orders of 'Champagne'
# print out cluster number
# --------------
# Code starts here
discount={}
# iterate over cluster numbers
for i in range(0,5):
# dataframe for every cluster
new_df=data[data['cluster']==i]
# average discount for cluster
print(new_df['Discount (%)'].sum()/len(new_df))
counts=new_df['Discount (%)'].sum()/len(new_df)
# adding cluster number as key and average discount as value
discount[i]=counts
# cluster with maximum average discount
cluster_discount=max(discount, key=lambda k: discount[k])
print(cluster_discount)
# Code ends here
| 19.220859
| 89
| 0.691989
|
118a5c5b02531ba8eb24b3cf9c408cfa3c4252d2
| 398
|
py
|
Python
|
habu/cli/cmd_xor.py
|
NanoGitHub/habu
|
5f0c5ed40ae5fdbbc0d3c317b75e447f4aa6a740
|
[
"BSD-3-Clause"
] | 1
|
2018-11-02T05:42:08.000Z
|
2018-11-02T05:42:08.000Z
|
habu/cli/cmd_xor.py
|
seclib/habu
|
7e8106ddec07e08b994c290a38ce7f7f662c0e1c
|
[
"BSD-3-Clause"
] | null | null | null |
habu/cli/cmd_xor.py
|
seclib/habu
|
7e8106ddec07e08b994c290a38ce7f7f662c0e1c
|
[
"BSD-3-Clause"
] | 1
|
2019-05-14T07:34:20.000Z
|
2019-05-14T07:34:20.000Z
|
import click
from habu.lib.xor import xor
@click.command()
@click.option('-k', default='0', help='Encryption key')
@click.option('-i', type=click.File('rb'), required=True, help='Input file')
@click.option('-o', type=click.File('wb'), required=True, help='Output file')
def cmd_xor(k, i, o):
"""XOR cipher"""
o.write(xor(i.read(), k.encode()))
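# Illustrative CLI sketch (assuming the command is exposed by habu's entry points as
# `habu.xor`); XOR with the same key is its own inverse, so a round trip looks like:
#   habu.xor -k secret -i plain.bin -o cipher.bin
#   habu.xor -k secret -i cipher.bin -o recovered.bin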
if __name__ == '__main__':
cmd_xor()
| 23.411765
| 77
| 0.643216
|
dd9a634a4959ac63041ae9b636a2e27083eb5e70
| 249
|
py
|
Python
|
Modules/Gekokujo_vanilla_enhanced/Code/Module_system/header_postfx.py
|
roalyr/gekokujo_vanilla_enhanced
|
84d8cc1033be98357ac139fafbc1c10851274019
|
[
"MIT"
] | 1
|
2021-01-17T06:21:36.000Z
|
2021-01-17T06:21:36.000Z
|
Modules/Gekokujo_vanilla_enhanced/Code/Module_system/header_postfx.py
|
roalyr/gekokujo_vanilla_enhanced
|
84d8cc1033be98357ac139fafbc1c10851274019
|
[
"MIT"
] | 2
|
2021-01-17T12:57:37.000Z
|
2021-02-08T02:16:45.000Z
|
Modules/Gekokujo_vanilla_enhanced/Code/Module_system/header_postfx.py
|
roalyr/gekokujo_vanilla_enhanced
|
84d8cc1033be98357ac139fafbc1c10851274019
|
[
"MIT"
] | null | null | null |
###################################################
# header_postfx_params.py
# This file contains declarations for postfx_params
# DO NOT EDIT THIS FILE!
###################################################
fxf_highhdr = 0x00000001
| 27.666667
| 52
| 0.417671
|
65014f269c244d916cefa081e8f7feee79c4a85f
| 4,366
|
py
|
Python
|
examples/test_pcrnet.py
|
RerRayne/learn3d
|
83e4ac657c6538fb4cbed6e00b2e3ed6cbf43555
|
[
"MIT"
] | 335
|
2020-05-17T19:37:47.000Z
|
2022-03-29T09:32:14.000Z
|
examples/test_pcrnet.py
|
RerRayne/learn3d
|
83e4ac657c6538fb4cbed6e00b2e3ed6cbf43555
|
[
"MIT"
] | 13
|
2020-06-08T05:28:03.000Z
|
2022-03-29T07:46:18.000Z
|
examples/test_pcrnet.py
|
RerRayne/learn3d
|
83e4ac657c6538fb4cbed6e00b2e3ed6cbf43555
|
[
"MIT"
] | 59
|
2020-06-27T09:01:29.000Z
|
2022-03-21T07:22:09.000Z
|
import open3d as o3d
import argparse
import os
import sys
import logging
import numpy
import numpy as np
import torch
import torch.utils.data
import torchvision
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from tqdm import tqdm
# Only if the files are in example folder.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
if BASE_DIR[-8:] == 'examples':
sys.path.append(os.path.join(BASE_DIR, os.pardir))
os.chdir(os.path.join(BASE_DIR, os.pardir))
from learning3d.models import PointNet, iPCRNet
from learning3d.losses import ChamferDistanceLoss
from learning3d.data_utils import RegistrationData, ModelNet40Data
def display_open3d(template, source, transformed_source):
template_ = o3d.geometry.PointCloud()
source_ = o3d.geometry.PointCloud()
transformed_source_ = o3d.geometry.PointCloud()
template_.points = o3d.utility.Vector3dVector(template)
source_.points = o3d.utility.Vector3dVector(source + np.array([0,0,0]))
transformed_source_.points = o3d.utility.Vector3dVector(transformed_source)
template_.paint_uniform_color([1, 0, 0])
source_.paint_uniform_color([0, 1, 0])
transformed_source_.paint_uniform_color([0, 0, 1])
o3d.visualization.draw_geometries([template_, source_, transformed_source_])
def test_one_epoch(device, model, test_loader):
model.eval()
test_loss = 0.0
pred = 0.0
count = 0
for i, data in enumerate(tqdm(test_loader)):
template, source, igt = data
template = template.to(device)
source = source.to(device)
igt = igt.to(device)
output = model(template, source)
display_open3d(template.detach().cpu().numpy()[0], source.detach().cpu().numpy()[0], output['transformed_source'].detach().cpu().numpy()[0])
loss_val = ChamferDistanceLoss()(template, output['transformed_source'])
test_loss += loss_val.item()
count += 1
test_loss = float(test_loss)/count
return test_loss
def test(args, model, test_loader):
    test_loss = test_one_epoch(args.device, model, test_loader)
def options():
parser = argparse.ArgumentParser(description='Point Cloud Registration')
parser.add_argument('--exp_name', type=str, default='exp_ipcrnet', metavar='N',
help='Name of the experiment')
parser.add_argument('--dataset_path', type=str, default='ModelNet40',
metavar='PATH', help='path to the input dataset') # like '/path/to/ModelNet40'
parser.add_argument('--eval', type=bool, default=False, help='Train or Evaluate the network.')
# settings for input data
parser.add_argument('--dataset_type', default='modelnet', choices=['modelnet', 'shapenet2'],
metavar='DATASET', help='dataset type (default: modelnet)')
parser.add_argument('--num_points', default=1024, type=int,
metavar='N', help='points in point-cloud (default: 1024)')
# settings for PointNet
parser.add_argument('--emb_dims', default=1024, type=int,
metavar='K', help='dim. of the feature vector (default: 1024)')
parser.add_argument('--symfn', default='max', choices=['max', 'avg'],
help='symmetric function (default: max)')
# settings for on training
parser.add_argument('-j', '--workers', default=4, type=int,
metavar='N', help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch_size', default=20, type=int,
metavar='N', help='mini-batch size (default: 32)')
parser.add_argument('--pretrained', default='learning3d/pretrained/exp_ipcrnet/models/best_model.t7', type=str,
metavar='PATH', help='path to pretrained model file (default: null (no-use))')
parser.add_argument('--device', default='cuda:0', type=str,
metavar='DEVICE', help='use CUDA if available')
args = parser.parse_args()
return args
def main():
args = options()
testset = RegistrationData('PCRNet', ModelNet40Data(train=False))
test_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, drop_last=False, num_workers=args.workers)
if not torch.cuda.is_available():
args.device = 'cpu'
args.device = torch.device(args.device)
# Create PointNet Model.
ptnet = PointNet(emb_dims=args.emb_dims)
model = iPCRNet(feature_model=ptnet)
model = model.to(args.device)
if args.pretrained:
assert os.path.isfile(args.pretrained)
model.load_state_dict(torch.load(args.pretrained, map_location='cpu'))
model.to(args.device)
test(args, model, test_loader)
if __name__ == '__main__':
main()
| 36.383333
| 144
| 0.740266
|
504d5e1c541dfeccbee0c152786aca7805ca01e9
| 1,332
|
py
|
Python
|
provisioners/ansible/library/stack_facter_facts.py
|
PradKhandelwal/aem-aws-stack-builder
|
27ef24c0b42b921c622791ce579e91e3ed08912f
|
[
"Apache-2.0"
] | 36
|
2017-01-17T01:40:08.000Z
|
2022-03-11T18:09:53.000Z
|
provisioners/ansible/library/stack_facter_facts.py
|
PradKhandelwal/aem-aws-stack-builder
|
27ef24c0b42b921c622791ce579e91e3ed08912f
|
[
"Apache-2.0"
] | 192
|
2017-01-24T01:53:39.000Z
|
2021-10-21T03:11:36.000Z
|
provisioners/ansible/library/stack_facter_facts.py
|
PradKhandelwal/aem-aws-stack-builder
|
27ef24c0b42b921c622791ce579e91e3ed08912f
|
[
"Apache-2.0"
] | 50
|
2017-01-19T04:44:46.000Z
|
2021-08-16T05:21:06.000Z
|
#!/usr/bin/python3
from ansible.module_utils.basic import *
import random
import string
def generate_facts(params):
return """stack_prefix=%s
aws_region=%s
cron_env_path=%s
cron_http_proxy=%s
cron_https_proxy=%s
cron_no_proxy=%s
publish_dispatcher_allowed_client=%s
""" % (
params['stack_prefix'],
params['aws_region'],
params['cron_env_path'],
params['cron_http_proxy'],
params['cron_https_proxy'],
params['cron_no_proxy'],
params['publish_dispatcher_allowed_client'],
)
def main():
module = AnsibleModule(
argument_spec = dict(
stack_prefix = dict(required=True, type='str'),
aws_region = dict(required=True, type='str'),
cron_env_path = dict(required=True, type='str'),
cron_http_proxy = dict(required=True, type='str'),
cron_https_proxy = dict(required=True, type='str'),
cron_no_proxy = dict(required=True, type='str'),
publish_dispatcher_allowed_client = dict(required=True, type='str'),
)
)
response = generate_facts(module.params)
module.exit_json(changed = False, meta = response)
if __name__ == '__main__':
main()
| 30.976744
| 76
| 0.588589
|
677eeb16b0635ad8c6a12e8921d99fc5b40ff69d
| 468
|
py
|
Python
|
Python/ex52.py
|
Anderson0312/Python
|
1fd225378c55309640d584a4894393f7c40dc9ed
|
[
"MIT"
] | 1
|
2022-02-01T17:59:50.000Z
|
2022-02-01T17:59:50.000Z
|
Python/ex52.py
|
Anderson0312/Python
|
1fd225378c55309640d584a4894393f7c40dc9ed
|
[
"MIT"
] | null | null | null |
Python/ex52.py
|
Anderson0312/Python
|
1fd225378c55309640d584a4894393f7c40dc9ed
|
[
"MIT"
] | null | null | null |
lista = list()
while True:
    lista.append(int(input('Enter a value: ')))
    soun = str(input('Do you want to continue? [Y/N] '))
print('-=' * 30)
if soun in 'Nn':
break
lista.sort(reverse=True)
print(f'You entered {len(lista)} elements.')
print('-=' * 30)
print('The values in descending order are {}'.format(lista))
print('-=' * 30)
if 0 >= lista.count(5):
    print('The value 5 is not in the list!')
else:
    print('The value 5 is in the list!')
| 29.25
| 61
| 0.615385
|
8b6f6925b5d69016e0aa0e747234403153748d5a
| 323
|
py
|
Python
|
Project/Server/McServer/backendapp/serializers.py
|
Tchinmai7/CSE535
|
d2983355656f66070e840408a0825fae6de33df1
|
[
"MIT"
] | null | null | null |
Project/Server/McServer/backendapp/serializers.py
|
Tchinmai7/CSE535
|
d2983355656f66070e840408a0825fae6de33df1
|
[
"MIT"
] | 1
|
2022-02-10T12:51:12.000Z
|
2022-02-10T12:51:12.000Z
|
Project/Server/McServer/backendapp/serializers.py
|
Tchinmai7/CSE535
|
d2983355656f66070e840408a0825fae6de33df1
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from .models import User
class LoginSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = "__all__"
class RegisterSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('UserName', 'UserSignalFile')
| 23.071429
| 54
| 0.702786
|
d6f1bfe86e8d779215622cb65d53cf23b089ffca
| 2,121
|
py
|
Python
|
test/scripts/test_sequana_coverage.py
|
brwnj/sequana
|
58f6ca47815bf7253f27e4631d971a0a479c3a63
|
[
"BSD-3-Clause"
] | null | null | null |
test/scripts/test_sequana_coverage.py
|
brwnj/sequana
|
58f6ca47815bf7253f27e4631d971a0a479c3a63
|
[
"BSD-3-Clause"
] | null | null | null |
test/scripts/test_sequana_coverage.py
|
brwnj/sequana
|
58f6ca47815bf7253f27e4631d971a0a479c3a63
|
[
"BSD-3-Clause"
] | 1
|
2019-10-11T18:21:05.000Z
|
2019-10-11T18:21:05.000Z
|
from sequana.scripts import coverage
from sequana import sequana_data
import pytest
prog = "sequana_coverage"
@pytest.fixture
def coveragefix():
import os
# local nosetests execution
try:os.remove('README')
except:pass
try:os.remove('quality.rules')
except:pass
try:os.remove('config.yaml')
except:pass
def test_version():
try:
coverage.main([prog, '--version'])
assert False
except SystemExit:
pass
else:
raise Exception
def test_help():
try:
coverage.main([prog, '--help'])
assert False
except SystemExit:
pass
else:
raise Exception
def test_input(tmpdir):
import os
# Download reference in temporary directory so that it is erased if the test
# fails.
directory_data = tmpdir.mkdir("datatemp")
cwd = os.getcwd()
try:
os.chdir(directory_data.__str__())
coverage.main([prog, '--download-reference', "JB409847"])
os.system("""sed -i s"/>ENA/>JB409847 /" %s/JB409847.fa """ % directory_data.__str__())
coverage.main([prog, '--download-genbank', "JB409847"])
except Exception:
raise Exception
finally:
os.chdir(cwd)
directory_run = tmpdir.mkdir("report")
filename = sequana_data('JB409847.bed')
try:
coverage.main([prog, '-i', filename, "-o", "--output-directory",
directory_run.__str__(), "-r",
"%s/JB409847.fa" % directory_data.__str__()])
assert False
except Exception as err:
print(err)
assert True
print(os.listdir(directory_run.__str__()))
assert os.path.exists(directory_run.__str__() + os.sep + "sequana_coverage.html")
try:
coverage.main([prog, '-i', filename, "-o", "--output-directory",
directory_run.__str__(), "-r",
"%s/JB409847.fa" % directory_data.__str__(), '-c', '1'])
assert False
except Exception as err:
print(err)
assert True
assert os.path.exists(str(directory_run) + os.sep + 'JB409847.cov.html')
| 27.192308
| 95
| 0.600189
|
5dda0948901fbc05f782713a8e50032d96f68462
| 551
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/validators/pointcloud/stream/_maxpoints.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
env/lib/python3.8/site-packages/plotly/validators/pointcloud/stream/_maxpoints.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
env/lib/python3.8/site-packages/plotly/validators/pointcloud/stream/_maxpoints.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
import _plotly_utils.basevalidators
class MaxpointsValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="maxpoints", parent_name="pointcloud.stream", **kwargs
):
super(MaxpointsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 10000),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "info"),
**kwargs
)
| 32.411765
| 80
| 0.615245
|
e834dda3a9ae105c4f4445e6dcb858fac6984d98
| 548
|
py
|
Python
|
exams/2018-06-28/testo-esame/esercizio1.py
|
CNardin/learning-python
|
407b35c3bb045477b823fb0fd8cf7ae05d92ef77
|
[
"MIT"
] | null | null | null |
exams/2018-06-28/testo-esame/esercizio1.py
|
CNardin/learning-python
|
407b35c3bb045477b823fb0fd8cf7ae05d92ef77
|
[
"MIT"
] | null | null | null |
exams/2018-06-28/testo-esame/esercizio1.py
|
CNardin/learning-python
|
407b35c3bb045477b823fb0fd8cf7ae05d92ef77
|
[
"MIT"
] | null | null | null |
"""
Prende in input una lista con n numeri interi, e RITORNA una NUOVA lista che
contiene n tuple ciascuna da due elementi. Ogni tupla contiene un numero
preso dalla corrispondente posizione della lista di partenza, e il suo doppio.
Per esempio:
doppie([ 5, 3, 8])
deve dare la nuova lista
[(5,10), (3,6), (8,16)]
"""
def doppie(lista):
# scrivi qui
# INIZIO TEST - NON TOCCARE !
assert doppie([]) == []
assert doppie([3]) == [(3,6)]
assert doppie([2,7]) == [(2,4),(7,14)]
assert doppie([5,3,8]) == [(5,10), (3,6), (8,16)]
# END OF TESTS
| 22.833333
| 79
| 0.644161
|
0c65bb9fea1af6d8c9abcd0840c9e945c923422d
| 3,235
|
py
|
Python
|
helper.py
|
edmondchuc/voc-view
|
57bd965facacc77f40f218685c88e8b858d4925c
|
[
"MIT"
] | 3
|
2021-07-31T16:23:26.000Z
|
2022-01-24T01:28:17.000Z
|
helper.py
|
edmondchuc/voc-view
|
57bd965facacc77f40f218685c88e8b858d4925c
|
[
"MIT"
] | null | null | null |
helper.py
|
edmondchuc/voc-view
|
57bd965facacc77f40f218685c88e8b858d4925c
|
[
"MIT"
] | 1
|
2019-08-07T06:02:52.000Z
|
2019-08-07T06:02:52.000Z
|
from markdown import markdown
from flask import url_for
from rdflib.namespace import DCTERMS
from rdflib import BNode, URIRef
from bs4 import BeautifulSoup
from config import Config
# from triplestore import Triplestore
import re
from urllib.parse import quote_plus
from datetime import datetime, timedelta
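# Descriptive summary (comment added for clarity): render_concept_tree() post-processes the
# rendered HTML of the #concept-hierarchy element, tagging every nested <ul> with the
# 'nested' class and wrapping parent links in <span class="caret"> so the hierarchy can be
# displayed as a collapsible tree.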
def render_concept_tree(html_doc):
soup = BeautifulSoup(html_doc, 'html.parser')
concept_hierarchy = soup.find(id='concept-hierarchy')
uls = concept_hierarchy.find_all('ul')
for i, ul in enumerate(uls):
# Don't add HTML class nested to the first 'ul' found.
if not i == 0:
ul['class'] = 'nested'
if ul.parent.name == 'li':
temp = BeautifulSoup(str(ul.parent.a.extract()), 'html.parser')
ul.parent.insert(0, BeautifulSoup('<span class="caret">', 'html.parser'))
ul.parent.span.insert(0, temp)
return soup
# def get_triplestore_created_time():
# """Get the string message of the last time the local graph cache was created."""
#
# MSG = 'Last updated {}.'
# for created_time in Config.g.objects(Triplestore.THIS_GRAPH, DCTERMS.created):
# created_time = created_time.toPython()
# now = datetime.now()
# now -= timedelta(minutes=1)
#
# last_updated = (datetime.now() - created_time).seconds // 60
# if not last_updated:
# LAST_UPDATE_VALUE = 'just now'
# else:
# LAST_UPDATE_VALUE = f'{last_updated} minutes ago'
#
# return MSG.format(LAST_UPDATE_VALUE)
def uri_label(uri):
return uri.split('#')[-1].split('/')[-1]
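# Illustrative example: uri_label() keeps the last fragment or path segment, e.g.
#   uri_label('http://example.org/voc#Concept') == 'Concept'
#   uri_label('http://example.org/voc/Concept') == 'Concept'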
def render_property_restricted(text):
if isinstance(text, str):
length = 175
if len(text) > length:
return text[:length] + '...'
return text
def is_list(property):
if isinstance(property, list):
return True
return False
def render_popover(label, uri):
return '<span class="card-title"><a tabindex="0" class role="button" data-toggle="popover" data-trigger="focus" title data-content="<a href=\'{0}\'>{1}</a>" data-original-title="URI">{0}</a></span>'.format(label, uri)
def render(text):
if type(text) == BNode:
bnode = text
text = '<ul class="list-group pb-3">'
for s, p, o in Config.g.triples((bnode, None, None)):
# text += '<li>' + '<a href="{}">{}</a>'.format(p, uri_label(p)) + ' ' + render(o) + '</li>'
text += '<li class="list-group-item">{}: {}</li>'.format(render_popover(uri_label(p), p), render_popover(uri_label(o), o) if type(o) == URIRef else o)
text += '</ul>'
return text
if text[:4] == 'http':
return '<p><a href="{0}">{0}</a></p>'.format(text)
email_pattern = r"[a-z0-9!#$%&'*+\/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+\/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*" \
r"[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?"
if re.match(email_pattern, text):
return '<p><a href="mailto:{0}">{0}</a></p>'.format(text)
return markdown(text)
def url_encode(url):
return quote_plus(url)
def render_instance_uri(uri, label):
return '<p><a href="{}">{}</a></p>'.format(url_for('routes.ob', uri=uri), label)
| 32.029703
| 221
| 0.592581
|
61fa173d68655ef833b840e0e14a36a1f15c4fae
| 11,131
|
py
|
Python
|
pytorch_toolbelt/modules/backbone/inceptionv4.py
|
papkov/pytorch-toolbelt
|
71d03d907f93fa73fbfba5eb89d26ad801e47e03
|
[
"MIT"
] | null | null | null |
pytorch_toolbelt/modules/backbone/inceptionv4.py
|
papkov/pytorch-toolbelt
|
71d03d907f93fa73fbfba5eb89d26ad801e47e03
|
[
"MIT"
] | null | null | null |
pytorch_toolbelt/modules/backbone/inceptionv4.py
|
papkov/pytorch-toolbelt
|
71d03d907f93fa73fbfba5eb89d26ad801e47e03
|
[
"MIT"
] | null | null | null |
# Original source
# https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/inceptionv4.py
from __future__ import absolute_import, division, print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
__all__ = ["InceptionV4", "inceptionv4"]
pretrained_settings = {
"inceptionv4": {
"imagenet": {
"url": "http://data.lip6.fr/cadene/pretrainedmodels/inceptionv4-8e4777a0.pth",
"input_space": "RGB",
"input_size": [3, 299, 299],
"input_range": [0, 1],
"mean": [0.5, 0.5, 0.5],
"std": [0.5, 0.5, 0.5],
"num_classes": 1000,
},
"imagenet+background": {
"url": "http://data.lip6.fr/cadene/pretrainedmodels/inceptionv4-8e4777a0.pth",
"input_space": "RGB",
"input_size": [3, 299, 299],
"input_range": [0, 1],
"mean": [0.5, 0.5, 0.5],
"std": [0.5, 0.5, 0.5],
"num_classes": 1001,
},
}
}
class BasicConv2d(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(
in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False
) # verify bias false
self.bn = nn.BatchNorm2d(
out_planes, eps=0.001, momentum=0.1, affine=True # value found in tensorflow # default pytorch value
)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class Mixed_3a(nn.Module):
def __init__(self):
super(Mixed_3a, self).__init__()
self.maxpool = nn.MaxPool2d(3, stride=2)
self.conv = BasicConv2d(64, 96, kernel_size=3, stride=2)
def forward(self, x):
x0 = self.maxpool(x)
x1 = self.conv(x)
out = torch.cat((x0, x1), 1)
return out
class Mixed_4a(nn.Module):
def __init__(self):
super(Mixed_4a, self).__init__()
self.branch0 = nn.Sequential(
BasicConv2d(160, 64, kernel_size=1, stride=1), BasicConv2d(64, 96, kernel_size=3, stride=1)
)
self.branch1 = nn.Sequential(
BasicConv2d(160, 64, kernel_size=1, stride=1),
BasicConv2d(64, 64, kernel_size=(1, 7), stride=1, padding=(0, 3)),
BasicConv2d(64, 64, kernel_size=(7, 1), stride=1, padding=(3, 0)),
BasicConv2d(64, 96, kernel_size=(3, 3), stride=1),
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
out = torch.cat((x0, x1), 1)
return out
class Mixed_5a(nn.Module):
def __init__(self):
super(Mixed_5a, self).__init__()
self.conv = BasicConv2d(192, 192, kernel_size=3, stride=2)
self.maxpool = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.conv(x)
x1 = self.maxpool(x)
out = torch.cat((x0, x1), 1)
return out
class Inception_A(nn.Module):
def __init__(self):
super(Inception_A, self).__init__()
self.branch0 = BasicConv2d(384, 96, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(384, 64, kernel_size=1, stride=1), BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1)
)
self.branch2 = nn.Sequential(
BasicConv2d(384, 64, kernel_size=1, stride=1),
BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1),
BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1),
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
BasicConv2d(384, 96, kernel_size=1, stride=1),
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class Reduction_A(nn.Module):
def __init__(self):
super(Reduction_A, self).__init__()
self.branch0 = BasicConv2d(384, 384, kernel_size=3, stride=2)
self.branch1 = nn.Sequential(
BasicConv2d(384, 192, kernel_size=1, stride=1),
BasicConv2d(192, 224, kernel_size=3, stride=1, padding=1),
BasicConv2d(224, 256, kernel_size=3, stride=2),
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class Inception_B(nn.Module):
def __init__(self):
super(Inception_B, self).__init__()
self.branch0 = BasicConv2d(1024, 384, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(1024, 192, kernel_size=1, stride=1),
BasicConv2d(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)),
BasicConv2d(224, 256, kernel_size=(7, 1), stride=1, padding=(3, 0)),
)
self.branch2 = nn.Sequential(
BasicConv2d(1024, 192, kernel_size=1, stride=1),
BasicConv2d(192, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)),
BasicConv2d(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)),
BasicConv2d(224, 224, kernel_size=(7, 1), stride=1, padding=(3, 0)),
BasicConv2d(224, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)),
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
BasicConv2d(1024, 128, kernel_size=1, stride=1),
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class Reduction_B(nn.Module):
def __init__(self):
super(Reduction_B, self).__init__()
self.branch0 = nn.Sequential(
BasicConv2d(1024, 192, kernel_size=1, stride=1), BasicConv2d(192, 192, kernel_size=3, stride=2)
)
self.branch1 = nn.Sequential(
BasicConv2d(1024, 256, kernel_size=1, stride=1),
BasicConv2d(256, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)),
BasicConv2d(256, 320, kernel_size=(7, 1), stride=1, padding=(3, 0)),
BasicConv2d(320, 320, kernel_size=3, stride=2),
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class Inception_C(nn.Module):
def __init__(self):
super(Inception_C, self).__init__()
self.branch0 = BasicConv2d(1536, 256, kernel_size=1, stride=1)
self.branch1_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1)
self.branch1_1a = BasicConv2d(384, 256, kernel_size=(1, 3), stride=1, padding=(0, 1))
self.branch1_1b = BasicConv2d(384, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
self.branch2_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1)
self.branch2_1 = BasicConv2d(384, 448, kernel_size=(3, 1), stride=1, padding=(1, 0))
self.branch2_2 = BasicConv2d(448, 512, kernel_size=(1, 3), stride=1, padding=(0, 1))
self.branch2_3a = BasicConv2d(512, 256, kernel_size=(1, 3), stride=1, padding=(0, 1))
self.branch2_3b = BasicConv2d(512, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
BasicConv2d(1536, 256, kernel_size=1, stride=1),
)
def forward(self, x):
x0 = self.branch0(x)
x1_0 = self.branch1_0(x)
x1_1a = self.branch1_1a(x1_0)
x1_1b = self.branch1_1b(x1_0)
x1 = torch.cat((x1_1a, x1_1b), 1)
x2_0 = self.branch2_0(x)
x2_1 = self.branch2_1(x2_0)
x2_2 = self.branch2_2(x2_1)
x2_3a = self.branch2_3a(x2_2)
x2_3b = self.branch2_3b(x2_2)
x2 = torch.cat((x2_3a, x2_3b), 1)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class InceptionV4(nn.Module):
def __init__(self, num_classes=1001):
super(InceptionV4, self).__init__()
        # Special attributes
self.input_space = None
self.input_size = (299, 299, 3)
self.mean = None
self.std = None
# Modules
self.features = nn.Sequential(
BasicConv2d(3, 32, kernel_size=3, stride=2), # 0, layer0
BasicConv2d(32, 32, kernel_size=3, stride=1), # 1, layer0
BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1), # 2
Mixed_3a(), # 3
Mixed_4a(), # 4
Mixed_5a(), # 5
Inception_A(), # 6
Inception_A(), # 7
Inception_A(), # 8
Inception_A(), # 9
Reduction_A(), # 10 # Mixed_6a
Inception_B(), # 11
Inception_B(), # 12
Inception_B(), # 13
Inception_B(), # 14
Inception_B(), # 15
Inception_B(), # 16
Inception_B(), # 17
Reduction_B(), # 18 # Mixed_7a
Inception_C(), # 19
Inception_C(), # 20
Inception_C(), # 21
)
self.last_linear = nn.Linear(1536, num_classes)
def logits(self, features):
# Allows image of any size to be processed
adaptiveAvgPoolWidth = features.shape[2]
x = F.avg_pool2d(features, kernel_size=adaptiveAvgPoolWidth)
x = x.view(x.size(0), -1)
x = self.last_linear(x)
return x
def forward(self, x):
x = self.features(x)
x = self.logits(x)
return x
def inceptionv4(num_classes=1000, pretrained="imagenet"):
if pretrained:
settings = pretrained_settings["inceptionv4"][pretrained]
assert num_classes == settings["num_classes"], "num_classes should be {}, but is {}".format(
settings["num_classes"], num_classes
)
# both 'imagenet'&'imagenet+background' are loaded from same parameters
model = InceptionV4(num_classes=1001)
model.load_state_dict(model_zoo.load_url(settings["url"]))
if pretrained == "imagenet":
new_last_linear = nn.Linear(1536, 1000)
new_last_linear.weight.data = model.last_linear.weight.data[1:]
new_last_linear.bias.data = model.last_linear.bias.data[1:]
model.last_linear = new_last_linear
model.input_space = settings["input_space"]
model.input_size = settings["input_size"]
model.input_range = settings["input_range"]
model.mean = settings["mean"]
model.std = settings["std"]
else:
model = InceptionV4(num_classes=num_classes)
return model
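# Illustrative usage sketch (assumes the pretrained weight URL is reachable):
#   model = inceptionv4(num_classes=1000, pretrained='imagenet')
#   model.eval()
# The resulting classifier expects 299x299 RGB inputs normalized with mean=std=[0.5, 0.5, 0.5].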
| 34.039755
| 114
| 0.577037
|
9e685ed8e30d6df08564c0586a9a82eed1c79e8e
| 292
|
py
|
Python
|
amd64-linux/lib/python/mod_ppc603e_commands.py
|
qiyancos/Simics-3.0.31
|
9bd52d5abad023ee87a37306382a338abf7885f1
|
[
"BSD-4-Clause",
"FSFAP"
] | 1
|
2020-06-15T10:41:18.000Z
|
2020-06-15T10:41:18.000Z
|
amd64-linux/lib/python/mod_ppc603e_turbo_commands.py
|
qiyancos/Simics-3.0.31
|
9bd52d5abad023ee87a37306382a338abf7885f1
|
[
"BSD-4-Clause",
"FSFAP"
] | null | null | null |
amd64-linux/lib/python/mod_ppc603e_turbo_commands.py
|
qiyancos/Simics-3.0.31
|
9bd52d5abad023ee87a37306382a338abf7885f1
|
[
"BSD-4-Clause",
"FSFAP"
] | 3
|
2020-08-10T10:25:02.000Z
|
2021-09-12T01:12:09.000Z
|
import ppc_commands
ppc_model = 'ppc603e'
funcs = {}
ppc_commands.setup_local_functions(ppc_model, funcs)
class_funcs = { ppc_model: funcs }
ppc_commands.enable_generic_ppc_commands(ppc_model)
ppc_commands.enable_fpu_commands(ppc_model)
ppc_commands.enable_classic_tlb_commands(ppc_model)
| 24.333333
| 52
| 0.84589
|
bf4d3a9888846547247bef1e860346e67cd5990f
| 17,230
|
py
|
Python
|
src/genie/libs/parser/asa/show_route.py
|
mirzawaqasahmed/genieparser
|
d6ce6f0cfd31aa6b0eef042f184e273e48b9d4d7
|
[
"Apache-2.0"
] | 2
|
2021-01-27T03:37:39.000Z
|
2021-01-27T03:40:50.000Z
|
src/genie/libs/parser/asa/show_route.py
|
mirzawaqasahmed/genieparser
|
d6ce6f0cfd31aa6b0eef042f184e273e48b9d4d7
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/asa/show_route.py
|
mirzawaqasahmed/genieparser
|
d6ce6f0cfd31aa6b0eef042f184e273e48b9d4d7
|
[
"Apache-2.0"
] | null | null | null |
''' show_route.py
Parser for the following show commands:
* show route
'''
import re
from netaddr import IPAddress
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, \
Any, \
Optional
# =============================================
# Schema for 'show route'
# =============================================
class ShowRouteSchema(MetaParser):
"""Schema for
* show route
"""
schema = {
'vrf': {
'default': {
'address_family': {
'ipv4': {
Optional('routes'): {
Any(): {
'candidate_default': bool,
Optional('subnet'): str,
'route': str,
Optional('active'): bool,
Optional('date'): str,
Optional('route_preference'): int,
Optional('metric'): int,
Optional('source_protocol'): str,
Optional('source_protocol_codes'): str,
Optional('next_hop'): {
Optional('outgoing_interface_name'): {
Any(): { # context_name for interface if there is no next_hop
Optional('outgoing_interface_name'): str
},
},
Optional('next_hop_list'): {
Any(): { # index
Optional('index'): int,
Optional('next_hop'): str,
Optional('outgoing_interface_name'): str
},
},
},
},
},
},
},
},
},
}
"""
Codes: L - local, C - connected, S - static, R - RIP, M - mobile, B - BGP
D - EIGRP, EX - EIGRP external, O - OSPF, IA - OSPF inter area
N1 - OSPF NSSA external type 1, N2 - OSPF NSSA external type 2
E1 - OSPF external type 1, E2 - OSPF external type 2, V - VPN
i - IS-IS, su - IS-IS summary, L1 - IS-IS level-1, L2 - IS-IS level-2
ia - IS-IS, * - candidate default, U - per-user static route
o - ODR, P - periodic downloaded static route, + - replicated route
"""
source_protocol_dict = {
'ospf' : ['O','IA','N1','N2','E1','E2'],
'odr' : ['o'],
'isis' : ['i','su','L1','L2','ia'],
'eigrp' : ['D','EX'],
'static' : ['S'],
'egp' : ['E'],
'mobile' : ['M'],
'local' : ['L'],
'connected' : ['C'],
'bgp' : ['B'],
'per-user static route': ['U'],
'rip' : ['R'],
'igrp': ['I'],
'replicated route': ['+'],
'periodic downloaded static route': ['P'],
'vpn': ['V']
}
# =============================================
# Parser for 'show route'
# =============================================
class ShowRoute(ShowRouteSchema):
"""Parser for
* show route
"""
cli_command = 'show route'
def cli(self, output=None):
if output is None:
# execute command to get output
out = self.device.execute(self.cli_command)
else:
out = output
ret_dict = {}
routes = source_protocol = ''
index = 1
# S* 0.0.0.0 0.0.0.0 via 10.16.251.1, outside
# S 0.0.0.1 0.0.0.0 [10/5] via 10.16.255.1, outside
# O 10.121.65.0 255.255.255.0 [110/20] via 10.121.64.35, 7w0d, inside
# D EX 10.121.70.0 255.255.255.0 [170/345856] via 10.9.193.99, 2w1d, esavpn
p1 = re.compile(r'^(?P<code>(?!is)(?!via)[\w\*]+)\s*'
r'(?P<code2>[A-Z]+)?\s*'
r'(?P<network>\d+.\d+.\d+.\d+)\s*'
r'(?P<subnet>\d+.\d+.\d+.\d+)\s*'
r'(\[(?P<route_preference>[\d\/]+)\])?\s*'
r'(?P<route_check>[\S]*)\s*(?P<next_hop>\d+.\d+.\d+.\d+),\s*'
r'((?P<date>[\w\d]+),)?\s*(?P<context_name>[\S\s]+)$')
# via 10.16.251.2, pod1000
# [110/20] via 10.121.64.34, 7w0d, inside
p2 = re.compile(r'^(?P<network>\d+.\d+.\d+.\d+)?'
r'(\[(?P<route_preference>[\d\/]+)\])?\s*?'
r'(?P<route_check>[\S\s]*)\s'
r'(?P<next_hop>\d+.\d+.\d+.\d+),\s*'
r'((?P<date>[\d\w]+)?,)?\s*(?P<context_name>[\S\s]+)$')
# C 10.10.1.2 255.255.254.0 is directly connected, outside
p3 = re.compile(
r'^\s*(?P<code>(?!is)(?!via)[\w\*]+)\s*(?P<network>\d+.\d+.\d+.\d+)\s*'
            r'(?P<subnet>\d+.\d+.\d+.\d+)\s*(\[(?P<route_preference>[\d\/]+)\])?\s*'
            r'(?P<route_check>[\S\s]*)\s*,\s*(?P<context_name>[\S\s]+)$')
# is directly connected, pod2002
# connected by VPN (advertised), admin
p4 = re.compile(
r'^(?P<route_check>[\S\s]*),\s*(?P<context_name>[\S\s]+)$')
# V 10.10.1.4 255.255.255.255
p5 = re.compile(
r'^\s*(?P<code>(?!is)(?!via)[\w\*]+)\s*(?P<network>\d+.\d+.\d+.\d+)\s*'
            r'(?P<subnet>\d+.\d+.\d+.\d+)\s*(\[(?P<route_preference>[\d\/]+)\])?\s*'
            r'(?P<context_name>\S+)?$')
for line in out.splitlines():
line = line.strip()
# S* 0.0.0.0 0.0.0.0 via 10.16.251.1, outside
# S 0.0.0.1 0.0.0.0 [10/5] via 10.16.255.1, outside
# O 10.121.65.0 255.255.255.0 [110/20] via 10.121.64.35, 7w0d, inside
# D EX 10.121.68.0 255.255.255.0 [170/345856] via 10.9.193.99, 2w1d, esavpn
m = p1.match(line)
if m:
groups = m.groupdict()
dict_ipv4 = ret_dict.setdefault('vrf', {}).setdefault('default', {}). \
setdefault('address_family', {}).setdefault('ipv4', {}). \
setdefault('routes', {})
if 'via' in groups['route_check'] and groups['next_hop']:
if groups['code']:
code = groups['code']
code2 = groups['code2']
if code2:
code = '{} {}'.format(code, code2)
source_protocol_codes = code.strip()
for key, val in super().source_protocol_dict.items():
source_protocol_replaced = re.split('\*',source_protocol_codes)[0].strip()
code = source_protocol_replaced
if source_protocol_replaced in val:
source_protocol = key
if groups['network']:
routes = groups['network']
subnet = groups['subnet']
if '0.0.0.0' == subnet:
prefix_length = str(0)
else:
prefix_length = str(IPAddress(subnet).netmask_bits())
combined_ip = routes+'/'+prefix_length
dict_routes = dict_ipv4.setdefault(combined_ip, {})
dict_routes.update({'active': True})
dict_routes.update({'route': combined_ip})
dict_routes.update({'source_protocol_codes': code})
dict_routes.update({'source_protocol': source_protocol})
if groups['date']:
dict_routes.update({'date': groups['date']})
if '*' in groups['code']:
dict_routes.update({'candidate_default': True})
else:
dict_routes.update({'candidate_default': False})
if groups['route_preference']:
routepreference = groups['route_preference']
if '/' in routepreference:
route_preference = int(routepreference.split('/')[0])
metric = int(routepreference.split('/')[1])
dict_routes.update \
({'route_preference': route_preference})
dict_routes.update({'metric': metric})
else:
dict_routes.update \
({'route_preference': int(routepreference)})
if groups['next_hop']:
if groups['network'] and groups['next_hop']:
index = 1
next_hop = groups['next_hop']
outgoing_interface_name = groups['context_name']
dict_next_hop = dict_routes.setdefault('next_hop', {}). \
setdefault('next_hop_list', {}).setdefault(index, {})
dict_next_hop.update({'index': index})
dict_next_hop.update({'next_hop': next_hop})
dict_next_hop.update \
({'outgoing_interface_name': outgoing_interface_name})
index += 1
continue
# via 10.16.251.2, pod1000
# [110/20] via 10.121.64.34, 7w0d, inside
m = p2.match(line)
if m:
groups = m.groupdict()
if 'via' in groups['route_check']:
if groups['network'] and groups['next_hop']:
index = 1
next_hop = groups['next_hop']
outgoing_interface_name = groups['context_name']
dict_next_hop = dict_routes.setdefault('next_hop', {}). \
setdefault('next_hop_list', {}).setdefault(index, {})
dict_next_hop.update({'index': index})
dict_next_hop.update({'next_hop': next_hop})
dict_next_hop.update \
({'outgoing_interface_name': outgoing_interface_name})
index += 1
continue
# C 10.10.1.2 255.255.254.0 is directly connected, outside
m = p3.match(line)
if m:
groups = m.groupdict()
dict_ipv4 = ret_dict.setdefault('vrf', {}).setdefault('default', {}). \
setdefault('address_family', {}).setdefault('ipv4', {}). \
setdefault('routes', {})
if 'is directly' in groups['route_check']:
if groups['code']:
source_protocol_codes = groups['code'].strip()
for key, val in super().source_protocol_dict.items():
source_protocol_replaced = re.split \
('\*',source_protocol_codes)[0].strip()
if source_protocol_replaced in val:
source_protocol = key
if groups['network']:
routes = groups['network']
subnet = groups['subnet']
if '0.0.0.0' == subnet:
prefix_length = str(0)
else:
prefix_length = str(IPAddress(subnet).netmask_bits())
combined_ip = routes+'/'+prefix_length
dict_routes = dict_ipv4.setdefault(combined_ip, {})
dict_routes.update({'active': True})
dict_routes.update({'route': combined_ip})
dict_routes.update({'source_protocol_codes': groups['code']})
dict_routes.update({'source_protocol': source_protocol})
if '*' in groups['code']:
dict_routes.update({'candidate_default': True})
else:
dict_routes.update({'candidate_default': False})
if groups['route_preference']:
routepreference = groups['route_preference']
if '/' in routepreference:
route_preference = int(routepreference.split('/')[0])
metric = int(routepreference.split('/')[1])
dict_routes.update \
({'route_preference': route_preference})
dict_routes.update({'metric': metric})
else:
dict_routes.update \
({'route_preference': int(routepreference)})
outgoing_interface_name = groups['context_name']
dict_via = dict_routes.setdefault('next_hop', {}). \
setdefault('outgoing_interface_name', {}). \
setdefault(outgoing_interface_name, {})
dict_via.update \
({'outgoing_interface_name': outgoing_interface_name})
continue
# is directly connected, pod2002
# connected by VPN (advertised), admin
m = p4.match(line)
if m:
groups = m.groupdict()
if 'is directly' in groups['route_check'] or 'connected by' \
in groups['route_check']:
outgoing_interface_name = groups['context_name']
dict_via = dict_routes.setdefault('next_hop', {}). \
setdefault('outgoing_interface_name', {}). \
setdefault(outgoing_interface_name, {})
dict_via.update \
({'outgoing_interface_name': outgoing_interface_name})
continue
# V 10.10.1.4 255.255.255.255
m = p5.match(line)
if m:
groups = m.groupdict()
dict_ipv4 = ret_dict.setdefault('vrf', {}).setdefault('default', {}). \
setdefault('address_family', {}).setdefault('ipv4', {}). \
setdefault('routes', {})
if groups['network'] and groups['context_name'] is None:
if groups['code']:
source_protocol_codes = groups['code'].strip()
for key, val in super().source_protocol_dict.items():
source_protocol_replaced = re.split \
('\*',source_protocol_codes)[0].strip()
if source_protocol_replaced in val:
source_protocol = key
if groups['network']:
routes = groups['network']
subnet = groups['subnet']
if '0.0.0.0' == subnet:
prefix_length = str(0)
else:
prefix_length = str(IPAddress(subnet).netmask_bits())
combined_ip = routes+'/'+prefix_length
dict_routes = dict_ipv4.setdefault(combined_ip, {})
dict_routes.update({'active': True})
dict_routes.update({'route': combined_ip})
dict_routes.update({'source_protocol_codes': groups['code']})
dict_routes.update({'source_protocol': source_protocol})
if '*' in groups['code']:
dict_routes.update({'candidate_default': True})
else:
dict_routes.update({'candidate_default': False})
if groups['route_preference']:
routepreference = groups['route_preference']
if '/' in routepreference:
route_preference = int(routepreference.split('/')[0])
metric = int(routepreference.split('/')[1])
dict_routes.update \
({'route_preference': route_preference})
dict_routes.update({'metric': metric})
else:
dict_routes.update \
({'route_preference': int(routepreference)})
continue
return ret_dict
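# Illustrative output sketch (values are hypothetical, keys follow ShowRouteSchema):
# parsing "S* 0.0.0.0 0.0.0.0 via 10.16.251.1, outside" would yield roughly
#   {'vrf': {'default': {'address_family': {'ipv4': {'routes': {
#       '0.0.0.0/0': {'active': True, 'candidate_default': True,
#                     'route': '0.0.0.0/0',
#                     'source_protocol': 'static', 'source_protocol_codes': 'S',
#                     'next_hop': {'next_hop_list': {1: {'index': 1,
#                                                        'next_hop': '10.16.251.1',
#                                                        'outgoing_interface_name': 'outside'}}}}}}}}}}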
| 48.948864
| 102
| 0.41805
|
9074570f0c9f451efac613f369675a3bd92a6fb6
| 14,377
|
py
|
Python
|
src/kgm/training/matching.py
|
mberr/ea-sota-comparison
|
0f7bb679b61675bfb65a2cd7462854968d876df2
|
[
"MIT"
] | 12
|
2020-11-03T01:31:30.000Z
|
2021-04-06T01:21:41.000Z
|
src/kgm/training/matching.py
|
j-huthmacher/ea-sota-comparison
|
0f7bb679b61675bfb65a2cd7462854968d876df2
|
[
"MIT"
] | null | null | null |
src/kgm/training/matching.py
|
j-huthmacher/ea-sota-comparison
|
0f7bb679b61675bfb65a2cd7462854968d876df2
|
[
"MIT"
] | 3
|
2021-03-06T13:00:32.000Z
|
2022-03-05T11:53:06.000Z
|
# coding=utf-8
"""Training loops for KG matching models."""
import logging
from abc import abstractmethod
from typing import Any, Iterable, List, Mapping, Optional, Sequence, Tuple, Type, Union
import torch
from torch.optim import Optimizer
from torch.utils import data
from .base import BaseTrainer, TrainerCallback
from ..data import KnowledgeGraphAlignmentDataset, MatchSideEnum, SIDES
from ..eval import evaluate_matching_model
from ..models import KGMatchingModel
from ..modules import MatchingLoss, Similarity
from ..utils.torch_utils import maximize_memory_utilization
from ..utils.types import IDAlignment, NodeIDs
logger = logging.getLogger(name=__name__)
class AlignmentTrainerCallback(TrainerCallback):
"""Abstract class for trainer callbacks."""
@abstractmethod
def on_epoch_start(self, epoch: int, trainer: 'AlignmentModelTrainer') -> None:
"""
Perform actions before the epoch starts.
:param epoch:
The epoch.
:param trainer:
The trainer.
"""
raise NotImplementedError
class NodeSampler:
"""Abstract class for node sampler."""
@abstractmethod
def sample(
self,
positive_batch: IDAlignment,
) -> NodeIDs:
"""
Sample negative node indices for each side.
positive pair:
(positive_batch[0, i], positive_batch[1, i])
negative_pair:
            (negative_batch[0, i, j], positive_batch[1, i])
:param positive_batch: shape: (2, pos_batch_size)
The batch of aligned nodes.
:return: shape: (2, pos_batch_size, num_negatives)
The negative node IDs. result[0] has to be combined with positive_batch[1] for a valid pair.
"""
raise NotImplementedError
class RandomNodeSampler(NodeSampler):
"""Randomly select additional nodes."""
def __init__(
self,
num_nodes: Mapping[MatchSideEnum, int],
num_negatives: int,
):
"""
Initialize the sampler.
:param num_nodes:
The number of nodes on each side.
:param num_negatives: >=0
            The absolute number of negative samples for each positive one.
"""
self.num_nodes = num_nodes
self.num_negatives = num_negatives
def sample(
self,
positive_batch: IDAlignment,
) -> NodeIDs: # noqa: D102
return torch.stack([
torch.randint(self.num_nodes[side], size=(positive_batch.shape[1], self.num_negatives))
for side in SIDES
], dim=0)
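# Hypothetical usage sketch (not part of the original module): the sampler takes a
# (2, batch) tensor of aligned node ids and returns num_negatives random node ids
# per side; negatives[0, i, j] is paired with positives[1, i] (and vice versa).
#   sampler = RandomNodeSampler(num_nodes={side: 100 for side in SIDES}, num_negatives=4)
#   positives = torch.randint(100, size=(2, 8))
#   negatives = sampler.sample(positive_batch=positives)   # shape: (2, 8, 4)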
@torch.no_grad()
def _all_knn(
node_repr: Mapping[MatchSideEnum, torch.FloatTensor],
similarity: Similarity,
batch_size: int,
k: int,
num_nodes: Mapping[MatchSideEnum, int],
) -> Mapping[MatchSideEnum, NodeIDs]:
"""
Get kNN for all nodes.
:param node_repr: shape: (num_nodes_on_side, dim)
The node representations.
:param similarity:
The similarity measure.
:param batch_size: >0
The batch size.
:param k: >0
The number of nearest neighbors to return.
:param num_nodes:
The number of nodes on each side.
:return:
A mapping from side to an array of shape (num_nodes, k) with node IDs of the kNN.
"""
storage_device = torch.device("cpu")
compute_device = next(iter(node_repr.values())).device
n, m = [num_nodes[side] for side in SIDES]
left, right = [node_repr[side] for side in SIDES]
# allocate buffers
hard_right = torch.empty(n, k, dtype=torch.long, device=storage_device)
top_v = torch.full(size=(k, m), fill_value=float('-inf'), device=compute_device)
top_i = -torch.ones(k, m, dtype=torch.long, device=compute_device)
# batched processing
for start in range(0, n, batch_size):
stop = min(start + batch_size, n)
# compute batch similarity, shape: (b, m)
batch_sim = similarity.all_to_all(
left=left[start:stop],
right=right,
)
hard_right[start:stop] = batch_sim.topk(k=k, dim=1, largest=True).indices.to(storage_device)
# combine with running top-k, shape: (b + k, m)
top_v, b_top_i = torch.cat([batch_sim, top_v], dim=0).topk(k=k, dim=0, largest=True)
real_batch_size = stop - start
mask = b_top_i < real_batch_size
top_i[mask] = b_top_i[mask]
assert (top_i >= 0).all()
return dict(zip(SIDES, (top_i.t().to(storage_device), hard_right)))
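# Shape sketch for _all_knn (an illustration, not in the original file): with n nodes
# on the left side and m on the right, the returned mapping is
#   {left:  LongTensor of shape (m, k),   # for each right node, its k most similar left nodes
#    right: LongTensor of shape (n, k)}   # for each left node, its k most similar right nodes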
class HardNegativeSampler(NodeSampler, AlignmentTrainerCallback):
"""Select hard negatives."""
hard_negatives: Mapping[MatchSideEnum, NodeIDs]
def __init__(self, num_negatives: int, update_frequency: int = 10):
"""Initialize the sampler."""
self.hard_negatives = None
self.num_negatives = num_negatives
self.batch_size = None
self.update_frequency = update_frequency
@torch.no_grad()
def on_epoch_start(
self,
epoch: int,
trainer: 'AlignmentModelTrainer',
) -> None: # noqa: D102
if epoch % self.update_frequency != 0:
return
logger.debug('Updating hard negatives.')
node_repr = trainer.model.get_node_representations()
num_nodes = {
side: x.shape[0]
for side, x in node_repr.items()
}
self.hard_negatives, self.batch_size = maximize_memory_utilization(
_all_knn,
parameter_name="batch_size",
parameter_max_value=self.batch_size or max(v.shape[0] for v in node_repr.values()),
node_repr=node_repr,
similarity=trainer.similarity,
k=self.num_negatives,
num_nodes=num_nodes,
)
def sample(
self,
positive_batch: IDAlignment,
) -> NodeIDs: # noqa: D102
if self.hard_negatives is None:
raise AssertionError('hard negatives have never been updated.')
# look-up hard negatives
return torch.stack(
tensors=[
self.hard_negatives[side][pos]
for side, pos in zip(SIDES, positive_batch.flip(0))
],
dim=0,
)
#: A 3-tuple:
# * indices (global)
# * positives (local)
# * negatives (local)
AlignmentBatch = Tuple[Optional[Mapping[MatchSideEnum, NodeIDs]], IDAlignment, Optional[NodeIDs]]
class AlignmentBatchCollator:
"""A custom collator for adding negative nodes to a batch of positives."""
def __init__(
self,
node_sampler: Optional[NodeSampler] = None,
):
"""
Initialize the collator.
:param node_sampler:
The node sampler.
"""
self.sampler = node_sampler
def collate(
self,
positives: List[Tuple[IDAlignment]],
) -> AlignmentBatch:
"""
Collate a batch.
:param positives:
A tuple of positive pairs.
:return:
            A tuple of (per-side unique batch node indices, batch-local positive pairs, batch-local negatives).
"""
global_positives: IDAlignment = torch.stack([p[0] for p in positives], dim=-1)
# no sampling
if self.sampler is None:
return None, global_positives, None
global_negatives = self.sampler.sample(positive_batch=global_positives)
# Translate to batch local indices
indices = dict()
local_positives = []
local_negatives = []
for side, pos_on_side, neg_on_side in zip(SIDES, global_positives, global_negatives):
# There are positive indices P and negative indices N
# There may be duplicates
# * in P, due to 1-n alignments
# * in N, due to random sampling with replacement
# * between P and N due to not filtering in N
# We do not want to re-compute representations; thus we only keep the unique indices.
indices_on_side = torch.cat([pos_on_side.unsqueeze(dim=-1), neg_on_side], dim=-1)
indices[side], inverse = indices_on_side.unique(sorted=False, return_inverse=True)
local_positives.append(inverse[:, 0])
local_negatives.append(inverse[:, 1:])
return (
indices,
torch.stack(local_positives, dim=0),
torch.stack(local_negatives, dim=0),
)
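# Illustrative collation sketch (toy values, not from the original file): with
#   global_positives = [[3, 3], [7, 9]]           # two aligned pairs sharing left node 3
#   global_negatives = [[[5], [3]], [[9], [9]]]   # one sampled negative per positive
# the collator returns per-side unique indices (e.g. left -> [3, 5], right -> [7, 9])
# plus positives/negatives re-expressed as positions into those unique tensors, so
# each node representation is computed only once per batch.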
def prepare_alignment_batch_data_loader(
dataset: KnowledgeGraphAlignmentDataset,
positive_batch_size: Optional[int] = None,
negative_sampler: Optional[NodeSampler] = None,
num_workers: int = 0,
) -> data.DataLoader:
"""
Prepare a PyTorch data loader for alignment model training.
:param dataset:
The knowledge graph alignment dataset.
:param positive_batch_size:
The batch size for alignment pairs.
:param negative_sampler:
The sampler for additional nodes from the graphs.
:param num_workers:
The number of worker processes.
.. seealso ::
torch.utils.data.DataLoader
:return:
The data loader.
"""
positives = data.TensorDataset(dataset.alignment.train.t())
if positive_batch_size is None:
positive_batch_size = dataset.alignment.num_train
collator = AlignmentBatchCollator(node_sampler=negative_sampler)
return data.DataLoader(
dataset=positives,
batch_size=positive_batch_size,
shuffle=True,
num_workers=num_workers,
collate_fn=collator.collate,
pin_memory=True,
)
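# Hypothetical usage (my_dataset and num_nodes_per_side are assumptions):
#   sampler = RandomNodeSampler(num_nodes=num_nodes_per_side, num_negatives=8)
#   loader = prepare_alignment_batch_data_loader(dataset=my_dataset,
#                                                positive_batch_size=256,
#                                                negative_sampler=sampler)
#   for indices, positives, negatives in loader:
#       ...  # indices: per-side unique node ids; positives/negatives: batch-local positions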
class AlignmentModelTrainer(BaseTrainer[AlignmentBatch]):
"""A wrapper around a model encapsulating training and evaluation."""
#: The model instance
model: KGMatchingModel
#: The similarity instance
similarity: Similarity
#: The loss instance
loss: MatchingLoss
def __init__(
self,
model: KGMatchingModel,
similarity: Similarity,
dataset: KnowledgeGraphAlignmentDataset,
loss: MatchingLoss,
batch_size: Optional[int] = None,
eval_frequency: Optional[int] = None,
eval_batch_size: Optional[int] = None,
optimizer_cls: Type[Optimizer] = None,
optimizer_kwargs: Optional[Mapping[str, Any]] = None,
clip_grad_norm: Optional[float] = None,
early_stopping_key: Optional[Union[str, Sequence[str]]] = None,
larger_is_better: bool = False,
patience: int = 3,
minimum_relative_difference: float = 0.,
accumulate_gradients: int = 1,
device: Optional[torch.device] = None,
negative_sampler: Optional[NodeSampler] = None,
num_workers: int = 0,
):
"""
Initialize a new training loop.
:param model:
The model.
:param similarity:
The similarity.
:param dataset:
The dataset.
:param loss:
The loss instance.
:param batch_size:
The batch size, or None for full-batch training.
:param eval_frequency:
The evaluation frequency.
:param eval_batch_size:
The maximum batch size used for evaluation. None leads to automatic optimization.
:param optimizer_cls:
The optimizer class.
:param optimizer_kwargs:
Keyword-based arguments for the optimizer.
:param clip_grad_norm:
Whether to apply gradient clipping (norm-based).
:param early_stopping_key:
The evaluation key used for early stopping, a sequence of keys to address a value in the nested dictionary
of evaluation results.
:param larger_is_better:
Whether a larger value corresponds to a better result.
:param patience:
The patience, i.e. number of steps without improvement to wait until the training is stopped.
:param minimum_relative_difference:
The minimum relative difference in the metric value to consider an improvement.
:param accumulate_gradients:
Accumulate gradients over batches. This can be used to simulate a larger batch size, while keeping the
memory footprint small.
:param device:
The device on which to train.
:param num_workers:
The number of workers to use for preparing batches.
"""
super().__init__(
model=model,
train_batch_size=batch_size,
eval_frequency=eval_frequency,
eval_batch_size=eval_batch_size,
optimizer_cls=optimizer_cls,
optimizer_kwargs=optimizer_kwargs,
clip_grad_norm=clip_grad_norm,
early_stopping_key=early_stopping_key,
larger_is_better=larger_is_better,
patience=patience,
minimum_relative_difference=minimum_relative_difference,
accumulate_gradients=accumulate_gradients,
device=device,
)
self.similarity = similarity
self.loss = loss
self.dataset = dataset
self.alignment = dataset.alignment
self.num_workers = num_workers
self.negative_sampler = negative_sampler
if isinstance(negative_sampler, TrainerCallback):
self.register_callbacks(negative_sampler)
def _iter_batches(self) -> Iterable[AlignmentBatch]: # noqa: D102
return prepare_alignment_batch_data_loader(
dataset=self.dataset,
positive_batch_size=self.train_batch_size,
negative_sampler=self.negative_sampler,
num_workers=self.num_workers,
)
def _train_one_batch(self, batch: AlignmentBatch) -> Tuple[torch.Tensor, int]:
# Unpack
batch_node_indices, batch_alignment, negatives = batch
# Calculate node representations
node_repr = self.model(indices=batch_node_indices)
# return batch loss
return self.loss(
alignment=batch_alignment,
representations=node_repr,
negatives=negatives,
), batch_alignment.shape[1]
def _eval(self) -> Tuple[Mapping[str, Any], Optional[int]]:
"""Evaluate the model."""
return evaluate_matching_model(
model=self.model,
alignments=self.alignment.to_dict(),
similarity=self.similarity,
eval_batch_size=self.eval_batch_size,
)
| 33.357309
| 118
| 0.63379
|
0aa58f52d5730a0d3bb4700f372eb29fa124963b
| 2,431
|
py
|
Python
|
comodit_client/console/__main__.py
|
AymericDuvivier/comodit-client
|
cd92b43240181ab7178545e48ca854ee6bc86bfc
|
[
"MIT"
] | 1
|
2015-01-20T17:24:34.000Z
|
2015-01-20T17:24:34.000Z
|
comodit_client/console/__main__.py
|
AymericDuvivier/comodit-client
|
cd92b43240181ab7178545e48ca854ee6bc86bfc
|
[
"MIT"
] | null | null | null |
comodit_client/console/__main__.py
|
AymericDuvivier/comodit-client
|
cd92b43240181ab7178545e48ca854ee6bc86bfc
|
[
"MIT"
] | 24
|
2016-09-07T15:28:00.000Z
|
2021-12-08T16:03:16.000Z
|
# coding: utf-8
from __future__ import print_function
import sys, argparse
from comodit_client.console.core import ComodITConsole
from comodit_client.config import Config, ConfigException
if __name__ == '__main__':
# Load configuration
try:
config = Config()
except ConfigException as e:
print("Configuration error:")
print(e.msg)
exit(-1)
parser = argparse.ArgumentParser(formatter_class = argparse.RawDescriptionHelpFormatter,
description = "This is the ComodIT console.")
parser.add_argument('-P', "--profile", dest = "profile", help = "A profile from comoditrc file", default = None)
parser.add_argument('-a', "--api", dest = "api", help = "URL of the API", default = None)
parser.add_argument('-u', "--user", dest = "username", help = "username on comodit server", default = None)
parser.add_argument('-p', "--pass", dest = "password", help = "password on comodit server", default = None)
parser.add_argument('-i', "--insecure", dest = "insecure", help = "Tells the client to ignore self-signed certificates", action = "store_true", default = False)
parser.add_argument('-f', "--file", dest = "file", help = "Tells the client to execute the content of given file", default = None)
parser.add_argument('-d', "--debug", dest = "debug", help = "Tells the client to work in debug mode, causing every exception to be considered as an error", action = "store_true", default = False)
options = parser.parse_known_args(args = sys.argv)[0]
# Use profile data to configure connection.
if not options.api is None:
api = options.api
else:
api = config.get_api(options.profile)
options.api = api
if not options.username is None:
username = options.username
else:
username = config.get_username(options.profile)
options.username = username
if not options.password is None:
password = options.password
else:
password = config.get_password(options.profile)
options.password = password
if (username == None) or (api == None) or (password == None):
raise Exception("No credentials found")
console = ComodITConsole(options.debug)
console.connect(api, username, password, options.insecure)
if options.file is None:
console.interact()
else:
console.execute_file(options.file)
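# Hypothetical invocation (host, credentials and file name are placeholders):
#   python -m comodit_client.console --api https://comodit.example.com/api \
#       -u admin -p secret -f setup.commands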
| 41.913793
| 199
| 0.663924
|
2cd98c5a5cb1fd25700eafa2a0ae609c39832c05
| 39,296
|
py
|
Python
|
awx_collection/plugins/module_utils/controller_api.py
|
SysBind/awx
|
2e0dd61bb63d729054e97b9cf3560b3f6bc63d4f
|
[
"Apache-2.0"
] | null | null | null |
awx_collection/plugins/module_utils/controller_api.py
|
SysBind/awx
|
2e0dd61bb63d729054e97b9cf3560b3f6bc63d4f
|
[
"Apache-2.0"
] | null | null | null |
awx_collection/plugins/module_utils/controller_api.py
|
SysBind/awx
|
2e0dd61bb63d729054e97b9cf3560b3f6bc63d4f
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from .controller_module import ControllerModule
from ansible.module_utils.urls import Request, SSLValidationError, ConnectionError
from ansible.module_utils.six import PY2
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils.six.moves.http_cookiejar import CookieJar
from distutils.version import LooseVersion as Version
import time
from json import loads, dumps
class ControllerAPIModule(ControllerModule):
# TODO: Move the collection version check into controller_module.py
# This gets set by the make process so whatever is in here is irrelevant
_COLLECTION_VERSION = "0.0.1-devel"
_COLLECTION_TYPE = "awx"
# This maps the collections type (awx/tower) to the values returned by the API
# Those values can be found in awx/api/generics.py line 204
collection_to_version = {
'awx': 'AWX',
'controller': 'Red Hat Automation Platform Controller',
}
session = None
IDENTITY_FIELDS = {'users': 'username', 'workflow_job_template_nodes': 'identifier', 'instances': 'hostname'}
ENCRYPTED_STRING = "$encrypted$"
def __init__(self, argument_spec, direct_params=None, error_callback=None, warn_callback=None, **kwargs):
kwargs['supports_check_mode'] = True
super().__init__(
argument_spec=argument_spec, direct_params=direct_params, error_callback=error_callback, warn_callback=warn_callback, **kwargs
)
self.session = Request(cookies=CookieJar(), validate_certs=self.verify_ssl)
if 'update_secrets' in self.params:
self.update_secrets = self.params.pop('update_secrets')
else:
self.update_secrets = True
@staticmethod
def param_to_endpoint(name):
exceptions = {'inventory': 'inventories', 'target_team': 'teams', 'workflow': 'workflow_job_templates'}
return exceptions.get(name, '{0}s'.format(name))
@staticmethod
def get_name_field_from_endpoint(endpoint):
return ControllerAPIModule.IDENTITY_FIELDS.get(endpoint, 'name')
def get_item_name(self, item, allow_unknown=False):
if item:
if 'name' in item:
return item['name']
for field_name in ControllerAPIModule.IDENTITY_FIELDS.values():
if field_name in item:
return item[field_name]
if item.get('type', None) in ('o_auth2_access_token', 'credential_input_source'):
return item['id']
if allow_unknown:
return 'unknown'
if item:
self.exit_json(msg='Cannot determine identity field for {0} object.'.format(item.get('type', 'unknown')))
else:
self.exit_json(msg='Cannot determine identity field for Undefined object.')
def head_endpoint(self, endpoint, *args, **kwargs):
return self.make_request('HEAD', endpoint, **kwargs)
def get_endpoint(self, endpoint, *args, **kwargs):
return self.make_request('GET', endpoint, **kwargs)
def patch_endpoint(self, endpoint, *args, **kwargs):
# Handle check mode
if self.check_mode:
self.json_output['changed'] = True
self.exit_json(**self.json_output)
return self.make_request('PATCH', endpoint, **kwargs)
def post_endpoint(self, endpoint, *args, **kwargs):
# Handle check mode
if self.check_mode:
self.json_output['changed'] = True
self.exit_json(**self.json_output)
return self.make_request('POST', endpoint, **kwargs)
def delete_endpoint(self, endpoint, *args, **kwargs):
# Handle check mode
if self.check_mode:
self.json_output['changed'] = True
self.exit_json(**self.json_output)
return self.make_request('DELETE', endpoint, **kwargs)
def get_all_endpoint(self, endpoint, *args, **kwargs):
response = self.get_endpoint(endpoint, *args, **kwargs)
if 'next' not in response['json']:
raise RuntimeError('Expected list from API at {0}, got: {1}'.format(endpoint, response))
next_page = response['json']['next']
if response['json']['count'] > 10000:
self.fail_json(msg='The number of items being queried for is higher than 10,000.')
while next_page is not None:
next_response = self.get_endpoint(next_page)
response['json']['results'] = response['json']['results'] + next_response['json']['results']
next_page = next_response['json']['next']
response['json']['next'] = next_page
return response
def get_one(self, endpoint, name_or_id=None, allow_none=True, **kwargs):
new_kwargs = kwargs.copy()
if name_or_id:
name_field = self.get_name_field_from_endpoint(endpoint)
new_data = kwargs.get('data', {}).copy()
if name_field in new_data:
self.fail_json(msg="You can't specify the field {0} in your search data if using the name_or_id field".format(name_field))
try:
new_data['or__id'] = int(name_or_id)
new_data['or__{0}'.format(name_field)] = name_or_id
except ValueError:
                # If we get a ValueError, name_or_id was not an integer, so fall back to a plain name search
new_data[name_field] = name_or_id
new_kwargs['data'] = new_data
response = self.get_endpoint(endpoint, **new_kwargs)
if response['status_code'] != 200:
fail_msg = "Got a {0} response when trying to get one from {1}".format(response['status_code'], endpoint)
if 'detail' in response.get('json', {}):
fail_msg += ', detail: {0}'.format(response['json']['detail'])
self.fail_json(msg=fail_msg)
if 'count' not in response['json'] or 'results' not in response['json']:
self.fail_json(msg="The endpoint did not provide count and results")
if response['json']['count'] == 0:
if allow_none:
return None
else:
self.fail_wanted_one(response, endpoint, new_kwargs.get('data'))
elif response['json']['count'] > 1:
if name_or_id:
# Since we did a name or ID search and got > 1 return something if the id matches
for asset in response['json']['results']:
if str(asset['id']) == name_or_id:
return asset
# We got > 1 and either didn't find something by ID (which means multiple names)
# Or we weren't running with a or search and just got back too many to begin with.
self.fail_wanted_one(response, endpoint, new_kwargs.get('data'))
return response['json']['results'][0]
def fail_wanted_one(self, response, endpoint, query_params):
sample = response.copy()
if len(sample['json']['results']) > 1:
sample['json']['results'] = sample['json']['results'][:2] + ['...more results snipped...']
url = self.build_url(endpoint, query_params)
display_endpoint = url.geturl()[len(self.host):] # truncate to not include the base URL
self.fail_json(
msg="Request to {0} returned {1} items, expected 1".format(display_endpoint, response['json']['count']),
query=query_params,
response=sample,
total_results=response['json']['count'],
)
def get_exactly_one(self, endpoint, name_or_id=None, **kwargs):
return self.get_one(endpoint, name_or_id=name_or_id, allow_none=False, **kwargs)
def resolve_name_to_id(self, endpoint, name_or_id):
return self.get_exactly_one(endpoint, name_or_id)['id']
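    # Illustrative usage from inside a module implementation (values are hypothetical):
    #   org_id = module.resolve_name_to_id('organizations', 'Default')
    #   project = module.get_one('projects', name_or_id='42')   # matches on id or name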
def make_request(self, method, endpoint, *args, **kwargs):
# In case someone is calling us directly; make sure we were given a method, let's not just assume a GET
if not method:
raise Exception("The HTTP method must be defined")
if method in ['POST', 'PUT', 'PATCH']:
url = self.build_url(endpoint)
else:
url = self.build_url(endpoint, query_params=kwargs.get('data'))
# Extract the headers, this will be used in a couple of places
headers = kwargs.get('headers', {})
# Authenticate to AWX (if we don't have a token and if not already done so)
if not self.oauth_token and not self.authenticated:
# This method will set a cookie in the cookie jar for us and also an oauth_token
self.authenticate(**kwargs)
if self.oauth_token:
# If we have a oauth token, we just use a bearer header
headers['Authorization'] = 'Bearer {0}'.format(self.oauth_token)
if method in ['POST', 'PUT', 'PATCH']:
headers.setdefault('Content-Type', 'application/json')
kwargs['headers'] = headers
data = None # Important, if content type is not JSON, this should not be dict type
if headers.get('Content-Type', '') == 'application/json':
data = dumps(kwargs.get('data', {}))
try:
response = self.session.open(method, url.geturl(), headers=headers, validate_certs=self.verify_ssl, follow_redirects=True, data=data)
except (SSLValidationError) as ssl_err:
self.fail_json(msg="Could not establish a secure connection to your host ({1}): {0}.".format(url.netloc, ssl_err))
except (ConnectionError) as con_err:
self.fail_json(msg="There was a network error of some kind trying to connect to your host ({1}): {0}.".format(url.netloc, con_err))
except (HTTPError) as he:
# Sanity check: Did the server send back some kind of internal error?
if he.code >= 500:
self.fail_json(msg='The host sent back a server error ({1}): {0}. Please check the logs and try again later'.format(url.path, he))
# Sanity check: Did we fail to authenticate properly? If so, fail out now; this is always a failure.
elif he.code == 401:
self.fail_json(msg='Invalid authentication credentials for {0} (HTTP 401).'.format(url.path))
# Sanity check: Did we get a forbidden response, which means that the user isn't allowed to do this? Report that.
elif he.code == 403:
self.fail_json(msg="You don't have permission to {1} to {0} (HTTP 403).".format(url.path, method))
# Sanity check: Did we get a 404 response?
            # Requests with primary keys will return a 404 if there is no matching resource, and we want to consistently trap these.
elif he.code == 404:
if kwargs.get('return_none_on_404', False):
return None
self.fail_json(msg='The requested object could not be found at {0}.'.format(url.path))
# Sanity check: Did we get a 405 response?
# A 405 means we used a method that isn't allowed. Usually this is a bad request, but it requires special treatment because the
# API sends it as a logic error in a few situations (e.g. trying to cancel a job that isn't running).
elif he.code == 405:
self.fail_json(msg="Cannot make a request with the {0} method to this endpoint {1}".format(method, url.path))
# Sanity check: Did we get some other kind of error? If so, write an appropriate error message.
elif he.code >= 400:
# We are going to return a 400 so the module can decide what to do with it
page_data = he.read()
try:
return {'status_code': he.code, 'json': loads(page_data)}
# JSONDecodeError only available on Python 3.5+
except ValueError:
return {'status_code': he.code, 'text': page_data}
elif he.code == 204 and method == 'DELETE':
# A 204 is a normal response for a delete function
pass
else:
self.fail_json(msg="Unexpected return code when calling {0}: {1}".format(url.geturl(), he))
except (Exception) as e:
self.fail_json(msg="There was an unknown error when trying to connect to {2}: {0} {1}".format(type(e).__name__, e, url.geturl()))
if not self.version_checked:
            # In PY3 we get back an HTTPResponse object but PY2 returns an addinfourl
# First try to get the headers in PY3 format and then drop down to PY2.
try:
controller_type = response.getheader('X-API-Product-Name', None)
controller_version = response.getheader('X-API-Product-Version', None)
except Exception:
controller_type = response.info().getheader('X-API-Product-Name', None)
controller_version = response.info().getheader('X-API-Product-Version', None)
parsed_collection_version = Version(self._COLLECTION_VERSION).version
parsed_controller_version = Version(controller_version).version
if controller_type == 'AWX':
collection_compare_ver = parsed_collection_version[0]
controller_compare_ver = parsed_controller_version[0]
else:
collection_compare_ver = "{0}.{1}".format(parsed_collection_version[0], parsed_collection_version[1])
controller_compare_ver = '{0}.{1}'.format(parsed_controller_version[0], parsed_controller_version[1])
if self._COLLECTION_TYPE not in self.collection_to_version or self.collection_to_version[self._COLLECTION_TYPE] != controller_type:
self.warn("You are using the {0} version of this collection but connecting to {1}".format(self._COLLECTION_TYPE, controller_type))
elif collection_compare_ver != controller_compare_ver:
self.warn(
"You are running collection version {0} but connecting to {2} version {1}".format(
self._COLLECTION_VERSION, controller_version, controller_type
)
)
self.version_checked = True
response_body = ''
try:
response_body = response.read()
except (Exception) as e:
self.fail_json(msg="Failed to read response body: {0}".format(e))
response_json = {}
if response_body and response_body != '':
try:
response_json = loads(response_body)
except (Exception) as e:
self.fail_json(msg="Failed to parse the response json: {0}".format(e))
if PY2:
status_code = response.getcode()
else:
status_code = response.status
return {'status_code': status_code, 'json': response_json}
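    # Illustrative return value of make_request (body content is an assumption):
    #   {'status_code': 200, 'json': {'count': 1, 'results': [...]}}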
def authenticate(self, **kwargs):
if self.username and self.password:
# Attempt to get a token from /api/v2/tokens/ by giving it our username/password combo
# If we have a username and password, we need to get a session cookie
login_data = {
"description": "Automation Platform Controller Module Token",
"application": None,
"scope": "write",
}
            # Post to the tokens endpoint with basic auth to try and get a token
api_token_url = (self.url._replace(path='/api/v2/tokens/')).geturl()
try:
response = self.session.open(
'POST',
api_token_url,
validate_certs=self.verify_ssl,
follow_redirects=True,
force_basic_auth=True,
url_username=self.username,
url_password=self.password,
data=dumps(login_data),
headers={'Content-Type': 'application/json'},
)
except HTTPError as he:
try:
resp = he.read()
except Exception as e:
resp = 'unknown {0}'.format(e)
self.fail_json(msg='Failed to get token: {0}'.format(he), response=resp)
except (Exception) as e:
# Sanity check: Did the server send back some kind of internal error?
self.fail_json(msg='Failed to get token: {0}'.format(e))
token_response = None
try:
token_response = response.read()
response_json = loads(token_response)
self.oauth_token_id = response_json['id']
self.oauth_token = response_json['token']
except (Exception) as e:
self.fail_json(msg="Failed to extract token information from login response: {0}".format(e), **{'response': token_response})
# If we have neither of these, then we can try un-authenticated access
self.authenticated = True
def delete_if_needed(self, existing_item, on_delete=None, auto_exit=True):
# This will exit from the module on its own.
# If the method successfully deletes an item and on_delete param is defined,
        # the on_delete parameter will be called as a method passing in this object and the json from the response
# This will return one of two things:
# 1. None if the existing_item is not defined (so no delete needs to happen)
        # 2. The response from AWX from calling the delete on the endpoint. It's up to you to process the response and exit from the module
# Note: common error codes from the AWX API can cause the module to fail
if existing_item:
# If we have an item, we can try to delete it
try:
item_url = existing_item['url']
item_type = existing_item['type']
item_id = existing_item['id']
item_name = self.get_item_name(existing_item, allow_unknown=True)
except KeyError as ke:
self.fail_json(msg="Unable to process delete of item due to missing data {0}".format(ke))
response = self.delete_endpoint(item_url)
if response['status_code'] in [202, 204]:
if on_delete:
on_delete(self, response['json'])
self.json_output['changed'] = True
self.json_output['id'] = item_id
self.exit_json(**self.json_output)
if auto_exit:
self.exit_json(**self.json_output)
else:
return self.json_output
else:
if 'json' in response and '__all__' in response['json']:
self.fail_json(msg="Unable to delete {0} {1}: {2}".format(item_type, item_name, response['json']['__all__'][0]))
elif 'json' in response:
# This is from a project delete (if there is an active job against it)
if 'error' in response['json']:
self.fail_json(msg="Unable to delete {0} {1}: {2}".format(item_type, item_name, response['json']['error']))
else:
self.fail_json(msg="Unable to delete {0} {1}: {2}".format(item_type, item_name, response['json']))
else:
self.fail_json(msg="Unable to delete {0} {1}: {2}".format(item_type, item_name, response['status_code']))
else:
if auto_exit:
self.exit_json(**self.json_output)
else:
return self.json_output
def modify_associations(self, association_endpoint, new_association_list):
# if we got None instead of [] we are not modifying the association_list
if new_association_list is None:
return
# First get the existing associations
response = self.get_all_endpoint(association_endpoint)
existing_associated_ids = [association['id'] for association in response['json']['results']]
# Disassociate anything that is in existing_associated_ids but not in new_association_list
ids_to_remove = list(set(existing_associated_ids) - set(new_association_list))
for an_id in ids_to_remove:
response = self.post_endpoint(association_endpoint, **{'data': {'id': int(an_id), 'disassociate': True}})
if response['status_code'] == 204:
self.json_output['changed'] = True
else:
self.fail_json(msg="Failed to disassociate item {0}".format(response['json'].get('detail', response['json'])))
# Associate anything that is in new_association_list but not in `association`
for an_id in list(set(new_association_list) - set(existing_associated_ids)):
response = self.post_endpoint(association_endpoint, **{'data': {'id': int(an_id)}})
if response['status_code'] == 204:
self.json_output['changed'] = True
else:
self.fail_json(msg="Failed to associate item {0}".format(response['json'].get('detail', response['json'])))
def copy_item(self, existing_item, copy_from_name_or_id, new_item_name, endpoint=None, item_type='unknown', copy_lookup_data=None):
if existing_item is not None:
self.warn(msg="A {0} with the name {1} already exists.".format(item_type, new_item_name))
self.json_output['changed'] = False
self.json_output['copied'] = False
return existing_item
# Lookup existing item to copy from
copy_from_lookup = self.get_one(endpoint, name_or_id=copy_from_name_or_id, **{'data': copy_lookup_data})
# Fail if the copy_from_lookup is empty
if copy_from_lookup is None:
self.fail_json(msg="A {0} with the name {1} was not able to be found.".format(item_type, copy_from_name_or_id))
        # Do checks for copy permissions if warranted
if item_type == 'workflow_job_template':
copy_get_check = self.get_endpoint(copy_from_lookup['related']['copy'])
if copy_get_check['status_code'] in [200]:
if (
copy_get_check['json']['can_copy']
and copy_get_check['json']['can_copy_without_user_input']
and not copy_get_check['json']['templates_unable_to_copy']
and not copy_get_check['json']['credentials_unable_to_copy']
and not copy_get_check['json']['inventories_unable_to_copy']
):
# Because checks have passed
self.json_output['copy_checks'] = 'passed'
else:
self.fail_json(msg="Unable to copy {0} {1} error: {2}".format(item_type, copy_from_name_or_id, copy_get_check))
else:
self.fail_json(msg="Error accessing {0} {1} error: {2} ".format(item_type, copy_from_name_or_id, copy_get_check))
response = self.post_endpoint(copy_from_lookup['related']['copy'], **{'data': {'name': new_item_name}})
if response['status_code'] in [201]:
self.json_output['id'] = response['json']['id']
self.json_output['changed'] = True
self.json_output['copied'] = True
new_existing_item = response['json']
else:
if 'json' in response and '__all__' in response['json']:
self.fail_json(msg="Unable to create {0} {1}: {2}".format(item_type, new_item_name, response['json']['__all__'][0]))
elif 'json' in response:
self.fail_json(msg="Unable to create {0} {1}: {2}".format(item_type, new_item_name, response['json']))
else:
self.fail_json(msg="Unable to create {0} {1}: {2}".format(item_type, new_item_name, response['status_code']))
return new_existing_item
def create_if_needed(self, existing_item, new_item, endpoint, on_create=None, auto_exit=True, item_type='unknown', associations=None):
# This will exit from the module on its own
# If the method successfully creates an item and on_create param is defined,
        # the on_create parameter will be called as a method passing in this object and the json from the response
# This will return one of two things:
# 1. None if the existing_item is already defined (so no create needs to happen)
        # 2. The response from AWX from calling the patch on the endpoint. It's up to you to process the response and exit from the module
# Note: common error codes from the AWX API can cause the module to fail
response = None
if not endpoint:
self.fail_json(msg="Unable to create new {0} due to missing endpoint".format(item_type))
item_url = None
if existing_item:
try:
item_url = existing_item['url']
except KeyError as ke:
self.fail_json(msg="Unable to process create of item due to missing data {0}".format(ke))
else:
            # If we don't have an existing_item, we can try to create it
# We have to rely on item_type being passed in since we don't have an existing item that declares its type
# We will pull the item_name out from the new_item, if it exists
item_name = self.get_item_name(new_item, allow_unknown=True)
response = self.post_endpoint(endpoint, **{'data': new_item})
# 200 is response from approval node creation on tower 3.7.3 or awx 15.0.0 or earlier.
if response['status_code'] in [200, 201]:
self.json_output['name'] = 'unknown'
for key in ('name', 'username', 'identifier', 'hostname'):
if key in response['json']:
self.json_output['name'] = response['json'][key]
self.json_output['id'] = response['json']['id']
self.json_output['changed'] = True
item_url = response['json']['url']
else:
if 'json' in response and '__all__' in response['json']:
self.fail_json(msg="Unable to create {0} {1}: {2}".format(item_type, item_name, response['json']['__all__'][0]))
elif 'json' in response:
self.fail_json(msg="Unable to create {0} {1}: {2}".format(item_type, item_name, response['json']))
else:
self.fail_json(msg="Unable to create {0} {1}: {2}".format(item_type, item_name, response['status_code']))
# Process any associations with this item
if associations is not None:
for association_type in associations:
sub_endpoint = '{0}{1}/'.format(item_url, association_type)
self.modify_associations(sub_endpoint, associations[association_type])
# If we have an on_create method and we actually changed something we can call on_create
if on_create is not None and self.json_output['changed']:
on_create(self, response['json'])
elif auto_exit:
self.exit_json(**self.json_output)
else:
if response is not None:
last_data = response['json']
return last_data
else:
return
def _encrypted_changed_warning(self, field, old, warning=False):
if not warning:
return
self.warn(
'The field {0} of {1} {2} has encrypted data and may inaccurately report task is changed.'.format(
field, old.get('type', 'unknown'), old.get('id', 'unknown')
)
)
@staticmethod
def has_encrypted_values(obj):
"""Returns True if JSON-like python content in obj has $encrypted$
anywhere in the data as a value
"""
if isinstance(obj, dict):
for val in obj.values():
if ControllerAPIModule.has_encrypted_values(val):
return True
elif isinstance(obj, list):
for val in obj:
if ControllerAPIModule.has_encrypted_values(val):
return True
elif obj == ControllerAPIModule.ENCRYPTED_STRING:
return True
return False
@staticmethod
def fields_could_be_same(old_field, new_field):
"""Treating $encrypted$ as a wild card,
return False if the two values are KNOWN to be different
return True if the two values are the same, or could potentially be the same,
depending on the unknown $encrypted$ value or sub-values
"""
if isinstance(old_field, dict) and isinstance(new_field, dict):
if set(old_field.keys()) != set(new_field.keys()):
return False
for key in new_field.keys():
if not ControllerAPIModule.fields_could_be_same(old_field[key], new_field[key]):
return False
return True # all sub-fields are either equal or could be equal
else:
if old_field == ControllerAPIModule.ENCRYPTED_STRING:
return True
return bool(new_field == old_field)
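    # Illustrative examples (not part of the original module):
    #   fields_could_be_same('$encrypted$', 'hunter2')              -> True  (could match)
    #   fields_could_be_same('abc', 'xyz')                          -> False (known different)
    #   fields_could_be_same({'a': '$encrypted$'}, {'a': 'secret'}) -> True
    #   fields_could_be_same({'a': 1}, {'a': 1, 'b': 2})            -> False (key sets differ)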
def objects_could_be_different(self, old, new, field_set=None, warning=False):
if field_set is None:
field_set = set(fd for fd in new.keys() if fd not in ('modified', 'related', 'summary_fields'))
for field in field_set:
new_field = new.get(field, None)
old_field = old.get(field, None)
if old_field != new_field:
if self.update_secrets or (not self.fields_could_be_same(old_field, new_field)):
return True # Something doesn't match, or something might not match
elif self.has_encrypted_values(new_field) or field not in new:
if self.update_secrets or (not self.fields_could_be_same(old_field, new_field)):
# case of 'field not in new' - user password write-only field that API will not display
self._encrypted_changed_warning(field, old, warning=warning)
return True
return False
def update_if_needed(self, existing_item, new_item, on_update=None, auto_exit=True, associations=None):
# This will exit from the module on its own
# If the method successfully updates an item and on_update param is defined,
        # the on_update parameter will be called as a method passing in this object and the json from the response
# This will return one of two things:
# 1. None if the existing_item does not need to be updated
# 2. The response from AWX from patching to the endpoint. It's up to you to process the response and exit from the module.
# Note: common error codes from the AWX API can cause the module to fail
response = None
if existing_item:
# If we have an item, we can see if it needs an update
try:
item_url = existing_item['url']
item_type = existing_item['type']
if item_type == 'user':
item_name = existing_item['username']
elif item_type == 'workflow_job_template_node':
item_name = existing_item['identifier']
elif item_type == 'credential_input_source':
item_name = existing_item['id']
else:
item_name = existing_item['name']
item_id = existing_item['id']
except KeyError as ke:
self.fail_json(msg="Unable to process update of item due to missing data {0}".format(ke))
# Check to see if anything within the item requires the item to be updated
needs_patch = self.objects_could_be_different(existing_item, new_item)
# If we decided the item needs to be updated, update it
self.json_output['id'] = item_id
if needs_patch:
response = self.patch_endpoint(item_url, **{'data': new_item})
if response['status_code'] == 200:
# compare apples-to-apples, old API data to new API data
# but do so considering the fields given in parameters
self.json_output['changed'] = self.objects_could_be_different(existing_item, response['json'], field_set=new_item.keys(), warning=True)
elif 'json' in response and '__all__' in response['json']:
self.fail_json(msg=response['json']['__all__'])
else:
self.fail_json(**{'msg': "Unable to update {0} {1}, see response".format(item_type, item_name), 'response': response})
else:
raise RuntimeError('update_if_needed called incorrectly without existing_item')
# Process any associations with this item
if associations is not None:
for association_type, id_list in associations.items():
endpoint = '{0}{1}/'.format(item_url, association_type)
self.modify_associations(endpoint, id_list)
# If we change something and have an on_change call it
if on_update is not None and self.json_output['changed']:
if response is None:
last_data = existing_item
else:
last_data = response['json']
on_update(self, last_data)
elif auto_exit:
self.exit_json(**self.json_output)
else:
if response is None:
last_data = existing_item
else:
last_data = response['json']
return last_data
def create_or_update_if_needed(
self, existing_item, new_item, endpoint=None, item_type='unknown', on_create=None, on_update=None, auto_exit=True, associations=None
):
if existing_item:
return self.update_if_needed(existing_item, new_item, on_update=on_update, auto_exit=auto_exit, associations=associations)
else:
return self.create_if_needed(
existing_item, new_item, endpoint, on_create=on_create, item_type=item_type, auto_exit=auto_exit, associations=associations
)
def logout(self):
if self.authenticated and self.oauth_token_id:
# Attempt to delete our current token from /api/v2/tokens/
            # Post to the tokens endpoint with basic auth to try and get a token
api_token_url = (
self.url._replace(
                    path='/api/v2/tokens/{0}/'.format(self.oauth_token_id), query=None  # in error cases, fail_json exits before exception handling
)
).geturl()
try:
self.session.open(
'DELETE',
api_token_url,
validate_certs=self.verify_ssl,
follow_redirects=True,
force_basic_auth=True,
url_username=self.username,
url_password=self.password,
)
self.oauth_token_id = None
self.authenticated = False
except HTTPError as he:
try:
resp = he.read()
except Exception as e:
resp = 'unknown {0}'.format(e)
self.warn('Failed to release token: {0}, response: {1}'.format(he, resp))
except (Exception) as e:
# Sanity check: Did the server send back some kind of internal error?
self.warn('Failed to release token {0}: {1}'.format(self.oauth_token_id, e))
def is_job_done(self, job_status):
if job_status in ['new', 'pending', 'waiting', 'running']:
return False
else:
return True
def wait_on_url(self, url, object_name, object_type, timeout=30, interval=10):
# Grab our start time to compare against for the timeout
start = time.time()
result = self.get_endpoint(url)
while not result['json']['finished']:
# If we are past our time out fail with a message
if timeout and timeout < time.time() - start:
# Account for Legacy messages
if object_type == 'legacy_job_wait':
self.json_output['msg'] = 'Monitoring of Job - {0} aborted due to timeout'.format(object_name)
else:
self.json_output['msg'] = 'Monitoring of {0} - {1} aborted due to timeout'.format(object_type, object_name)
self.wait_output(result)
self.fail_json(**self.json_output)
# Put the process to sleep for our interval
time.sleep(interval)
result = self.get_endpoint(url)
self.json_output['status'] = result['json']['status']
# If the job has failed, we want to raise a task failure for that so we get a non-zero response.
if result['json']['failed']:
# Account for Legacy messages
if object_type == 'legacy_job_wait':
self.json_output['msg'] = 'Job with id {0} failed'.format(object_name)
else:
self.json_output['msg'] = 'The {0} - {1}, failed'.format(object_type, object_name)
self.wait_output(result)
self.fail_json(**self.json_output)
self.wait_output(result)
return result
def wait_output(self, response):
for k in ('id', 'status', 'elapsed', 'started', 'finished'):
self.json_output[k] = response['json'].get(k)
def wait_on_workflow_node_url(self, url, object_name, object_type, timeout=30, interval=10, **kwargs):
# Grab our start time to compare against for the timeout
start = time.time()
result = self.get_endpoint(url, **kwargs)
while result["json"]["count"] == 0:
# If we are past our time out fail with a message
if timeout and timeout < time.time() - start:
# Account for Legacy messages
self.json_output["msg"] = "Monitoring of {0} - {1} aborted due to timeout, {2}".format(object_type, object_name, url)
self.wait_output(result)
self.fail_json(**self.json_output)
# Put the process to sleep for our interval
time.sleep(interval)
result = self.get_endpoint(url, **kwargs)
if object_type == "Workflow Approval":
# Approval jobs have no elapsed time so return
return result["json"]["results"][0]
else:
            # Subtract the time elapsed so far from the timeout.
revised_timeout = timeout - (time.time() - start)
# Now that Job has been found, wait for it to finish
result = self.wait_on_url(
url=result["json"]["results"][0]["related"]["job"],
object_name=object_name,
object_type=object_type,
timeout=revised_timeout,
interval=interval,
)
self.json_output["job_data"] = result["json"]
return result
| 50.250639
| 155
| 0.60258
|
0fec147788efbeb5b3d57e1ed378fe781c8410e1
| 1,492
|
py
|
Python
|
raspy/tests/test_IO/test_PinPollFailEvent.py
|
cyrusbuilt/RasPy
|
1e34840cc90ea7f19317e881162209d3d819eb09
|
[
"MIT"
] | null | null | null |
raspy/tests/test_IO/test_PinPollFailEvent.py
|
cyrusbuilt/RasPy
|
1e34840cc90ea7f19317e881162209d3d819eb09
|
[
"MIT"
] | null | null | null |
raspy/tests/test_IO/test_PinPollFailEvent.py
|
cyrusbuilt/RasPy
|
1e34840cc90ea7f19317e881162209d3d819eb09
|
[
"MIT"
] | null | null | null |
"""Test the PinPollFailEvent class."""
import threading
from pyee import EventEmitter
from raspy.io.pin_poll_fail_event import PinPollFailEvent
from raspy.io.io_exception import IOException
class DummyEmitter(object):
"""Dummy emitter for testing."""
__emitter = None
__evt = None
__pollThread = None
def __init__(self):
"""ctor."""
self.__emitter = EventEmitter()
def on(self, evt, callback):
"""Register event handler."""
self.__emitter.on(evt, callback)
def emit(self, evt, args):
"""Fire event."""
self.__emitter.emit(evt, args)
def on_poll_fail(self):
"""Fire pin poll faiure event."""
self.emit("pinPollFailed", self.__evt)
def poll(self):
"""Execute pin polling on background thread."""
ioEx = IOException("Poll failed.")
self.__evt = PinPollFailEvent(ioEx)
self.__pollThread = threading.Thread(target=self.on_poll_fail)
self.__pollThread.name = "DummyEmitterThread"
self.__pollThread.daemon = True
self.__pollThread.start()
class TestPinPollFailEvent(object):
"""Test pin polling fail event."""
def __fail_handler(self, failEvt):
assert isinstance(failEvt, PinPollFailEvent)
assert isinstance(failEvt.failure_cause, IOException)
def test_pin_poll_fail_event(self):
"""Test event."""
d = DummyEmitter()
d.on("pinPollFailed", self.__fail_handler)
d.poll()
| 27.127273
| 70
| 0.652815
|
7124590b8a6eb6cc249280466cb57028dc3f798d
| 262
|
py
|
Python
|
New-Egg_Spider/DataScraper/NewEgg/NewEgg/items.py
|
Ateridable/Portfolio
|
90f067fcf6bfe418065215ee7eb59777e950d0db
|
[
"CC-BY-3.0"
] | null | null | null |
New-Egg_Spider/DataScraper/NewEgg/NewEgg/items.py
|
Ateridable/Portfolio
|
90f067fcf6bfe418065215ee7eb59777e950d0db
|
[
"CC-BY-3.0"
] | null | null | null |
New-Egg_Spider/DataScraper/NewEgg/NewEgg/items.py
|
Ateridable/Portfolio
|
90f067fcf6bfe418065215ee7eb59777e950d0db
|
[
"CC-BY-3.0"
] | null | null | null |
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class NeweggItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
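# A hypothetical, populated version of the item (not part of the original
# project) might look like the sketch below; the field names are assumptions
# for a NewEgg product scraper, not taken from the spider itself.
class ExampleNeweggProductItem(scrapy.Item):
    title = scrapy.Field()    # product title text
    price = scrapy.Field()    # listed price string
    rating = scrapy.Field()   # customer rating, if present
    url = scrapy.Field()      # product page URL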
| 20.153846
| 53
| 0.71374
|
f9f8bc90944d02700679364404157c67663cdf8a
| 205
|
py
|
Python
|
envlogger/weather/urls.py
|
aquatix/envlogger
|
c5667d06a46c1890d145e81ac430fc5d7a4c4bc4
|
[
"Apache-2.0"
] | null | null | null |
envlogger/weather/urls.py
|
aquatix/envlogger
|
c5667d06a46c1890d145e81ac430fc5d7a4c4bc4
|
[
"Apache-2.0"
] | 7
|
2017-12-18T15:10:29.000Z
|
2021-12-12T11:21:47.000Z
|
envlogger/weather/urls.py
|
aquatix/envlogger
|
c5667d06a46c1890d145e81ac430fc5d7a4c4bc4
|
[
"Apache-2.0"
] | null | null | null |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
    url(r'^(?P<user_id>\d+)/dashboard/$', views.weatherdashboard, name='weatherdashboard'),
]
| 22.777778
| 88
| 0.682927
|
622e6a0ce453456283ec45c5be85938ce49644ca
| 7,064
|
py
|
Python
|
tests/pipeline/test_frameload.py
|
otmaneJai/Zipline
|
2bb87cbd23fc960af2c46d9580baf95ed454c39a
|
[
"Apache-2.0"
] | null | null | null |
tests/pipeline/test_frameload.py
|
otmaneJai/Zipline
|
2bb87cbd23fc960af2c46d9580baf95ed454c39a
|
[
"Apache-2.0"
] | null | null | null |
tests/pipeline/test_frameload.py
|
otmaneJai/Zipline
|
2bb87cbd23fc960af2c46d9580baf95ed454c39a
|
[
"Apache-2.0"
] | null | null | null |
"""
Tests for zipline.pipeline.loaders.frame.DataFrameLoader.
"""
from unittest import TestCase
from mock import patch
from numpy import arange, ones
from numpy.testing import assert_array_equal
from pandas import (
DataFrame,
DatetimeIndex,
Int64Index,
)
from zipline.lib.adjustment import (
Float64Add,
Float64Multiply,
Float64Overwrite,
)
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.loaders.frame import (
ADD,
DataFrameLoader,
MULTIPLY,
OVERWRITE,
)
from zipline.utils.tradingcalendar import trading_day
class DataFrameLoaderTestCase(TestCase):
def setUp(self):
self.nsids = 5
self.ndates = 20
self.sids = Int64Index(range(self.nsids))
self.dates = DatetimeIndex(
start='2014-01-02',
freq=trading_day,
periods=self.ndates,
)
self.mask = ones((len(self.dates), len(self.sids)), dtype=bool)
def tearDown(self):
pass
def test_bad_input(self):
data = arange(100).reshape(self.ndates, self.nsids)
baseline = DataFrame(data, index=self.dates, columns=self.sids)
loader = DataFrameLoader(
USEquityPricing.close,
baseline,
)
with self.assertRaises(ValueError):
# Wrong column.
loader.load_adjusted_array(
[USEquityPricing.open], self.dates, self.sids, self.mask
)
with self.assertRaises(ValueError):
# Too many columns.
loader.load_adjusted_array(
[USEquityPricing.open, USEquityPricing.close],
self.dates,
self.sids,
self.mask,
)
def test_baseline(self):
data = arange(100).reshape(self.ndates, self.nsids)
baseline = DataFrame(data, index=self.dates, columns=self.sids)
loader = DataFrameLoader(USEquityPricing.close, baseline)
dates_slice = slice(None, 10, None)
sids_slice = slice(1, 3, None)
[adj_array] = loader.load_adjusted_array(
[USEquityPricing.close],
self.dates[dates_slice],
self.sids[sids_slice],
self.mask[dates_slice, sids_slice],
)
for idx, window in enumerate(adj_array.traverse(window_length=3)):
expected = baseline.values[dates_slice, sids_slice][idx:idx + 3]
assert_array_equal(window, expected)
def test_adjustments(self):
data = arange(100).reshape(self.ndates, self.nsids)
baseline = DataFrame(data, index=self.dates, columns=self.sids)
# Use the dates from index 10 on and sids 1-3.
dates_slice = slice(10, None, None)
sids_slice = slice(1, 4, None)
# Adjustments that should actually affect the output.
relevant_adjustments = [
{
'sid': 1,
'start_date': None,
'end_date': self.dates[15],
'apply_date': self.dates[16],
'value': 0.5,
'kind': MULTIPLY,
},
{
'sid': 2,
'start_date': self.dates[5],
'end_date': self.dates[15],
'apply_date': self.dates[16],
'value': 1.0,
'kind': ADD,
},
{
'sid': 2,
'start_date': self.dates[15],
'end_date': self.dates[16],
'apply_date': self.dates[17],
'value': 1.0,
'kind': ADD,
},
{
'sid': 3,
'start_date': self.dates[16],
'end_date': self.dates[17],
'apply_date': self.dates[18],
'value': 99.0,
'kind': OVERWRITE,
},
]
# These adjustments shouldn't affect the output.
irrelevant_adjustments = [
{ # Sid Not Requested
'sid': 0,
'start_date': self.dates[16],
'end_date': self.dates[17],
'apply_date': self.dates[18],
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Sid Unknown
'sid': 9999,
'start_date': self.dates[16],
'end_date': self.dates[17],
'apply_date': self.dates[18],
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Date Not Requested
'sid': 2,
'start_date': self.dates[1],
'end_date': self.dates[2],
'apply_date': self.dates[3],
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Date Before Known Data
'sid': 2,
'start_date': self.dates[0] - (2 * trading_day),
'end_date': self.dates[0] - trading_day,
'apply_date': self.dates[0] - trading_day,
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Date After Known Data
'sid': 2,
'start_date': self.dates[-1] + trading_day,
'end_date': self.dates[-1] + (2 * trading_day),
'apply_date': self.dates[-1] + (3 * trading_day),
'value': -9999.0,
'kind': OVERWRITE,
},
]
adjustments = DataFrame(relevant_adjustments + irrelevant_adjustments)
loader = DataFrameLoader(
USEquityPricing.close,
baseline,
adjustments=adjustments,
)
expected_baseline = baseline.iloc[dates_slice, sids_slice]
formatted_adjustments = loader.format_adjustments(
self.dates[dates_slice],
self.sids[sids_slice],
)
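        # With dates_slice starting at index 10, the apply dates
        # self.dates[16], [17] and [18] used above land on row offsets
        # 16-10=6, 17-10=7 and 18-10=8 of the requested window, and sids
        # 1-3 map to columns 0-2; hence the keys and col values below.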
expected_formatted_adjustments = {
6: [
Float64Multiply(first_row=0, last_row=5, col=0, value=0.5),
Float64Add(first_row=0, last_row=5, col=1, value=1.0),
],
7: [
Float64Add(first_row=5, last_row=6, col=1, value=1.0),
],
8: [
Float64Overwrite(first_row=6, last_row=7, col=2, value=99.0)
],
}
self.assertEqual(formatted_adjustments, expected_formatted_adjustments)
mask = self.mask[dates_slice, sids_slice]
with patch('zipline.pipeline.loaders.frame.adjusted_array') as m:
loader.load_adjusted_array(
columns=[USEquityPricing.close],
dates=self.dates[dates_slice],
assets=self.sids[sids_slice],
mask=mask,
)
self.assertEqual(m.call_count, 1)
args, kwargs = m.call_args
assert_array_equal(kwargs['data'], expected_baseline.values)
assert_array_equal(kwargs['mask'], mask)
self.assertEqual(kwargs['adjustments'], expected_formatted_adjustments)
| 32.255708
| 79
| 0.521093
|
f344f085c0d5391ffa46957c8ec9a5e07a65a0b3
| 32,919
|
py
|
Python
|
compiler_gym/envs/llvm/datasets/cbench.py
|
sahirgomez1/CompilerGym
|
9987fbdfcf8ac9af076baf0ffd695e48f0e804cf
|
[
"MIT"
] | 562
|
2020-12-21T14:10:20.000Z
|
2022-03-31T21:23:55.000Z
|
compiler_gym/envs/llvm/datasets/cbench.py
|
sahirgomez1/CompilerGym
|
9987fbdfcf8ac9af076baf0ffd695e48f0e804cf
|
[
"MIT"
] | 433
|
2020-12-22T03:40:41.000Z
|
2022-03-31T18:16:17.000Z
|
compiler_gym/envs/llvm/datasets/cbench.py
|
sahirgomez1/CompilerGym
|
9987fbdfcf8ac9af076baf0ffd695e48f0e804cf
|
[
"MIT"
] | 88
|
2020-12-22T08:22:00.000Z
|
2022-03-20T19:00:40.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import enum
import io
import logging
import os
import re
import shutil
import subprocess
import sys
import tarfile
import tempfile
from collections import defaultdict
from pathlib import Path
from threading import Lock
from typing import Callable, Dict, List, NamedTuple, Optional
import fasteners
from compiler_gym.datasets import Benchmark, TarDatasetWithManifest
from compiler_gym.service.proto import BenchmarkDynamicConfig, Command
from compiler_gym.third_party import llvm
from compiler_gym.util.download import download
from compiler_gym.util.runfiles_path import cache_path, site_data_path
from compiler_gym.util.timer import Timer
from compiler_gym.validation_result import ValidationError
logger = logging.getLogger(__name__)
_CBENCH_TARS = {
"macos": (
"https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-cBench-v1-macos.tar.bz2",
"90b312b40317d9ee9ed09b4b57d378879f05e8970bb6de80dc8581ad0e36c84f",
),
"linux": (
"https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-cBench-v1-linux.tar.bz2",
"601fff3944c866f6617e653b6eb5c1521382c935f56ca1f36a9f5cf1a49f3de5",
),
}
_CBENCH_RUNTOME_DATA = (
"https://dl.fbaipublicfiles.com/compiler_gym/cBench-v0-runtime-data.tar.bz2",
"a1b5b5d6b115e5809ccaefc2134434494271d184da67e2ee43d7f84d07329055",
)
if sys.platform == "darwin":
_COMPILE_ARGS = [
"-L",
"/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/lib",
]
else:
_COMPILE_ARGS = []
class LlvmSanitizer(enum.IntEnum):
"""The LLVM sanitizers."""
ASAN = 1
TSAN = 2
MSAN = 3
UBSAN = 4
# Compiler flags that are enabled by sanitizers.
_SANITIZER_FLAGS = {
LlvmSanitizer.ASAN: ["-O1", "-g", "-fsanitize=address", "-fno-omit-frame-pointer"],
LlvmSanitizer.TSAN: ["-O1", "-g", "-fsanitize=thread"],
LlvmSanitizer.MSAN: ["-O1", "-g", "-fsanitize=memory"],
LlvmSanitizer.UBSAN: ["-fsanitize=undefined"],
}
class BenchmarkExecutionResult(NamedTuple):
"""The result of running a benchmark."""
walltime_seconds: float
"""The execution time in seconds."""
error: Optional[ValidationError] = None
"""An error."""
output: Optional[str] = None
"""The output generated by the benchmark."""
def json(self):
return self._asdict() # pylint: disable=no-member
def _compile_and_run_bitcode_file(
bitcode_file: Path,
cmd: str,
cwd: Path,
linkopts: List[str],
env: Dict[str, str],
num_runs: int,
sanitizer: Optional[LlvmSanitizer] = None,
timeout_seconds: float = 300,
compilation_timeout_seconds: float = 60,
) -> BenchmarkExecutionResult:
"""Run the given cBench benchmark."""
# cBench benchmarks expect that a file _finfo_dataset exists in the
# current working directory and contains the number of benchmark
# iterations in it.
with open(cwd / "_finfo_dataset", "w") as f:
print(num_runs, file=f)
# Create a barebones execution environment for the benchmark.
run_env = {
"TMPDIR": os.environ.get("TMPDIR", ""),
"HOME": os.environ.get("HOME", ""),
"USER": os.environ.get("USER", ""),
# Disable all logging from GRPC. In the past I have had false-positive
# "Wrong output" errors caused by GRPC error messages being logged to
# stderr.
"GRPC_VERBOSITY": "NONE",
}
run_env.update(env)
error_data = {}
if sanitizer:
clang_path = llvm.clang_path()
binary = cwd / "a.out"
error_data["run_cmd"] = cmd.replace("$BIN", "./a.out")
# Generate the a.out binary file.
compile_cmd = (
[clang_path.name, str(bitcode_file), "-o", str(binary)]
+ _COMPILE_ARGS
+ list(linkopts)
+ _SANITIZER_FLAGS.get(sanitizer, [])
)
error_data["compile_cmd"] = compile_cmd
logger.debug("compile: %s", compile_cmd)
assert not binary.is_file()
clang = subprocess.Popen(
compile_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
env={"PATH": f"{clang_path.parent}:{os.environ.get('PATH', '')}"},
)
try:
output, _ = clang.communicate(timeout=compilation_timeout_seconds)
except subprocess.TimeoutExpired:
# kill() was added in Python 3.7.
if sys.version_info >= (3, 7, 0):
clang.kill()
else:
clang.terminate()
clang.communicate(timeout=30) # Wait for shutdown to complete.
error_data["timeout"] = compilation_timeout_seconds
return BenchmarkExecutionResult(
walltime_seconds=timeout_seconds,
error=ValidationError(
type="Compilation timeout",
data=error_data,
),
)
if clang.returncode:
error_data["output"] = output
return BenchmarkExecutionResult(
walltime_seconds=timeout_seconds,
error=ValidationError(
type="Compilation failed",
data=error_data,
),
)
assert binary.is_file()
else:
lli_path = llvm.lli_path()
error_data["run_cmd"] = cmd.replace("$BIN", f"{lli_path.name} benchmark.bc")
run_env["PATH"] = str(lli_path.parent)
try:
logger.debug("exec: %s", error_data["run_cmd"])
process = subprocess.Popen(
error_data["run_cmd"],
shell=True,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
env=run_env,
cwd=cwd,
)
with Timer() as timer:
stdout, _ = process.communicate(timeout=timeout_seconds)
except subprocess.TimeoutExpired:
# kill() was added in Python 3.7.
if sys.version_info >= (3, 7, 0):
process.kill()
else:
process.terminate()
process.communicate(timeout=30) # Wait for shutdown to complete.
error_data["timeout_seconds"] = timeout_seconds
return BenchmarkExecutionResult(
walltime_seconds=timeout_seconds,
error=ValidationError(
type="Execution timeout",
data=error_data,
),
)
finally:
if sanitizer:
binary.unlink()
try:
output = stdout.decode("utf-8")
except UnicodeDecodeError:
output = "<binary>"
if process.returncode:
# Runtime error.
if sanitizer == LlvmSanitizer.ASAN and "LeakSanitizer" in output:
error_type = "Memory leak"
elif sanitizer == LlvmSanitizer.ASAN and "AddressSanitizer" in output:
error_type = "Memory error"
elif sanitizer == LlvmSanitizer.MSAN and "MemorySanitizer" in output:
error_type = "Memory error"
elif "Segmentation fault" in output:
error_type = "Segmentation fault"
elif "Illegal Instruction" in output:
error_type = "Illegal Instruction"
else:
error_type = f"Runtime error ({process.returncode})"
error_data["return_code"] = process.returncode
error_data["output"] = output
return BenchmarkExecutionResult(
walltime_seconds=timer.time,
error=ValidationError(
type=error_type,
data=error_data,
),
)
return BenchmarkExecutionResult(walltime_seconds=timer.time, output=output)
def download_cBench_runtime_data() -> bool:
"""Download and unpack the cBench runtime dataset."""
cbench_data = site_data_path("llvm-v0/cbench-v1-runtime-data/runtime_data")
if (cbench_data / "unpacked").is_file():
return False
else:
# Clean up any partially-extracted data directory.
if cbench_data.is_dir():
shutil.rmtree(cbench_data)
url, sha256 = _CBENCH_RUNTOME_DATA
tar_contents = io.BytesIO(download(url, sha256))
with tarfile.open(fileobj=tar_contents, mode="r:bz2") as tar:
cbench_data.parent.mkdir(parents=True, exist_ok=True)
tar.extractall(cbench_data.parent)
assert cbench_data.is_dir()
# Create the marker file to indicate that the directory is unpacked
# and ready to go.
(cbench_data / "unpacked").touch()
return True
# Thread lock to prevent race on download_cBench_runtime_data() from
# multi-threading. This works in tandem with the inter-process file lock - both
# are required.
_CBENCH_DOWNLOAD_THREAD_LOCK = Lock()
def _make_cBench_validator(
cmd: str,
linkopts: List[str],
os_env: Dict[str, str],
num_runs: int = 1,
compare_output: bool = True,
input_files: Optional[List[Path]] = None,
output_files: Optional[List[Path]] = None,
validate_result: Optional[
Callable[[BenchmarkExecutionResult], Optional[str]]
] = None,
pre_execution_callback: Optional[Callable[[Path], None]] = None,
sanitizer: Optional[LlvmSanitizer] = None,
flakiness: int = 5,
) -> Callable[["LlvmEnv"], Optional[ValidationError]]: # noqa: F821
"""Construct a validation callback for a cBench benchmark. See validator() for usage."""
input_files = input_files or []
output_files = output_files or []
def validator_cb(env: "LlvmEnv") -> Optional[ValidationError]: # noqa: F821
"""The validation callback."""
with _CBENCH_DOWNLOAD_THREAD_LOCK:
with fasteners.InterProcessLock(cache_path("cbench-v1-runtime-data.LOCK")):
download_cBench_runtime_data()
cbench_data = site_data_path("llvm-v0/cbench-v1-runtime-data/runtime_data")
for input_file_name in input_files:
path = cbench_data / input_file_name
if not path.is_file():
raise FileNotFoundError(f"Required benchmark input not found: {path}")
# Create a temporary working directory to execute the benchmark in.
with tempfile.TemporaryDirectory(dir=env.service.connection.working_dir) as d:
cwd = Path(d)
# Expand shell variable substitutions in the benchmark command.
expanded_command = cmd.replace("$D", str(cbench_data))
# Translate the output file names into paths inside the working
# directory.
output_paths = [cwd / o for o in output_files]
if pre_execution_callback:
pre_execution_callback(cwd)
# Produce a gold-standard output using a reference version of
# the benchmark.
if compare_output or output_files:
gs_env = env.fork()
try:
# Reset to the original benchmark state and compile it.
gs_env.reset(benchmark=env.benchmark)
gs_env.write_bitcode(cwd / "benchmark.bc")
gold_standard = _compile_and_run_bitcode_file(
bitcode_file=cwd / "benchmark.bc",
cmd=expanded_command,
cwd=cwd,
num_runs=1,
# Use default optimizations for gold standard.
linkopts=linkopts + ["-O2"],
# Always assume safe.
sanitizer=None,
env=os_env,
)
if gold_standard.error:
return ValidationError(
type=f"Gold standard: {gold_standard.error.type}",
data=gold_standard.error.data,
)
finally:
gs_env.close()
# Check that the reference run produced the expected output
# files.
for path in output_paths:
if not path.is_file():
try:
output = gold_standard.output
except UnicodeDecodeError:
output = "<binary>"
raise FileNotFoundError(
f"Expected file '{path.name}' not generated\n"
f"Benchmark: {env.benchmark}\n"
f"Command: {cmd}\n"
f"Output: {output}"
)
path.rename(f"{path}.gold_standard")
# Serialize the benchmark to a bitcode file that will then be
# compiled to a binary.
env.write_bitcode(cwd / "benchmark.bc")
outcome = _compile_and_run_bitcode_file(
bitcode_file=cwd / "benchmark.bc",
cmd=expanded_command,
cwd=cwd,
num_runs=num_runs,
linkopts=linkopts,
sanitizer=sanitizer,
env=os_env,
)
if outcome.error:
return outcome.error
# Run a user-specified validation hook.
if validate_result:
validate_result(outcome)
# Difftest the console output.
if compare_output and gold_standard.output != outcome.output:
return ValidationError(
type="Wrong output",
data={"expected": gold_standard.output, "actual": outcome.output},
)
# Difftest the output files.
for path in output_paths:
if not path.is_file():
return ValidationError(
type="Output not generated",
data={"path": path.name, "command": cmd},
)
diff = subprocess.Popen(
["diff", str(path), f"{path}.gold_standard"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
stdout, _ = diff.communicate()
if diff.returncode:
try:
stdout = stdout.decode("utf-8")
return ValidationError(
type="Wrong output (file)",
data={"path": path.name, "diff": stdout},
)
except UnicodeDecodeError:
return ValidationError(
type="Wrong output (file)",
data={"path": path.name, "diff": "<binary>"},
)
def flaky_wrapped_cb(env: "LlvmEnv") -> Optional[ValidationError]: # noqa: F821
"""Wrap the validation callback in a flakiness retry loop."""
for j in range(1, max(flakiness, 1) + 1):
try:
error = validator_cb(env)
if not error:
return
except TimeoutError:
# Timeout errors can be raised by the environment in case of a
# slow step / observation, and should be retried.
pass
logger.warning("Validation callback failed, attempt=%d/%d", j, flakiness)
return error
return flaky_wrapped_cb
def validator(
benchmark: str,
cmd: str,
data: Optional[List[str]] = None,
outs: Optional[List[str]] = None,
platforms: Optional[List[str]] = None,
compare_output: bool = True,
validate_result: Optional[
Callable[[BenchmarkExecutionResult], Optional[str]]
] = None,
linkopts: Optional[List[str]] = None,
env: Optional[Dict[str, str]] = None,
pre_execution_callback: Optional[Callable[[], None]] = None,
sanitizers: Optional[List[LlvmSanitizer]] = None,
) -> bool:
"""Declare a new benchmark validator.
TODO(cummins): Pull this out into a public API.
:param benchmark: The name of the benchmark that this validator supports.
    :param cmd: The shell command to run the validation. Variable substitution is
        applied to this value as follows: :code:`$BIN` is replaced by the path
        of the compiled binary and :code:`$D` is replaced with the path to the
        benchmark's runtime data directory.
    :param data: A list of paths to input files.
    :param outs: A list of paths to output files.
:return: :code:`True` if the new validator was registered, else :code:`False`.
"""
platforms = platforms or ["linux", "macos"]
if {"darwin": "macos"}.get(sys.platform, sys.platform) not in platforms:
return False
infiles = data or []
outfiles = [Path(p) for p in outs or []]
linkopts = linkopts or []
env = env or {}
if sanitizers is None:
sanitizers = LlvmSanitizer
VALIDATORS[benchmark].append(
_make_cBench_validator(
cmd=cmd,
input_files=infiles,
output_files=outfiles,
compare_output=compare_output,
validate_result=validate_result,
linkopts=linkopts,
os_env=env,
pre_execution_callback=pre_execution_callback,
)
)
# Register additional validators using the sanitizers.
if sys.platform.startswith("linux"):
for sanitizer in sanitizers:
VALIDATORS[benchmark].append(
_make_cBench_validator(
cmd=cmd,
input_files=infiles,
output_files=outfiles,
compare_output=compare_output,
validate_result=validate_result,
linkopts=linkopts,
os_env=env,
pre_execution_callback=pre_execution_callback,
sanitizer=sanitizer,
)
)
# Create the BenchmarkDynamicConfig object.
cbench_data = site_data_path("llvm-v0/cbench-v1-runtime-data/runtime_data")
DYNAMIC_CONFIGS[benchmark] = BenchmarkDynamicConfig(
build_cmd=Command(
argument=["$CC", "$IN"] + linkopts,
timeout_seconds=60,
outfile=["a.out"],
),
run_cmd=Command(
argument=cmd.replace("$BIN", "./a.out")
.replace("$D", str(cbench_data))
.split(),
timeout_seconds=300,
infile=["a.out", "_finfo_dataset"],
outfile=[str(s) for s in outfiles],
),
pre_run_cmd=[
Command(argument=["echo", "1", ">_finfo_dataset"], timeout_seconds=30),
],
)
return True
class CBenchBenchmark(Benchmark):
"""A cBench benchmmark."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for val in VALIDATORS.get(self.uri, []):
self.add_validation_callback(val)
self.proto.dynamic_config.MergeFrom(
DYNAMIC_CONFIGS.get(self.uri, BenchmarkDynamicConfig())
)
class CBenchDataset(TarDatasetWithManifest):
def __init__(self, site_data_base: Path):
platform = {"darwin": "macos"}.get(sys.platform, sys.platform)
url, sha256 = _CBENCH_TARS[platform]
super().__init__(
name="benchmark://cbench-v1",
description="Runnable C benchmarks",
license="BSD 3-Clause",
references={
"Paper": "https://arxiv.org/pdf/1407.3487.pdf",
"Homepage": "https://ctuning.org/wiki/index.php/CTools:CBench",
},
tar_urls=[url],
tar_sha256=sha256,
manifest_urls=[
"https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-cbench-v1-manifest.bz2"
],
manifest_sha256="eeffd7593aeb696a160fd22e6b0c382198a65d0918b8440253ea458cfe927741",
strip_prefix="cBench-v1",
benchmark_file_suffix=".bc",
benchmark_class=CBenchBenchmark,
site_data_base=site_data_base,
sort_order=-1,
validatable="Partially",
)
def install(self):
super().install()
with _CBENCH_DOWNLOAD_THREAD_LOCK:
with fasteners.InterProcessLock(cache_path("cbench-v1-runtime-data.LOCK")):
download_cBench_runtime_data()
class CBenchLegacyDataset2(TarDatasetWithManifest):
def __init__(
self,
site_data_base: Path,
sort_order: int = 0,
name="benchmark://cbench-v1",
manifest_url="https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-cbench-v1-manifest.bz2",
manifest_sha256="eeffd7593aeb696a160fd22e6b0c382198a65d0918b8440253ea458cfe927741",
deprecated=None,
):
platform = {"darwin": "macos"}.get(sys.platform, sys.platform)
url, sha256 = _CBENCH_TARS[platform]
super().__init__(
name=name,
description="Runnable C benchmarks",
license="BSD 3-Clause",
references={
"Paper": "https://arxiv.org/pdf/1407.3487.pdf",
"Homepage": "https://ctuning.org/wiki/index.php/CTools:CBench",
},
tar_urls=[url],
tar_sha256=sha256,
manifest_urls=[manifest_url],
manifest_sha256=manifest_sha256,
strip_prefix="cBench-v1",
benchmark_file_suffix=".bc",
site_data_base=site_data_base,
sort_order=sort_order,
benchmark_class=CBenchBenchmark,
deprecated=deprecated,
validatable="Partially",
)
# URLs of the deprecated cBench datasets.
_CBENCH_LEGACY_TARS = {
"macos": (
"https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-cBench-v0-macos.tar.bz2",
"072a730c86144a07bba948c49afe543e4f06351f1cb17f7de77f91d5c1a1b120",
),
"linux": (
"https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-cBench-v0-linux.tar.bz2",
"9b5838a90895579aab3b9375e8eeb3ed2ae58e0ad354fec7eb4f8b31ecb4a360",
),
}
class CBenchLegacyDataset(TarDatasetWithManifest):
# The difference between cbench-v0 and cbench-v1 is the arguments passed to
# clang when preparing the LLVM bitcodes:
#
# - v0: `-O0 -Xclang -disable-O0-optnone`.
# - v1: `-O1 -Xclang -Xclang -disable-llvm-passes`.
#
    # The key difference is that in v0, the generated IR functions were
    # annotated with a `noinline` attribute that prevented inlining. In v1
    # that is no longer the case.
def __init__(self, site_data_base: Path):
platform = {"darwin": "macos"}.get(sys.platform, sys.platform)
url, sha256 = _CBENCH_LEGACY_TARS[platform]
super().__init__(
name="benchmark://cBench-v0",
description="Runnable C benchmarks",
license="BSD 3-Clause",
references={
"Paper": "https://arxiv.org/pdf/1407.3487.pdf",
"Homepage": "https://ctuning.org/wiki/index.php/CTools:CBench",
},
tar_urls=[url],
tar_sha256=sha256,
manifest_urls=[
"https://dl.fbaipublicfiles.com/compiler_gym/llvm_bitcodes-10.0.0-cBench-v0-manifest.bz2"
],
manifest_sha256="635b94eeb2784dfedb3b53fd8f84517c3b4b95d851ddb662d4c1058c72dc81e0",
strip_prefix="cBench-v0",
benchmark_file_suffix=".bc",
site_data_base=site_data_base,
deprecated="Please use 'benchmark://cbench-v1'",
)
# ===============================
# Definition of cBench validators
# ===============================
# A map from benchmark name to validation callbacks.
VALIDATORS: Dict[
str, List[Callable[["LlvmEnv"], Optional[str]]] # noqa: F821
] = defaultdict(list)
# A map from benchmark name to BenchmarkDynamicConfig messages.
DYNAMIC_CONFIGS: Dict[str, Optional[BenchmarkDynamicConfig]] = {}
def validate_sha_output(result: BenchmarkExecutionResult) -> Optional[str]:
"""SHA benchmark prints 5 random hex strings. Normally these hex strings are
16 characters but occasionally they are less (presumably because of a
leading zero being omitted).
"""
try:
if not re.match(
r"[0-9a-f]{0,16} [0-9a-f]{0,16} [0-9a-f]{0,16} [0-9a-f]{0,16} [0-9a-f]{0,16}",
result.output.rstrip(),
):
return "Failed to parse hex output"
except UnicodeDecodeError:
return "Failed to parse unicode output"
def setup_ghostscript_library_files(dataset_id: int) -> Callable[[Path], None]:
"""Make a pre-execution setup hook for ghostscript."""
def setup(cwd: Path):
cbench_data = site_data_path("llvm-v0/cbench-v1-runtime-data/runtime_data")
# Copy the input data file into the current directory since ghostscript
# doesn't like long input paths.
shutil.copyfile(
cbench_data / "office_data" / f"{dataset_id}.ps", cwd / "input.ps"
)
# Ghostscript doesn't like the library files being symlinks so copy them
# into the working directory as regular files.
for path in (cbench_data / "ghostscript").iterdir():
if path.name.endswith(".ps"):
shutil.copyfile(path, cwd / path.name)
return setup
validator(
benchmark="benchmark://cbench-v1/bitcount",
cmd="$BIN 1125000",
)
validator(
benchmark="benchmark://cbench-v1/bitcount",
cmd="$BIN 512",
)
for i in range(1, 21):
# NOTE(cummins): Disabled due to timeout errors, further investigation
# needed.
#
# validator(
# benchmark="benchmark://cbench-v1/adpcm",
# cmd=f"$BIN $D/telecom_data/{i}.adpcm",
# data=[f"telecom_data/{i}.adpcm"],
# )
#
# validator(
# benchmark="benchmark://cbench-v1/adpcm",
# cmd=f"$BIN $D/telecom_data/{i}.pcm",
# data=[f"telecom_data/{i}.pcm"],
# )
validator(
benchmark="benchmark://cbench-v1/blowfish",
cmd=f"$BIN d $D/office_data/{i}.benc output.txt 1234567890abcdeffedcba0987654321",
data=[f"office_data/{i}.benc"],
outs=["output.txt"],
)
validator(
benchmark="benchmark://cbench-v1/bzip2",
cmd=f"$BIN -d -k -f -c $D/bzip2_data/{i}.bz2",
data=[f"bzip2_data/{i}.bz2"],
)
validator(
benchmark="benchmark://cbench-v1/crc32",
cmd=f"$BIN $D/telecom_data/{i}.pcm",
data=[f"telecom_data/{i}.pcm"],
)
validator(
benchmark="benchmark://cbench-v1/dijkstra",
cmd=f"$BIN $D/network_dijkstra_data/{i}.dat",
data=[f"network_dijkstra_data/{i}.dat"],
)
validator(
benchmark="benchmark://cbench-v1/gsm",
cmd=f"$BIN -fps -c $D/telecom_gsm_data/{i}.au",
data=[f"telecom_gsm_data/{i}.au"],
)
# NOTE(cummins): ispell fails with returncode 1 and no output when run
# under safe optimizations.
#
# validator(
# benchmark="benchmark://cbench-v1/ispell",
# cmd=f"$BIN -a -d americanmed+ $D/office_data/{i}.txt",
# data = [f"office_data/{i}.txt"],
# )
validator(
benchmark="benchmark://cbench-v1/jpeg-c",
cmd=f"$BIN -dct int -progressive -outfile output.jpeg $D/consumer_jpeg_data/{i}.ppm",
data=[f"consumer_jpeg_data/{i}.ppm"],
outs=["output.jpeg"],
# NOTE(cummins): AddressSanitizer disabled because of
# global-buffer-overflow in regular build.
sanitizers=[LlvmSanitizer.TSAN, LlvmSanitizer.UBSAN],
)
validator(
benchmark="benchmark://cbench-v1/jpeg-d",
cmd=f"$BIN -dct int -outfile output.ppm $D/consumer_jpeg_data/{i}.jpg",
data=[f"consumer_jpeg_data/{i}.jpg"],
outs=["output.ppm"],
)
validator(
benchmark="benchmark://cbench-v1/patricia",
cmd=f"$BIN $D/network_patricia_data/{i}.udp",
data=[f"network_patricia_data/{i}.udp"],
env={
# NOTE(cummins): Benchmark leaks when executed with safe optimizations.
"ASAN_OPTIONS": "detect_leaks=0",
},
)
validator(
benchmark="benchmark://cbench-v1/qsort",
cmd=f"$BIN $D/automotive_qsort_data/{i}.dat",
data=[f"automotive_qsort_data/{i}.dat"],
outs=["sorted_output.dat"],
linkopts=["-lm"],
)
# NOTE(cummins): Rijndael benchmark disabled due to memory errors under
# basic optimizations.
#
# validator(benchmark="benchmark://cbench-v1/rijndael", cmd=f"$BIN
# $D/office_data/{i}.enc output.dec d
# 1234567890abcdeffedcba09876543211234567890abcdeffedcba0987654321",
# data=[f"office_data/{i}.enc"], outs=["output.dec"],
# )
#
# validator(benchmark="benchmark://cbench-v1/rijndael", cmd=f"$BIN
# $D/office_data/{i}.txt output.enc e
# 1234567890abcdeffedcba09876543211234567890abcdeffedcba0987654321",
# data=[f"office_data/{i}.txt"], outs=["output.enc"],
# )
validator(
benchmark="benchmark://cbench-v1/sha",
cmd=f"$BIN $D/office_data/{i}.txt",
data=[f"office_data/{i}.txt"],
compare_output=False,
validate_result=validate_sha_output,
)
validator(
benchmark="benchmark://cbench-v1/stringsearch",
cmd=f"$BIN $D/office_data/{i}.txt $D/office_data/{i}.s.txt output.txt",
data=[f"office_data/{i}.txt"],
outs=["output.txt"],
env={
# NOTE(cummins): Benchmark leaks when executed with safe optimizations.
"ASAN_OPTIONS": "detect_leaks=0",
},
linkopts=["-lm"],
)
# NOTE(cummins): The stringsearch2 benchmark has a very long execution time.
# Use only a single input to keep the validation time reasonable. I have
# also observed Segmentation fault on gold standard using 4.txt and 6.txt.
if i == 1:
validator(
benchmark="benchmark://cbench-v1/stringsearch2",
cmd=f"$BIN $D/office_data/{i}.txt $D/office_data/{i}.s.txt output.txt",
data=[f"office_data/{i}.txt"],
outs=["output.txt"],
env={
# NOTE(cummins): Benchmark leaks when executed with safe optimizations.
"ASAN_OPTIONS": "detect_leaks=0",
},
# TSAN disabled because of extremely long execution leading to
# timeouts.
sanitizers=[LlvmSanitizer.ASAN, LlvmSanitizer.MSAN, LlvmSanitizer.UBSAN],
)
validator(
benchmark="benchmark://cbench-v1/susan",
cmd=f"$BIN $D/automotive_susan_data/{i}.pgm output_large.corners.pgm -c",
data=[f"automotive_susan_data/{i}.pgm"],
outs=["output_large.corners.pgm"],
linkopts=["-lm"],
)
validator(
benchmark="benchmark://cbench-v1/tiff2bw",
cmd=f"$BIN $D/consumer_tiff_data/{i}.tif output.tif",
data=[f"consumer_tiff_data/{i}.tif"],
outs=["output.tif"],
linkopts=["-lm"],
env={
# NOTE(cummins): Benchmark leaks when executed with safe optimizations.
"ASAN_OPTIONS": "detect_leaks=0",
},
)
validator(
benchmark="benchmark://cbench-v1/tiff2rgba",
cmd=f"$BIN $D/consumer_tiff_data/{i}.tif output.tif",
data=[f"consumer_tiff_data/{i}.tif"],
outs=["output.tif"],
linkopts=["-lm"],
)
validator(
benchmark="benchmark://cbench-v1/tiffdither",
cmd=f"$BIN $D/consumer_tiff_data/{i}.bw.tif out.tif",
data=[f"consumer_tiff_data/{i}.bw.tif"],
outs=["out.tif"],
linkopts=["-lm"],
)
validator(
benchmark="benchmark://cbench-v1/tiffmedian",
cmd=f"$BIN $D/consumer_tiff_data/{i}.nocomp.tif output.tif",
data=[f"consumer_tiff_data/{i}.nocomp.tif"],
outs=["output.tif"],
linkopts=["-lm"],
)
# NOTE(cummins): On macOS the following benchmarks abort with an illegal
# hardware instruction error.
# if sys.platform != "darwin":
# validator(
# benchmark="benchmark://cbench-v1/lame",
# cmd=f"$BIN $D/consumer_data/{i}.wav output.mp3",
# data=[f"consumer_data/{i}.wav"],
# outs=["output.mp3"],
# compare_output=False,
# linkopts=["-lm"],
# )
# NOTE(cummins): Segfault on gold standard.
#
# validator(
# benchmark="benchmark://cbench-v1/ghostscript",
# cmd="$BIN -sDEVICE=ppm -dNOPAUSE -dQUIET -sOutputFile=output.ppm -- input.ps",
# data=[f"office_data/{i}.ps"],
# outs=["output.ppm"],
# linkopts=["-lm", "-lz"],
# pre_execution_callback=setup_ghostscript_library_files(i),
# )
| 35.977049
| 111
| 0.589933
|
f293d00b5b8b199b5e63d293fbc11502834e2466
| 38,071
|
py
|
Python
|
sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations/_service_operations.py
|
ankitarorabit/azure-sdk-for-python
|
dd90281cbad9400f8080754a5ef2f56791a5a88f
|
[
"MIT"
] | 3
|
2020-06-23T02:25:27.000Z
|
2021-09-07T18:48:11.000Z
|
sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations/_service_operations.py
|
ankitarorabit/azure-sdk-for-python
|
dd90281cbad9400f8080754a5ef2f56791a5a88f
|
[
"MIT"
] | 510
|
2019-07-17T16:11:19.000Z
|
2021-08-02T08:38:32.000Z
|
sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations/_service_operations.py
|
ankitarorabit/azure-sdk-for-python
|
dd90281cbad9400f8080754a5ef2f56791a5a88f
|
[
"MIT"
] | 5
|
2019-09-04T12:51:37.000Z
|
2020-09-16T07:28:40.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ServiceOperations:
"""ServiceOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.storage.blob.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def set_properties(
self,
storage_service_properties: "_models.StorageServiceProperties",
timeout: Optional[int] = None,
request_id_parameter: Optional[str] = None,
**kwargs
) -> None:
"""Sets properties for a storage account's Blob service endpoint, including properties for Storage
Analytics and CORS (Cross-Origin Resource Sharing) rules.
:param storage_service_properties: The StorageService properties.
:type storage_service_properties: ~azure.storage.blob.models.StorageServiceProperties
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
restype = "service"
comp = "properties"
content_type = kwargs.pop("content_type", "application/xml")
accept = "application/xml"
# Construct URL
url = self.set_properties.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True)
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
if cls:
return cls(pipeline_response, None, response_headers)
set_properties.metadata = {'url': '/'} # type: ignore
async def get_properties(
self,
timeout: Optional[int] = None,
request_id_parameter: Optional[str] = None,
**kwargs
) -> "_models.StorageServiceProperties":
"""gets the properties of a storage account's Blob service, including properties for Storage
Analytics and CORS (Cross-Origin Resource Sharing) rules.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageServiceProperties, or the result of cls(response)
:rtype: ~azure.storage.blob.models.StorageServiceProperties
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceProperties"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
restype = "service"
comp = "properties"
accept = "application/xml"
# Construct URL
url = self.get_properties.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
deserialized = self._deserialize('StorageServiceProperties', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_properties.metadata = {'url': '/'} # type: ignore
async def get_statistics(
self,
timeout: Optional[int] = None,
request_id_parameter: Optional[str] = None,
**kwargs
) -> "_models.StorageServiceStats":
"""Retrieves statistics related to replication for the Blob service. It is only available on the
secondary location endpoint when read-access geo-redundant replication is enabled for the
storage account.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageServiceStats, or the result of cls(response)
:rtype: ~azure.storage.blob.models.StorageServiceStats
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceStats"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
restype = "service"
comp = "stats"
accept = "application/xml"
# Construct URL
url = self.get_statistics.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
deserialized = self._deserialize('StorageServiceStats', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_statistics.metadata = {'url': '/'} # type: ignore
async def list_containers_segment(
self,
prefix: Optional[str] = None,
marker: Optional[str] = None,
maxresults: Optional[int] = None,
include: Optional[List[Union[str, "_models.ListContainersIncludeType"]]] = None,
timeout: Optional[int] = None,
request_id_parameter: Optional[str] = None,
**kwargs
) -> "_models.ListContainersSegmentResponse":
"""The List Containers Segment operation returns a list of the containers under the specified
account.
:param prefix: Filters the results to return only containers whose name begins with the
specified prefix.
:type prefix: str
:param marker: A string value that identifies the portion of the list of containers to be
returned with the next listing operation. The operation returns the NextMarker value within the
response body if the listing operation did not return all containers remaining to be listed
with the current page. The NextMarker value can be used as the value for the marker parameter
in a subsequent call to request the next page of list items. The marker value is opaque to the
client.
:type marker: str
:param maxresults: Specifies the maximum number of containers to return. If the request does
not specify maxresults, or specifies a value greater than 5000, the server will return up to
5000 items. Note that if the listing operation crosses a partition boundary, then the service
will return a continuation token for retrieving the remainder of the results. For this reason,
it is possible that the service will return fewer results than specified by maxresults, or than
the default of 5000.
:type maxresults: int
:param include: Include this parameter to specify that the container's metadata be returned as
part of the response body.
:type include: list[str or ~azure.storage.blob.models.ListContainersIncludeType]
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ListContainersSegmentResponse, or the result of cls(response)
:rtype: ~azure.storage.blob.models.ListContainersSegmentResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListContainersSegmentResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
comp = "list"
accept = "application/xml"
# Construct URL
url = self.list_containers_segment.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if prefix is not None:
query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
if marker is not None:
query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
if maxresults is not None:
query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
if include is not None:
query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
deserialized = self._deserialize('ListContainersSegmentResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
list_containers_segment.metadata = {'url': '/'} # type: ignore
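    # Illustrative pagination sketch (not part of the generated client): the
    # marker/NextMarker contract described in the docstring above can be
    # consumed in a loop along these lines, assuming the deserialized
    # ListContainersSegmentResponse exposes next_marker and container_items:
    #
    #   marker = None
    #   while True:
    #       page = await service_ops.list_containers_segment(marker=marker, maxresults=100)
    #       for container in page.container_items or []:
    #           ...
    #       marker = page.next_marker
    #       if not marker:
    #           break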
async def get_user_delegation_key(
self,
key_info: "_models.KeyInfo",
timeout: Optional[int] = None,
request_id_parameter: Optional[str] = None,
**kwargs
) -> "_models.UserDelegationKey":
"""Retrieves a user delegation key for the Blob service. This is only a valid operation when using
bearer token authentication.
:param key_info:
:type key_info: ~azure.storage.blob.models.KeyInfo
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: UserDelegationKey, or the result of cls(response)
:rtype: ~azure.storage.blob.models.UserDelegationKey
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UserDelegationKey"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
restype = "service"
comp = "userdelegationkey"
content_type = kwargs.pop("content_type", "application/xml")
accept = "application/xml"
# Construct URL
url = self.get_user_delegation_key.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(key_info, 'KeyInfo', is_xml=True)
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
deserialized = self._deserialize('UserDelegationKey', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_user_delegation_key.metadata = {'url': '/'} # type: ignore
async def get_account_info(
self,
**kwargs
) -> None:
"""Returns the sku name and account kind.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
restype = "account"
comp = "properties"
accept = "application/xml"
# Construct URL
url = self.get_account_info.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name'))
response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind'))
response_headers['x-ms-is-hns-enabled']=self._deserialize('bool', response.headers.get('x-ms-is-hns-enabled'))
if cls:
return cls(pipeline_response, None, response_headers)
get_account_info.metadata = {'url': '/'} # type: ignore
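# Illustrative usage sketch (not part of the generated code). Because this operation returns
# None, the account details arrive only in response headers; a `cls` callback can surface
# them, as the header names deserialized above show. The `service_ops` name is an assumption.
#
#   def _capture_headers(pipeline_response, deserialized, headers):
#       return headers
#
#   headers = await service_ops.get_account_info(cls=_capture_headers)
#   sku, kind = headers['x-ms-sku-name'], headers['x-ms-account-kind']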
async def submit_batch(
self,
content_length: int,
multipart_content_type: str,
body: IO,
timeout: Optional[int] = None,
request_id_parameter: Optional[str] = None,
**kwargs
) -> IO:
"""The Batch operation allows multiple API calls to be embedded into a single HTTP request.
:param content_length: The length of the request.
:type content_length: long
:param multipart_content_type: Required. The value of this header must be multipart/mixed with
a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:`<GUID>`.
:type multipart_content_type: str
:param body: Initial data.
:type body: IO
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IO, or the result of cls(response)
:rtype: IO
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[IO]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
comp = "batch"
content_type = kwargs.pop("content_type", "application/xml")
accept = "application/xml"
# Construct URL
url = self.submit_batch.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'IO', is_xml=True)
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
deserialized = response.stream_download(self._client._pipeline)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
submit_batch.metadata = {'url': '/'} # type: ignore
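# Illustrative usage sketch (not part of the generated code). The batch body must be a
# pre-built multipart/mixed payload; the `service_ops` name, boundary value, and payload
# contents below are assumptions.
#
#   import io
#   boundary = "batch_00000000-0000-0000-0000-000000000000"
#   payload = io.BytesIO(b"...multipart body built elsewhere...")
#   stream = await service_ops.submit_batch(
#       content_length=payload.getbuffer().nbytes,
#       multipart_content_type="multipart/mixed; boundary=" + boundary,
#       body=payload)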
async def filter_blobs(
self,
timeout: Optional[int] = None,
request_id_parameter: Optional[str] = None,
where: Optional[str] = None,
marker: Optional[str] = None,
maxresults: Optional[int] = None,
**kwargs
) -> "_models.FilterBlobSegment":
"""The Filter Blobs operation enables callers to list blobs across all containers whose tags match
a given search expression. Filter blobs searches across all containers within a storage
account but can be scoped within the expression to a single container.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-
timeouts-for-blob-service-operations">Setting Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:param where: Filters the results to return only blobs whose tags match the
specified expression.
:type where: str
:param marker: A string value that identifies the portion of the list of containers to be
returned with the next listing operation. The operation returns the NextMarker value within the
response body if the listing operation did not return all containers remaining to be listed
with the current page. The NextMarker value can be used as the value for the marker parameter
in a subsequent call to request the next page of list items. The marker value is opaque to the
client.
:type marker: str
:param maxresults: Specifies the maximum number of containers to return. If the request does
not specify maxresults, or specifies a value greater than 5000, the server will return up to
5000 items. Note that if the listing operation crosses a partition boundary, then the service
will return a continuation token for retrieving the remainder of the results. For this reason,
it is possible that the service will return fewer results than specified by maxresults, or than
the default of 5000.
:type maxresults: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FilterBlobSegment, or the result of cls(response)
:rtype: ~azure.storage.blob.models.FilterBlobSegment
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FilterBlobSegment"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
comp = "blobs"
accept = "application/xml"
# Construct URL
url = self.filter_blobs.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
if where is not None:
query_parameters['where'] = self._serialize.query("where", where, 'str')
if marker is not None:
query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
if maxresults is not None:
query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
deserialized = self._deserialize('FilterBlobSegment', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
filter_blobs.metadata = {'url': '/'} # type: ignore
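# Illustrative usage sketch (not part of the generated code). Shows a tag filter expression
# plus marker-based paging as described in the docstring; the `service_ops` name, the tag
# name/value, and the assumption that the segment model exposes `blobs` and `next_marker`
# are all illustrative.
#
#   segment = await service_ops.filter_blobs(where="\"project\"='demo'", maxresults=100)
#   for item in segment.blobs:
#       print(item.name)
#   if segment.next_marker:
#       segment = await service_ops.filter_blobs(where="\"project\"='demo'",
#                                                marker=segment.next_marker)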
| 55.015896
| 133
| 0.678863
|
5a37a673ee80cc522d2b856d9749044d0ee5d759
| 6,459
|
py
|
Python
|
python/pyspark/sql/group.py
|
bopopescu/wso2-spark
|
6982456ded39a8fef0ad26600218f8f575aac2a5
|
[
"Apache-2.0",
"MIT"
] | 11
|
2016-05-26T12:06:38.000Z
|
2020-07-06T20:37:07.000Z
|
python/pyspark/sql/group.py
|
bopopescu/wso2-spark
|
6982456ded39a8fef0ad26600218f8f575aac2a5
|
[
"Apache-2.0",
"MIT"
] | 2
|
2015-10-22T07:30:24.000Z
|
2015-10-28T10:10:06.000Z
|
python/pyspark/sql/group.py
|
bopopescu/wso2-spark
|
6982456ded39a8fef0ad26600218f8f575aac2a5
|
[
"Apache-2.0",
"MIT"
] | 9
|
2016-07-29T01:13:50.000Z
|
2020-07-23T16:16:17.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql import since
from pyspark.sql.column import Column, _to_seq
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import *
__all__ = ["GroupedData"]
def dfapi(f):
def _api(self):
name = f.__name__
jdf = getattr(self._jdf, name)()
return DataFrame(jdf, self.sql_ctx)
_api.__name__ = f.__name__
_api.__doc__ = f.__doc__
return _api
def df_varargs_api(f):
def _api(self, *args):
name = f.__name__
jdf = getattr(self._jdf, name)(_to_seq(self.sql_ctx._sc, args))
return DataFrame(jdf, self.sql_ctx)
_api.__name__ = f.__name__
_api.__doc__ = f.__doc__
return _api
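# Illustrative note (not part of the original module): both decorators simply forward the
# call to the underlying JVM GroupedData method of the same name, so a decorated helper such
# as `avg` is equivalent to the dict form of `agg`, and can be mixed with Column expressions.
# A hedged sketch, assuming `df` is a DataFrame with `name` and numeric `age` columns as in
# the doctests below:
#
#   df.groupBy(df.name).avg('age')             # via df_varargs_api
#   df.groupBy(df.name).agg({'age': 'avg'})    # same aggregation through agg()
#
#   from pyspark.sql import functions as F
#   df.groupBy(df.name).agg(F.min(df.age), F.count(df.age))  # Column-expression form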
class GroupedData(object):
"""
A set of methods for aggregations on a :class:`DataFrame`,
created by :func:`DataFrame.groupBy`.
.. note:: Experimental
.. versionadded:: 1.3
"""
def __init__(self, jdf, sql_ctx):
self._jdf = jdf
self.sql_ctx = sql_ctx
@ignore_unicode_prefix
@since(1.3)
def agg(self, *exprs):
"""Compute aggregates and returns the result as a :class:`DataFrame`.
The available aggregate functions are `avg`, `max`, `min`, `sum`, `count`.
If ``exprs`` is a single :class:`dict` mapping from string to string, then the key
is the column to perform aggregation on, and the value is the aggregate function.
Alternatively, ``exprs`` can also be a list of aggregate :class:`Column` expressions.
:param exprs: a dict mapping from column name (string) to aggregate functions (string),
or a list of :class:`Column`.
>>> gdf = df.groupBy(df.name)
>>> gdf.agg({"*": "count"}).collect()
[Row(name=u'Alice', COUNT(1)=1), Row(name=u'Bob', COUNT(1)=1)]
>>> from pyspark.sql import functions as F
>>> gdf.agg(F.min(df.age)).collect()
[Row(name=u'Alice', MIN(age)=2), Row(name=u'Bob', MIN(age)=5)]
"""
assert exprs, "exprs should not be empty"
if len(exprs) == 1 and isinstance(exprs[0], dict):
jdf = self._jdf.agg(exprs[0])
else:
# Columns
assert all(isinstance(c, Column) for c in exprs), "all exprs should be Column"
jdf = self._jdf.agg(exprs[0]._jc,
_to_seq(self.sql_ctx._sc, [c._jc for c in exprs[1:]]))
return DataFrame(jdf, self.sql_ctx)
@dfapi
@since(1.3)
def count(self):
"""Counts the number of records for each group.
>>> df.groupBy(df.age).count().collect()
[Row(age=2, count=1), Row(age=5, count=1)]
"""
@df_varargs_api
@since(1.3)
def mean(self, *cols):
"""Computes average values for each numeric column for each group.
:func:`mean` is an alias for :func:`avg`.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().mean('age').collect()
[Row(AVG(age)=3.5)]
>>> df3.groupBy().mean('age', 'height').collect()
[Row(AVG(age)=3.5, AVG(height)=82.5)]
"""
@df_varargs_api
@since(1.3)
def avg(self, *cols):
"""Computes average values for each numeric column for each group.
:func:`mean` is an alias for :func:`avg`.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().avg('age').collect()
[Row(AVG(age)=3.5)]
>>> df3.groupBy().avg('age', 'height').collect()
[Row(AVG(age)=3.5, AVG(height)=82.5)]
"""
@df_varargs_api
@since(1.3)
def max(self, *cols):
"""Computes the max value for each numeric column for each group.
>>> df.groupBy().max('age').collect()
[Row(MAX(age)=5)]
>>> df3.groupBy().max('age', 'height').collect()
[Row(MAX(age)=5, MAX(height)=85)]
"""
@df_varargs_api
@since(1.3)
def min(self, *cols):
"""Computes the min value for each numeric column for each group.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().min('age').collect()
[Row(MIN(age)=2)]
>>> df3.groupBy().min('age', 'height').collect()
[Row(MIN(age)=2, MIN(height)=80)]
"""
@df_varargs_api
@since(1.3)
def sum(self, *cols):
"""Computes the sum for each numeric column for each group.
:param cols: list of column names (string). Non-numeric columns are ignored.
>>> df.groupBy().sum('age').collect()
[Row(SUM(age)=7)]
>>> df3.groupBy().sum('age', 'height').collect()
[Row(SUM(age)=7, SUM(height)=165)]
"""
def _test():
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
import pyspark.sql.group
globs = pyspark.sql.group.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df3'] = sc.parallelize([Row(name='Alice', age=2, height=80),
Row(name='Bob', age=5, height=85)]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.group, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| 32.954082
| 95
| 0.608608
|
c90a48f5af8dccaa380d974934a153b6d935bd78
| 13,132
|
py
|
Python
|
cleaning_helper1.py
|
dan0mun/WebContentClassifier
|
07d07b49f8fb5cb9413f1ee8a3952fe1bee08ee6
|
[
"Apache-2.0"
] | null | null | null |
cleaning_helper1.py
|
dan0mun/WebContentClassifier
|
07d07b49f8fb5cb9413f1ee8a3952fe1bee08ee6
|
[
"Apache-2.0"
] | null | null | null |
cleaning_helper1.py
|
dan0mun/WebContentClassifier
|
07d07b49f8fb5cb9413f1ee8a3952fe1bee08ee6
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import numpy as np
import string
import re
from nltk.corpus import stopwords
nltk_stopwords = stopwords.words('english')
remove_punctuation = '!"$%&\'()*+,-./:;<=>?@[\\]“”^_`{|}~’'
def clean_column(dataframe, column_to_clean, new_col):
df_copy = dataframe.copy()
df_copy['copied_column'] = df_copy[column_to_clean]
df_copy['copied_column'] = df_copy['copied_column'].str.lower()
cleaned_column = []
for label in df_copy.index:
row = df_copy.loc[label, :]['copied_column']
clean = [x for x in row.split() if x not in string.punctuation]
clean = [x for x in clean if x not in nltk_stopwords]
clean = [x for x in clean if x not in string.digits]
clean = [x for x in clean if x not in remove_punctuation]
clean = [x for x in clean if len(x) != 1]
clean = " ".join(clean)
clean = clean.strip()
cleaned_column.append(clean)
df_copy[new_col] = cleaned_column
del df_copy['copied_column']
return df_copy
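# Illustrative usage sketch (assumes the NLTK stopwords corpus has been downloaded locally);
# the column names and sample text below are invented for the example.
#
#   demo = pd.DataFrame({'raw': ['Bitcoin hit an ATH today!!', 'The 2 of us went str8 home.']})
#   demo = clean_column(demo, 'raw', 'clean')
#   print(demo['clean'].tolist())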
def filtration(dataframe, column):
# clean = list(map(lambda x: x.replace("#", ""), clean)) #we want to maintain hashtags!
dataframe[column] = dataframe[column].apply(lambda x: x.replace('"', ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("’", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(":", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("…", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(".",""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("⋆", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" ⋆ ", " "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" ", " "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("$", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(",", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" alime ", " all time "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" alltime ", " all time "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(";", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("alime", "all time "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("atm", "at the moment"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" ath ", " all time high "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("str8", "straight"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" v ", " very "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" #d", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" ddos ", " distributed denial of service "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("btce", "btc"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("bitcoina", "bitcoin"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("rbitcoin", "bitcoin"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" – ", " "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("->", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" ➤ ", " "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("◄►", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("◄", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" ur ", " your "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" u ", " you "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("forthen", "for then"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(">", "greater than"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("<", "less than"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("lt", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("gt", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(":", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("&", "and"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("ampamp", "and"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" amp ", " and "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("amp", "and"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" bu ", " but "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("/", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("...", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("(", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(")", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("“", '"'))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("”", '"'))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("‘", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("’", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("-"," "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("*", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("!", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("⬛️", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("\u200d", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("\U0001f986", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("\U0001f942", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("\U0001f92f", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("\U0001f911", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("\U0001F193", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" ⭕ ", " "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("🤔", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("☞ ", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("[", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("]", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("{", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("}", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("ô", "o"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("ó", "o"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("é", "e"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("ï","i"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("®", ""))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("á", "a"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("ã", "a"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace("ç", "c"))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" jan ", " january "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" feb ", " february "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" mar ", " march "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" apr ", " april "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" jun ", " june "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" jul ", " july "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" aug ", " august "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" sept ", " september "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" oct ", " october "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" nov ", " november "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" dec ", " december "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" washinon ", " washington "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" dming ", " direct messaging "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" cust ", " customer "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" wcust ", " with customer "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" cc ", " credit card "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" gopros ", " go pros "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" ultimatelyi ", " ultimately i "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" 1hr ", " one hour "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" rep ", " representative "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" wunited ", " with united "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" mp# ", " mileage plus number "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" hrs ", " hours "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" 4hours ", " four hours "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" laxewr ", " lax ewr "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" iadlax ", " iad lax "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" julystill ", " july still "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" 30mins ", " 30 minutes "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" mins ", " minutes "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" 5hours ", " 5 hours "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" checkhowever ", " check however "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" familyno ", " family "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" 2nd ", " second "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" 6hour ", " six hour "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" cuz ", " because "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" cause ", " because "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" ideabuy ", " idea buy "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" fixem ", " fix them "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" properthey ", " proper they "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" americanair ", " american air "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" yea ", " yes "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" gnteed ", " guaranteed "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" 6mo ", " 6 months "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" believei ", " believe "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" btw ", " by the way "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" intl ", " international "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" thxs ", " thanks "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" plususual ", " plus usual "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" fridaycant ", " friday can not "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" lhr ", " 1 hour "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" wheelsup ", " wheels up "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" tryna ", " try and "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" 2hours ", " 2 hours "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" 1st ", " first "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" creditcard ", " credit card "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" luv ", " love "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" obv ", " obviously "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" patientyou ", " patient you "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" youwe ", " you have "))
dataframe[column] = dataframe[column].apply(lambda x: x.replace(" uraniumone ", " uranium one "))
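# A minimal end-to-end sketch of the two helpers above. Note that filtration() mutates the
# passed DataFrame in place and returns None, so the same frame is reused afterwards. The
# sample text and column names are invented for illustration, and running this requires the
# NLTK stopwords corpus to be available locally.
if __name__ == "__main__":
    sample = pd.DataFrame({'tweet': ['BTC hit an ATH today & Im v happy!!',
                                     'Waited 2hours on hold with the rep... thxs for nothing']})
    sample = clean_column(sample, 'tweet', 'clean_tweet')
    filtration(sample, 'clean_tweet')
    print(sample[['tweet', 'clean_tweet']])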
| 80.073171
| 114
| 0.651157
|
7697b09b9cc20f74f5a77106cbcda72c4886c594
| 61,856
|
py
|
Python
|
src/oci/oda/oda_client.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/oda/oda_client.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/oda/oda_client.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import absolute_import
from oci._vendor import requests # noqa: F401
from oci._vendor import six
from oci import retry # noqa: F401
from oci.base_client import BaseClient
from oci.config import get_config_value_or_default, validate_config
from oci.signer import Signer
from oci.util import Sentinel, get_signer_from_authentication_type, AUTHENTICATION_TYPE_FIELD_NAME
from .models import oda_type_mapping
missing = Sentinel("Missing")
class OdaClient(object):
"""
API to create and maintain Oracle Digital Assistant service instances.
"""
def __init__(self, config, **kwargs):
"""
Creates a new service client
:param dict config:
Configuration keys and values as per `SDK and Tool Configuration <https://docs.cloud.oracle.com/Content/API/Concepts/sdkconfig.htm>`__.
The :py:meth:`~oci.config.from_file` method can be used to load configuration from a file. Alternatively, a ``dict`` can be passed. You can validate the
dict using :py:meth:`~oci.config.validate_config`
:param str service_endpoint: (optional)
The endpoint of the service to call using this client. For example ``https://iaas.us-ashburn-1.oraclecloud.com``. If this keyword argument is
not provided then it will be derived using the region in the config parameter. You should only provide this keyword argument if you have an explicit
need to specify a service endpoint.
:param timeout: (optional)
The connection and read timeouts for the client. The default values are connection timeout 10 seconds and read timeout 60 seconds. This keyword argument can be provided
as a single float, in which case the value provided is used for both the read and connection timeouts, or as a tuple of two floats. If
a tuple is provided then the first value is used as the connection timeout and the second value as the read timeout.
:type timeout: float or tuple(float, float)
:param signer: (optional)
The signer to use when signing requests made by the service client. The default is to use a :py:class:`~oci.signer.Signer` based on the values
provided in the config parameter.
One use case for this parameter is for `Instance Principals authentication <https://docs.cloud.oracle.com/Content/Identity/Tasks/callingservicesfrominstances.htm>`__
by passing an instance of :py:class:`~oci.auth.signers.InstancePrincipalsSecurityTokenSigner` as the value for this keyword argument
:type signer: :py:class:`~oci.signer.AbstractBaseSigner`
:param obj retry_strategy: (optional)
A retry strategy to apply to all calls made by this service client (i.e. at the client level). There is no retry strategy applied by default.
Retry strategies can also be applied at the operation level by passing a ``retry_strategy`` keyword argument as part of calling the operation.
Any value provided at the operation level will override whatever is specified at the client level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
"""
validate_config(config, signer=kwargs.get('signer'))
if 'signer' in kwargs:
signer = kwargs['signer']
elif AUTHENTICATION_TYPE_FIELD_NAME in config:
signer = get_signer_from_authentication_type(config)
else:
signer = Signer(
tenancy=config["tenancy"],
user=config["user"],
fingerprint=config["fingerprint"],
private_key_file_location=config.get("key_file"),
pass_phrase=get_config_value_or_default(config, "pass_phrase"),
private_key_content=config.get("key_content")
)
base_client_init_kwargs = {
'regional_client': True,
'service_endpoint': kwargs.get('service_endpoint'),
'base_path': '/20190506',
'service_endpoint_template': 'https://digitalassistant-api.{region}.oci.{secondLevelDomain}',
'skip_deserialization': kwargs.get('skip_deserialization', False)
}
if 'timeout' in kwargs:
base_client_init_kwargs['timeout'] = kwargs.get('timeout')
self.base_client = BaseClient("oda", config, signer, oda_type_mapping, **base_client_init_kwargs)
self.retry_strategy = kwargs.get('retry_strategy')
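# Illustrative construction sketch (not part of the generated client), assuming a standard
# ~/.oci/config profile is available; see oci.config.from_file for details.
#
#   import oci
#   config = oci.config.from_file()
#   oda_client = oci.oda.OdaClient(config, timeout=(10, 120))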
def change_oda_instance_compartment(self, oda_instance_id, change_oda_instance_compartment_details, **kwargs):
"""
Moves a Digital Assistant instance into a different compartment. When provided, If-Match is checked against
ETag values of the resource.
:param str oda_instance_id: (required)
Unique Digital Assistant instance identifier.
:param oci.oda.models.ChangeOdaInstanceCompartmentDetails change_oda_instance_compartment_details: (required)
The compartment to which the Digital Assistant instance should be moved.
:param str if_match: (optional)
For optimistic concurrency control in a PUT or DELETE call for
a Digital Assistant instance, set the `if-match` query parameter
to the value of the `ETAG` header from a previous GET or POST
response for that instance. The service updates or deletes the
instance only if the etag that you provide matches the instance's
current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing. This value is included in the opc-request-id response header.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so that you can retry the request if there's
a timeout or server error without the risk of executing that same action again.
Retry tokens expire after 24 hours, but they can become invalid before then if there are
conflicting operations. For example, if an instance was deleted and purged from the system,
then the service might reject a retry of the original creation request.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/oda/change_oda_instance_compartment.py.html>`__ to see an example of how to use change_oda_instance_compartment API.
"""
resource_path = "/odaInstances/{odaInstanceId}/actions/changeCompartment"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"change_oda_instance_compartment got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"odaInstanceId": oda_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_oda_instance_compartment_details)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=change_oda_instance_compartment_details)
def create_oda_instance(self, create_oda_instance_details, **kwargs):
"""
Starts an asynchronous job to create a Digital Assistant instance.
To monitor the status of the job, take the `opc-work-request-id` response
header value and use it to call `GET /workRequests/{workRequestID}`.
:param oci.oda.models.CreateOdaInstanceDetails create_oda_instance_details: (required)
Details for the new Digital Assistant instance.
:param str opc_request_id: (optional)
The client request ID for tracing. This value is included in the opc-request-id response header.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so that you can retry the request if there's
a timeout or server error without the risk of executing that same action again.
Retry tokens expire after 24 hours, but they can become invalid before then if there are
conflicting operations. For example, if an instance was deleted and purged from the system,
then the service might reject a retry of the original creation request.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.oda.models.OdaInstance`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/oda/create_oda_instance.py.html>`__ to see an example of how to use create_oda_instance API.
"""
resource_path = "/odaInstances"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"create_oda_instance got unknown kwargs: {!r}".format(extra_kwargs))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_oda_instance_details,
response_type="OdaInstance")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
header_params=header_params,
body=create_oda_instance_details,
response_type="OdaInstance")
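# Illustrative usage sketch (not part of the generated client). The compartment OCID and the
# shape name below are assumptions; the work-request id is read from the
# `opc-work-request-id` response header as described in the docstring.
#
#   details = oci.oda.models.CreateOdaInstanceDetails(
#       compartment_id="ocid1.compartment.oc1..example",
#       shape_name="DEVELOPMENT")
#   response = oda_client.create_oda_instance(details)
#   work_request_id = response.headers.get("opc-work-request-id")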
def delete_oda_instance(self, oda_instance_id, **kwargs):
"""
Starts an asynchronous job to delete the specified Digital Assistant instance.
To monitor the status of the job, take the `opc-work-request-id` response header value and use it to call `GET /workRequests/{workRequestID}`.
:param str oda_instance_id: (required)
Unique Digital Assistant instance identifier.
:param str if_match: (optional)
For optimistic concurrency control in a PUT or DELETE call for
a Digital Assistant instance, set the `if-match` query parameter
to the value of the `ETAG` header from a previous GET or POST
response for that instance. The service updates or deletes the
instance only if the etag that you provide matches the instance's
current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing. This value is included in the opc-request-id response header.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/oda/delete_oda_instance.py.html>`__ to see an example of how to use delete_oda_instance API.
"""
resource_path = "/odaInstances/{odaInstanceId}"
method = "DELETE"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"delete_oda_instance got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"odaInstanceId": oda_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def get_oda_instance(self, oda_instance_id, **kwargs):
"""
Gets the specified Digital Assistant instance.
:param str oda_instance_id: (required)
Unique Digital Assistant instance identifier.
:param str opc_request_id: (optional)
The client request ID for tracing. This value is included in the opc-request-id response header.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.oda.models.OdaInstance`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/oda/get_oda_instance.py.html>`__ to see an example of how to use get_oda_instance API.
"""
resource_path = "/odaInstances/{odaInstanceId}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_oda_instance got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"odaInstanceId": oda_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="OdaInstance")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="OdaInstance")
def get_work_request(self, work_request_id, **kwargs):
"""
Gets information about the work request with the specified ID, including its status.
You can use this operation to monitor the status of jobs that you
requested to create, delete, and update instances.
:param str work_request_id: (required)
The identifier of the asynchronous work request.
:param str opc_request_id: (optional)
The client request ID for tracing. This value is included in the opc-request-id response header.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.oda.models.WorkRequest`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/oda/get_work_request.py.html>`__ to see an example of how to use get_work_request API.
"""
resource_path = "/workRequests/{workRequestId}"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"get_work_request got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workRequestId": work_request_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="WorkRequest")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
response_type="WorkRequest")
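# Illustrative polling sketch (not part of the generated client), assuming the WorkRequest
# model exposes a `status` field with SUCCEEDED/FAILED as terminal states.
#
#   import time
#   wr = oda_client.get_work_request(work_request_id).data
#   while wr.status not in ("SUCCEEDED", "FAILED"):
#       time.sleep(30)
#       wr = oda_client.get_work_request(work_request_id).data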
def list_oda_instances(self, compartment_id, **kwargs):
"""
Returns a page of Digital Assistant instances that belong to the specified
compartment.
If the `opc-next-page` header appears in the response, then
there are more items to retrieve. To get the next page in the subsequent
GET request, include the header's value as the `page` query parameter.
:param str compartment_id: (required)
List the Digital Assistant instances that belong to this compartment.
:param str display_name: (optional)
List only the information for the Digital Assistant instance with this user-friendly name. These names don't have to be unique and may change.
Example: `My new resource`
:param str lifecycle_state: (optional)
List only the Digital Assistant instances that are in this lifecycle state.
Allowed values are: "CREATING", "UPDATING", "ACTIVE", "INACTIVE", "DELETING", "DELETED", "FAILED"
:param int limit: (optional)
The maximum number of items to return per page.
:param str page: (optional)
The page at which to start retrieving results.
You get this value from the `opc-next-page` header in a previous list request.
To retrieve the first page, omit this query parameter.
Example: `MToxMA==`
:param str sort_order: (optional)
Sort the results in this order, use either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param str sort_by: (optional)
Sort on this field. You can specify one sort order only. The default sort field is `TIMECREATED`.
The default sort order for `TIMECREATED` is descending, and the default sort order for `DISPLAYNAME` is ascending.
Allowed values are: "TIMECREATED", "DISPLAYNAME"
:param str opc_request_id: (optional)
The client request ID for tracing. This value is included in the opc-request-id response header.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.oda.models.OdaInstanceSummary`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/oda/list_oda_instances.py.html>`__ to see an example of how to use list_oda_instances API.
"""
resource_path = "/odaInstances"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"display_name",
"lifecycle_state",
"limit",
"page",
"sort_order",
"sort_by",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_oda_instances got unknown kwargs: {!r}".format(extra_kwargs))
if 'lifecycle_state' in kwargs:
lifecycle_state_allowed_values = ["CREATING", "UPDATING", "ACTIVE", "INACTIVE", "DELETING", "DELETED", "FAILED"]
if kwargs['lifecycle_state'] not in lifecycle_state_allowed_values:
raise ValueError(
"Invalid value for `lifecycle_state`, must be one of {0}".format(lifecycle_state_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
if 'sort_by' in kwargs:
sort_by_allowed_values = ["TIMECREATED", "DISPLAYNAME"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
query_params = {
"compartmentId": compartment_id,
"displayName": kwargs.get("display_name", missing),
"lifecycleState": kwargs.get("lifecycle_state", missing),
"limit": kwargs.get("limit", missing),
"page": kwargs.get("page", missing),
"sortOrder": kwargs.get("sort_order", missing),
"sortBy": kwargs.get("sort_by", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[OdaInstanceSummary]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[OdaInstanceSummary]")
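# Illustrative pagination sketch (not part of the generated client). The
# oci.pagination.list_call_get_all_results helper follows the `opc-next-page` header
# automatically; the compartment OCID below is an assumption.
#
#   all_instances = oci.pagination.list_call_get_all_results(
#       oda_client.list_oda_instances,
#       "ocid1.compartment.oc1..example").data
#   for inst in all_instances:
#       print(inst.display_name, inst.lifecycle_state)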
def list_work_request_errors(self, work_request_id, **kwargs):
"""
Returns a page of errors for the specified work request.
If the `opc-next-page` header appears in the response, then
there are more items to retrieve. To get the next page in the subsequent
GET request, include the header's value as the `page` query parameter.
:param str work_request_id: (required)
The identifier of the asynchronous work request.
:param str opc_request_id: (optional)
The client request ID for tracing. This value is included in the opc-request-id response header.
:param str page: (optional)
The page at which to start retrieving results.
You get this value from the `opc-next-page` header in a previous list request.
To retrieve the first page, omit this query parameter.
Example: `MToxMA==`
:param int limit: (optional)
The maximum number of items to return per page.
:param str sort_by: (optional)
The field to sort by. You can specify only one sort order. If no value is specified, then the default is `TIMESTAMP`.
The default sort order for both `TIMESTAMP` and `CODE` is ascending.
Allowed values are: "CODE", "TIMESTAMP"
:param str sort_order: (optional)
Sort the results in this order, use either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.oda.models.WorkRequestError`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/oda/list_work_request_errors.py.html>`__ to see an example of how to use list_work_request_errors API.
"""
resource_path = "/workRequests/{workRequestId}/errors"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"page",
"limit",
"sort_by",
"sort_order"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_work_request_errors got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workRequestId": work_request_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_by' in kwargs:
sort_by_allowed_values = ["CODE", "TIMESTAMP"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestError]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestError]")
def list_work_request_logs(self, work_request_id, **kwargs):
"""
Returns a page of of log messages for a given work request.
If the `opc-next-page` header appears in the response, then
there are more items to retrieve. To get the next page in the subsequent
GET request, include the header's value as the `page` query parameter.
:param str work_request_id: (required)
The identifier of the asynchronous work request.
:param str opc_request_id: (optional)
The client request ID for tracing. This value is included in the opc-request-id response header.
:param str page: (optional)
The page at which to start retrieving results.
You get this value from the `opc-next-page` header in a previous list request.
To retrieve the first page, omit this query parameter.
Example: `MToxMA==`
:param int limit: (optional)
The maximum number of items to return per page.
:param str sort_by: (optional)
The field to sort by. You can specify only one sort order. If no value is specified, then the default is `TIMESTAMP`.
The default sort order for both `TIMESTAMP` and `MESSAGE` is ascending.
Allowed values are: "MESSAGE", "TIMESTAMP"
:param str sort_order: (optional)
Sort the results in this order, use either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.oda.models.WorkRequestLogEntry`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/oda/list_work_request_logs.py.html>`__ to see an example of how to use list_work_request_logs API.
"""
resource_path = "/workRequests/{workRequestId}/logs"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"page",
"limit",
"sort_by",
"sort_order"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_work_request_logs got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"workRequestId": work_request_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
if 'sort_by' in kwargs:
sort_by_allowed_values = ["MESSAGE", "TIMESTAMP"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestLogEntry]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestLogEntry]")
def list_work_requests(self, compartment_id, **kwargs):
"""
Returns a page of work requests for the specified compartment.
If the `opc-next-page` header appears in the response, then
there are more items to retrieve. To get the next page in the subsequent
GET request, include the header's value as the `page` query parameter.
:param str compartment_id: (required)
List the Digital Assistant instances that belong to this compartment.
:param str opc_request_id: (optional)
The client request ID for tracing. This value is included in the opc-request-id response header.
:param str oda_instance_id: (optional)
List only the information for this Digital Assistant instance.
:param str page: (optional)
The page at which to start retrieving results.
You get this value from the `opc-next-page` header in a previous list request.
To retrieve the first page, omit this query parameter.
Example: `MToxMA==`
:param int limit: (optional)
The maximum number of items to return per page.
:param str sort_by: (optional)
The field to sort by. You can specify only one sort order. If no value is specified, then the default is `TIME_ACCEPTED`.
The default sort order for the time fields is descending. The default order for `DISPLAYNAME` and `STATUS` is ascending.
Allowed values are: "OPERATION_TYPE", "STATUS", "TIME_ACCEPTED", "TIME_STARTED", "TIME_FINISHED"
:param str sort_order: (optional)
Sort the results in this order, use either `ASC` (ascending) or `DESC` (descending).
Allowed values are: "ASC", "DESC"
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type list of :class:`~oci.oda.models.WorkRequestSummary`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/oda/list_work_requests.py.html>`__ to see an example of how to use list_work_requests API.
"""
resource_path = "/workRequests"
method = "GET"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"oda_instance_id",
"page",
"limit",
"sort_by",
"sort_order"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"list_work_requests got unknown kwargs: {!r}".format(extra_kwargs))
if 'sort_by' in kwargs:
sort_by_allowed_values = ["OPERATION_TYPE", "STATUS", "TIME_ACCEPTED", "TIME_STARTED", "TIME_FINISHED"]
if kwargs['sort_by'] not in sort_by_allowed_values:
raise ValueError(
"Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
)
if 'sort_order' in kwargs:
sort_order_allowed_values = ["ASC", "DESC"]
if kwargs['sort_order'] not in sort_order_allowed_values:
raise ValueError(
"Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
)
query_params = {
"compartmentId": compartment_id,
"odaInstanceId": kwargs.get("oda_instance_id", missing),
"page": kwargs.get("page", missing),
"limit": kwargs.get("limit", missing),
"sortBy": kwargs.get("sort_by", missing),
"sortOrder": kwargs.get("sort_order", missing)
}
query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestSummary]")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
query_params=query_params,
header_params=header_params,
response_type="list[WorkRequestSummary]")
def start_oda_instance(self, oda_instance_id, **kwargs):
"""
Starts an inactive Digital Assistant instance. Once active, the instance will be accessible and metering
of requests will be started again.
:param str oda_instance_id: (required)
Unique Digital Assistant instance identifier.
:param str opc_request_id: (optional)
The client request ID for tracing. This value is included in the opc-request-id response header.
:param str if_match: (optional)
For optimistic concurrency control in a PUT or DELETE call for
a Digital Assistant instance, set the `if-match` request header
to the value of the `ETAG` header from a previous GET or POST
response for that instance. The service updates or deletes the
instance only if the etag that you provide matches the instance's
current etag value.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so that you can retry the request if there's
a timeout or server error without the risk of executing that same action again.
Retry tokens expire after 24 hours, but they can become invalid before then if there are
conflicting operations. For example, if an instance was deleted and purged from the system,
then the service might reject a retry of the original creation request.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/oda/start_oda_instance.py.html>`__ to see an example of how to use start_oda_instance API.
"""
resource_path = "/odaInstances/{odaInstanceId}/actions/start"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"if_match",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"start_oda_instance got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"odaInstanceId": oda_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"if-match": kwargs.get("if_match", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def stop_oda_instance(self, oda_instance_id, **kwargs):
"""
Stops an active Digital Assistant instance. Once inactive, the instance will not be accessible and metering
of requests will be stopped until the instance is started again. Data associated with the instance
is not affected.
:param str oda_instance_id: (required)
Unique Digital Assistant instance identifier.
:param str opc_request_id: (optional)
The client request ID for tracing. This value is included in the opc-request-id response header.
:param str if_match: (optional)
For optimistic concurrency control in a PUT or DELETE call for
a Digital Assistant instance, set the `if-match` request header
to the value of the `ETAG` header from a previous GET or POST
response for that instance. The service updates or deletes the
instance only if the etag that you provide matches the instance's
current etag value.
:param str opc_retry_token: (optional)
A token that uniquely identifies a request so that you can retry the request if there's
a timeout or server error without the risk of executing that same action again.
Retry tokens expire after 24 hours, but they can become invalid before then if there are
conflicting operations. For example, if an instance was deleted and purged from the system,
then the service might reject a retry of the original creation request.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type None
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/oda/stop_oda_instance.py.html>`__ to see an example of how to use stop_oda_instance API.
"""
resource_path = "/odaInstances/{odaInstanceId}/actions/stop"
method = "POST"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"opc_request_id",
"if_match",
"opc_retry_token"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"stop_oda_instance got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"odaInstanceId": oda_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"opc-request-id": kwargs.get("opc_request_id", missing),
"if-match": kwargs.get("if_match", missing),
"opc-retry-token": kwargs.get("opc_retry_token", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
if not isinstance(retry_strategy, retry.NoneRetryStrategy):
self.base_client.add_opc_retry_token_if_needed(header_params)
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params)
def update_oda_instance(self, oda_instance_id, update_oda_instance_details, **kwargs):
"""
Updates the specified Digital Assistant instance with the information in the request body.
:param str oda_instance_id: (required)
Unique Digital Assistant instance identifier.
:param oci.oda.models.UpdateOdaInstanceDetails update_oda_instance_details: (required)
The information to update.
:param str if_match: (optional)
For optimistic concurrency control in a PUT or DELETE call for
a Digital Assistant instance, set the `if-match` request header
to the value of the `ETAG` header from a previous GET or POST
response for that instance. The service updates or deletes the
instance only if the etag that you provide matches the instance's
current etag value.
:param str opc_request_id: (optional)
The client request ID for tracing. This value is included in the opc-request-id response header.
:param obj retry_strategy: (optional)
A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level.
This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY`
is also available. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__.
To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`.
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.oda.models.OdaInstance`
:rtype: :class:`~oci.response.Response`
:example:
Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/oda/update_oda_instance.py.html>`__ to see an example of how to use update_oda_instance API.
"""
resource_path = "/odaInstances/{odaInstanceId}"
method = "PUT"
# Don't accept unknown kwargs
expected_kwargs = [
"retry_strategy",
"if_match",
"opc_request_id"
]
extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
if extra_kwargs:
raise ValueError(
"update_oda_instance got unknown kwargs: {!r}".format(extra_kwargs))
path_params = {
"odaInstanceId": oda_instance_id
}
path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
for (k, v) in six.iteritems(path_params):
if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))
header_params = {
"accept": "application/json",
"content-type": "application/json",
"if-match": kwargs.get("if_match", missing),
"opc-request-id": kwargs.get("opc_request_id", missing)
}
header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}
retry_strategy = self.retry_strategy
if kwargs.get('retry_strategy'):
retry_strategy = kwargs.get('retry_strategy')
if retry_strategy:
return retry_strategy.make_retrying_call(
self.base_client.call_api,
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_oda_instance_details,
response_type="OdaInstance")
else:
return self.base_client.call_api(
resource_path=resource_path,
method=method,
path_params=path_params,
header_params=header_params,
body=update_oda_instance_details,
response_type="OdaInstance")
| 47.074581
| 245
| 0.639566
|
dfedbe7abd4b9814ee41825b8316f2cf50c034dd
| 4,534
|
py
|
Python
|
metpy/plots/tests/test_util.py
|
ahuang11/MetPy
|
6116ceca6c48fda7e4ead254d2a83ebf86391e7a
|
[
"BSD-3-Clause"
] | 1
|
2019-09-15T18:02:59.000Z
|
2019-09-15T18:02:59.000Z
|
metpy/plots/tests/test_util.py
|
e-dennis/MetPy
|
6529608d956039d4791a17a7bdb1a2c0bf97cd75
|
[
"BSD-3-Clause"
] | null | null | null |
metpy/plots/tests/test_util.py
|
e-dennis/MetPy
|
6529608d956039d4791a17a7bdb1a2c0bf97cd75
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2017,2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Tests for the `_util` module."""
from datetime import datetime
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pytest
from metpy.plots import add_metpy_logo, add_timestamp, add_unidata_logo, convert_gempak_color
# Fixture to make sure we have the right backend
from metpy.testing import set_agg_backend # noqa: F401, I202
MPL_VERSION = matplotlib.__version__[:3]
@pytest.mark.mpl_image_compare(tolerance=0.01, remove_text=True)
def test_add_timestamp():
"""Test adding a timestamp to an axes object."""
fig = plt.figure(figsize=(9, 9))
ax = plt.subplot(1, 1, 1)
add_timestamp(ax, time=datetime(2017, 1, 1))
return fig
@pytest.mark.mpl_image_compare(tolerance=0.01, remove_text=True)
def test_add_timestamp_custom_format():
"""Test adding a timestamp to an axes object with custom time formatting."""
fig = plt.figure(figsize=(9, 9))
ax = plt.subplot(1, 1, 1)
add_timestamp(ax, time=datetime(2017, 1, 1), time_format='%H:%M:%S %Y/%m/%d')
return fig
@pytest.mark.mpl_image_compare(tolerance=0.01, remove_text=True)
def test_add_timestamp_pretext():
"""Test adding a timestamp to an axes object with custom pre-text."""
fig = plt.figure(figsize=(9, 9))
ax = plt.subplot(1, 1, 1)
add_timestamp(ax, time=datetime(2017, 1, 1), pretext='Valid: ')
return fig
@pytest.mark.mpl_image_compare(tolerance={'2.0': 0.21}.get(MPL_VERSION, 0.01),
remove_text=True)
def test_add_timestamp_high_contrast():
"""Test adding a timestamp to an axes object."""
fig = plt.figure(figsize=(9, 9))
ax = plt.subplot(1, 1, 1)
add_timestamp(ax, time=datetime(2017, 1, 1), high_contrast=True)
return fig
@pytest.mark.mpl_image_compare(tolerance=0.004, remove_text=True)
def test_add_metpy_logo_small():
"""Test adding a MetPy logo to a figure."""
fig = plt.figure(figsize=(9, 9))
add_metpy_logo(fig)
return fig
@pytest.mark.mpl_image_compare(tolerance=0.004, remove_text=True)
def test_add_metpy_logo_large():
"""Test adding a large MetPy logo to a figure."""
fig = plt.figure(figsize=(9, 9))
add_metpy_logo(fig, size='large')
return fig
@pytest.mark.mpl_image_compare(tolerance=0.004, remove_text=True)
def test_add_unidata_logo():
"""Test adding a Unidata logo to a figure."""
fig = plt.figure(figsize=(9, 9))
add_unidata_logo(fig)
return fig
def test_add_logo_invalid_size():
"""Test adding a logo to a figure with an invalid size specification."""
fig = plt.figure(figsize=(9, 9))
with pytest.raises(ValueError):
add_metpy_logo(fig, size='jumbo')
@pytest.mark.mpl_image_compare(tolerance=0.01, remove_text=True)
def test_gempak_color_image_compare():
"""Test creating a plot with all the GEMPAK colors."""
c = range(32)
mplc = convert_gempak_color(c)
delta = 0.025
x = y = np.arange(-3.0, 3.01, delta)
xx, yy = np.meshgrid(x, y)
z1 = np.exp(-xx**2 - yy**2)
z2 = np.exp(-(xx - 1)**2 - (yy - 1)**2)
z = (z1 - z2) * 2
fig = plt.figure(figsize=(9, 9))
cs = plt.contourf(xx, yy, z, levels=np.linspace(-1.8, 1.8, 33), colors=mplc)
plt.colorbar(cs)
return fig
@pytest.mark.mpl_image_compare(tolerance=0.01, remove_text=True)
def test_gempak_color_xw_image_compare():
"""Test creating a plot with all the GEMPAK colors using xw style."""
c = range(32)
mplc = convert_gempak_color(c, style='xw')
delta = 0.025
x = y = np.arange(-3.0, 3.01, delta)
xx, yy = np.meshgrid(x, y)
z1 = np.exp(-xx**2 - yy**2)
z2 = np.exp(-(xx - 1)**2 - (yy - 1)**2)
z = (z1 - z2) * 2
fig = plt.figure(figsize=(9, 9))
cs = plt.contourf(xx, yy, z, levels=np.linspace(-1.8, 1.8, 33), colors=mplc)
plt.colorbar(cs)
return fig
def test_gempak_color_invalid_style():
"""Test converting a GEMPAK color with an invalid style parameter."""
c = range(32)
with pytest.raises(ValueError):
convert_gempak_color(c, style='plt')
def test_gempak_color_quirks():
"""Test converting some unusual GEMPAK colors."""
c = [-5, 95, 101]
mplc = convert_gempak_color(c)
truth = ['white', 'bisque', 'white']
assert mplc == truth
def test_gempak_color_scalar():
"""Test converting a single GEMPAK color."""
mplc = convert_gempak_color(6)
truth = 'cyan'
assert mplc == truth
| 31.054795
| 93
| 0.671372
|
003bcd9e170a259bc68deede4af20d46026a6eed
| 15,664
|
py
|
Python
|
plot_coco.py
|
zhangrj91/DarkPose
|
5fbdd3fd4699a1f2aa00df69b05f93e63b674670
|
[
"Apache-2.0"
] | null | null | null |
plot_coco.py
|
zhangrj91/DarkPose
|
5fbdd3fd4699a1f2aa00df69b05f93e63b674670
|
[
"Apache-2.0"
] | null | null | null |
plot_coco.py
|
zhangrj91/DarkPose
|
5fbdd3fd4699a1f2aa00df69b05f93e63b674670
|
[
"Apache-2.0"
] | null | null | null |
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Ke Sun (sunk@mail.ustc.edu.cn)
# Modified by Depu Meng (mdp@mail.ustc.edu.cn)
# ------------------------------------------------------------------------------
import argparse
import numpy as np
import matplotlib.pyplot as plt
import cv2
import json
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import os
class ColorStyle:
def __init__(self, color, link_pairs, point_color):
self.color = color
self.link_pairs = link_pairs
self.point_color = point_color
for i in range(len(self.color)):
self.link_pairs[i].append(tuple(np.array(self.color[i])/255.))
self.ring_color = []
for i in range(len(self.point_color)):
self.ring_color.append(tuple(np.array(self.point_color[i])/255.))
# Xiaochu Style
# (R,G,B)
color1 = [(179,0,0),(228,26,28),(255,255,51),
(49,163,84), (0,109,45), (255,255,51),
(240,2,127),(240,2,127),(240,2,127), (240,2,127), (240,2,127),
(217,95,14), (254,153,41),(255,255,51),
(44,127,184),(0,0,255)]
link_pairs1 = [
[15, 13], [13, 11], [11, 5],
[12, 14], [14, 16], [12, 6],
[3, 1],[1, 2],[1, 0],[0, 2],[2,4],
[9, 7], [7,5], [5, 6],
[6, 8], [8, 10],
]
point_color1 = [(240,2,127),(240,2,127),(240,2,127),
(240,2,127), (240,2,127),
(255,255,51),(255,255,51),
(254,153,41),(44,127,184),
(217,95,14),(0,0,255),
(255,255,51),(255,255,51),(228,26,28),
(49,163,84),(252,176,243),(0,176,240),
(255,255,0),(169, 209, 142),
(255,255,0),(169, 209, 142),
(255,255,0),(169, 209, 142)]
xiaochu_style = ColorStyle(color1, link_pairs1, point_color1)
# Chunhua Style
# (R,G,B)
color2 = [(252,176,243),(252,176,243),(252,176,243),
(0,176,240), (0,176,240), (0,176,240),
(240,2,127),(240,2,127),(240,2,127), (240,2,127), (240,2,127),
(255,255,0), (255,255,0),(169, 209, 142),
(169, 209, 142),(169, 209, 142)]
link_pairs2 = [
[15, 13], [13, 11], [11, 5],
[12, 14], [14, 16], [12, 6],
[3, 1],[1, 2],[1, 0],[0, 2],[2,4],
[9, 7], [7,5], [5, 6], [6, 8], [8, 10],
]
point_color2 = [(240,2,127),(240,2,127),(240,2,127),
(240,2,127), (240,2,127),
(255,255,0),(169, 209, 142),
(255,255,0),(169, 209, 142),
(255,255,0),(169, 209, 142),
(252,176,243),(0,176,240),(252,176,243),
(0,176,240),(252,176,243),(0,176,240),
(255,255,0),(169, 209, 142),
(255,255,0),(169, 209, 142),
(255,255,0),(169, 209, 142)]
chunhua_style = ColorStyle(color2, link_pairs2, point_color2)
def parse_args():
parser = argparse.ArgumentParser(description='Visualize COCO predictions')
# general
parser.add_argument('--image-path',
help='Path of COCO val images',
type=str,
default='data/coco/images/val2017/'
)
parser.add_argument('--gt-anno',
help='Path of COCO val annotation',
type=str,
default='data/coco/annotations/person_keypoints_val2017.json'
)
parser.add_argument('--save-path',
help="Path to save the visualizations",
type=str,
default='visualization/coco/')
parser.add_argument('--prediction',
help="Prediction file to visualize",
type=str,
required=True)
parser.add_argument('--style',
help="Style of the visualization: Chunhua style or Xiaochu style",
type=str,
default='chunhua')
args = parser.parse_args()
return args
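# Illustrative command line (a sketch, not taken from the original repository):
# the prediction file below is a placeholder for whatever results JSON your model
# produced; the image/annotation paths simply repeat the argparse defaults above.
#
#   python plot_coco.py \
#       --prediction results/keypoints_val2017_results.json \
#       --image-path data/coco/images/val2017/ \
#       --gt-anno data/coco/annotations/person_keypoints_val2017.json \
#       --style chunhua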
def map_joint_dict(joints):
joints_dict = {}
for i in range(joints.shape[0]):
x = int(joints[i][0])
y = int(joints[i][1])
id = i
joints_dict[id] = (x, y)
return joints_dict
def plot(data, gt_file, img_path, save_path,
link_pairs, ring_color, save=True):
# joints
coco = COCO(gt_file)
coco_dt = coco.loadRes(data)
coco_eval = COCOeval(coco, coco_dt, 'keypoints')
coco_eval._prepare()
gts_ = coco_eval._gts
dts_ = coco_eval._dts
p = coco_eval.params
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
threshold = 0.3
joint_thres = 0.2
for catId in catIds:
for imgId in p.imgIds[:5000]:
if imgId != 259690:
continue
else:
# dimension here should be Nxm
gts = gts_[imgId, catId]
dts = dts_[imgId, catId]
inds = np.argsort([-d['score'] for d in dts], kind='mergesort')
dts = [dts[i] for i in inds]
if len(dts) > p.maxDets[-1]:
dts = dts[0:p.maxDets[-1]]
if len(gts) == 0 or len(dts) == 0:
continue
sum_score = 0
num_box = 0
img_name = str(imgId).zfill(12)
# Read Images
img_file = img_path + img_name + '.jpg'
data_numpy = cv2.imread(img_file, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
h = data_numpy.shape[0]
w = data_numpy.shape[1]
# Plot
fig = plt.figure(figsize=(w / 100, h / 100), dpi=100)
ax = plt.subplot(1, 1, 1)
bk = plt.imshow(data_numpy[:, :, ::-1])
bk.set_zorder(-1)
print(img_name)
for j, gt in enumerate(gts):
# matching dt_box and gt_box
bb = gt['bbox']
x0 = bb[0] - bb[2];
x1 = bb[0] + bb[2] * 2
y0 = bb[1] - bb[3];
y1 = bb[1] + bb[3] * 2
# create bounds for ignore regions (double the gt bbox)
g = np.array(gt['keypoints'])
# xg = g[0::3]; yg = g[1::3];
vg = g[2::3]
for i, dt in enumerate(dts):
# Calculate IoU
dt_bb = dt['bbox']
dt_x0 = dt_bb[0] - dt_bb[2];
dt_x1 = dt_bb[0] + dt_bb[2] * 2
dt_y0 = dt_bb[1] - dt_bb[3];
dt_y1 = dt_bb[1] + dt_bb[3] * 2
ol_x = min(x1, dt_x1) - max(x0, dt_x0)
ol_y = min(y1, dt_y1) - max(y0, dt_y0)
ol_area = ol_x * ol_y
s_x = max(x1, dt_x1) - min(x0, dt_x0)
s_y = max(y1, dt_y1) - min(y0, dt_y0)
sum_area = s_x * s_y
iou = ol_area / (sum_area + np.spacing(1))
score = dt['score']
if iou < 0.1 or score < threshold:
continue
else:
print('iou: ', iou)
dt_w = dt_x1 - dt_x0
dt_h = dt_y1 - dt_y0
ref = min(dt_w, dt_h)
num_box += 1
sum_score += dt['score']
dt_joints = np.array(dt['keypoints']).reshape(17, -1)
joints_dict = map_joint_dict(dt_joints)
# stick
for k, link_pair in enumerate(link_pairs):
if link_pair[0] in joints_dict \
and link_pair[1] in joints_dict:
if dt_joints[link_pair[0], 2] < joint_thres \
or dt_joints[link_pair[1], 2] < joint_thres \
or vg[link_pair[0]] == 0 \
or vg[link_pair[1]] == 0:
continue
if k in range(6, 11):
lw = 1
else:
lw = ref / 100.
if j == 0 and i == 3 and k not in range(6,11):
line = mlines.Line2D(
np.array([joints_dict[link_pair[0]][0],
joints_dict[link_pair[1]][0]]),
np.array([joints_dict[link_pair[0]][1],
joints_dict[link_pair[1]][1]]),
ls='-', lw=4, alpha=1, color=link_pair[2], )
line.set_zorder(0)
ax.add_line(line)
elif j == 1 and i == 0 and k not in range(6,11):
line = mlines.Line2D(
np.array([joints_dict[link_pair[0]][0],
joints_dict[link_pair[1]][0]]),
np.array([joints_dict[link_pair[0]][1],
joints_dict[link_pair[1]][1]]),
ls='-', lw=4, alpha=1, color=link_pair[2], )
line.set_zorder(0)
ax.add_line(line)
elif j == 0 and i == 3 or j == 1 and i == 0:
line = mlines.Line2D(
np.array([joints_dict[link_pair[0]][0],
joints_dict[link_pair[1]][0]]),
np.array([joints_dict[link_pair[0]][1],
joints_dict[link_pair[1]][1]]),
ls='-', lw=lw, alpha=1, color=link_pair[2], )
line.set_zorder(0)
ax.add_line(line)
# black ring
for k in range(dt_joints.shape[0]):
if dt_joints[k, 2] < joint_thres \
or vg[link_pair[0]] == 0 \
or vg[link_pair[1]] == 0:
continue
if dt_joints[k, 0] > w or dt_joints[k, 1] > h:
continue
if k in range(5):
radius = 1
else:
radius = ref / 100
if j == 0 and i == 3 and k == 9:
circle = mpatches.Circle(tuple(dt_joints[k, :2]),
radius=12,
ec='black',
fc=ring_color[k],
alpha=1,
linewidth=2)
circle.set_zorder(2)
ax.add_patch(circle)
elif j == 0 and i == 3 and k not in range(5):
circle = mpatches.Circle(tuple(dt_joints[k, :2]),
radius=6,
ec='black',
fc=ring_color[k],
alpha=1,
linewidth=1)
circle.set_zorder(1)
ax.add_patch(circle)
elif j == 1 and i == 0 and k not in range(5):
circle = mpatches.Circle(tuple(dt_joints[k, :2]),
radius=6,
ec='black',
fc=ring_color[k],
alpha=1,
linewidth=1)
circle.set_zorder(1)
ax.add_patch(circle)
elif j == 1 and i == 0 or j == 0 and i == 3:
circle = mpatches.Circle(tuple(dt_joints[k, :2]),
radius=radius,
ec='black',
fc=ring_color[k],
alpha=1,
linewidth=1)
circle.set_zorder(1)
ax.add_patch(circle)
avg_score = (sum_score / (num_box + np.spacing(1))) * 1000
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.axis('off')
plt.subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)
plt.margins(0, 0)
if save:
plt.savefig(save_path + \
'score_' + str(int(avg_score)) + \
'_id_' + str(imgId) + \
'_' + img_name + '.png',
format='png', bbox_inches='tight', dpi=100)
plt.savefig(save_path + 'id_' + str(imgId) + '.pdf', format='pdf',
bbox_inches='tight', dpi=100)
# plt.show()
plt.close()
if __name__ == '__main__':
args = parse_args()
if args.style == 'xiaochu':
# Xiaochu Style
colorstyle = xiaochu_style
elif args.style == 'chunhua':
# Chunhua Style
colorstyle = chunhua_style
else:
raise Exception('Invalid color style')
save_path = args.save_path
img_path = args.image_path
if not os.path.exists(save_path):
try:
os.makedirs(save_path)
except Exception:
print('Fail to make {}'.format(save_path))
with open(args.prediction) as f:
data = json.load(f)
gt_file = args.gt_anno
plot(data, gt_file, img_path, save_path, colorstyle.link_pairs, colorstyle.ring_color, save=True)
| 42.915068
| 101
| 0.385789
|
3d1321ddf1fd7d9ec0fe92cdbaa25a57c122c653
| 9,028
|
py
|
Python
|
test/py/testutils/__init__.py
|
modulus-sa/ganeti
|
592c0e945cc2c7b0013f813ea8c9d8ec0d5bab98
|
[
"BSD-2-Clause"
] | 396
|
2015-01-22T11:44:32.000Z
|
2022-03-31T14:14:29.000Z
|
test/py/testutils/__init__.py
|
modulus-sa/ganeti
|
592c0e945cc2c7b0013f813ea8c9d8ec0d5bab98
|
[
"BSD-2-Clause"
] | 1,550
|
2015-04-05T09:53:50.000Z
|
2022-03-28T17:42:20.000Z
|
test/py/testutils/__init__.py
|
modulus-sa/ganeti
|
592c0e945cc2c7b0013f813ea8c9d8ec0d5bab98
|
[
"BSD-2-Clause"
] | 119
|
2015-01-06T21:37:15.000Z
|
2022-03-07T06:36:26.000Z
|
#
#
# Copyright (C) 2006, 2007, 2008 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utilities for unit testing"""
import os
import sys
import stat
import errno
import base64
import socket
import tempfile
import unittest
import logging
# Unified patch_object for various versions of Python Mock.
#
# Different Python Mock versions provide incompatible versions of patching an
# object. More recent versions use _patch_object, older ones used patch_object.
# This unifies the different variations.
import mock
try:
# pylint: disable=W0212
_patcher = mock._patch_object
except AttributeError:
# pylint: disable=E1101
try:
_patcher = mock.patch_object
except AttributeError:
_patcher = mock.patch.object
from ganeti import utils
def GetSourceDir():
return os.environ.get("TOP_SRCDIR", ".")
def TestDataFilename(name):
"""Returns the filename of a given test data file.
@type name: str
@param name: the 'base' of the file name, as present in
the test/data directory
@rtype: str
@return: the full path to the filename, such that it can
be used in 'make distcheck' rules
"""
return "%s/test/data/%s" % (GetSourceDir(), name)
def ReadTestData(name):
"""Returns the content of a test data file.
This is just a very simple wrapper over utils.ReadFile with the
proper test file name.
"""
return utils.ReadFile(TestDataFilename(name))
def _SetupLogging(verbose):
"""Setupup logging infrastructure.
"""
fmt = logging.Formatter("%(asctime)s: %(threadName)s"
" %(levelname)s %(message)s")
if verbose:
handler = logging.StreamHandler()
else:
handler = logging.FileHandler(os.devnull, "a")
handler.setLevel(logging.NOTSET)
handler.setFormatter(fmt)
root_logger = logging.getLogger("")
root_logger.setLevel(logging.NOTSET)
root_logger.addHandler(handler)
def RequiresIPv6():
"""Decorator for tests requiring IPv6 support
Decorated tests will be skipped if no IPv6 networking
support is available on the host system.
"""
try:
# Try to bind a DGRAM socket on IPv6 localhost
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
sock.bind(('::1', 0))
sock.close()
except socket.error as err:
if err.errno in (errno.EADDRNOTAVAIL, errno.EAFNOSUPPORT):
return unittest.skip("IPv6 not available")
return lambda thing: thing
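# Illustrative use of RequiresIPv6 (hypothetical test, not part of this module).
# Note that the decorator factory has to be called, i.e. applied as @RequiresIPv6():
#
#   @RequiresIPv6()
#   class TestV6Loopback(unittest.TestCase):
#     def testBind(self):
#       ...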
class GanetiTestProgram(unittest.TestProgram):
def runTests(self):
"""Runs all tests.
"""
_SetupLogging("LOGTOSTDERR" in os.environ)
sys.stderr.write("Running %s\n" % self.progName)
sys.stderr.flush()
# Ensure assertions will be evaluated
if not __debug__:
raise Exception("Not running in debug mode, assertions would not be"
" evaluated")
# Check again, this time with a real assertion
try:
assert False
except AssertionError:
pass
else:
raise Exception("Assertion not evaluated")
return unittest.TestProgram.runTests(self)
# pylint: disable=R0904
class GanetiTestCase(unittest.TestCase):
"""Helper class for unittesting.
This class defines a few utility functions that help in building
unittests. Child classes must call the parent setup and cleanup.
"""
def setUp(self):
self._temp_files = []
self.patches = {}
self.mocks = {}
def MockOut(self, name, patch=None):
if patch is None:
patch = name
self.patches[name] = patch
self.mocks[name] = patch.start()
def tearDown(self):
while self._temp_files:
try:
utils.RemoveFile(self._temp_files.pop())
except EnvironmentError:
pass
for patch in self.patches.values():
patch.stop()
self.patches = {}
self.mocks = {}
def assertFileContent(self, file_name, expected_content):
"""Checks that the content of a file is what we expect.
@type file_name: str
@param file_name: the file whose contents we should check
@type expected_content: str
@param expected_content: the content we expect
"""
actual_content = utils.ReadFile(file_name)
self.assertEqual(actual_content, expected_content)
def assertFileContentNotEqual(self, file_name, reference_content):
"""Checks that the content of a file is different to the reference.
@type file_name: str
@param file_name: the file whose contents we should check
@type reference_content: str
@param reference_content: the content we use as reference
"""
actual_content = utils.ReadFile(file_name)
self.assertNotEqual(actual_content, reference_content)
def assertFileMode(self, file_name, expected_mode):
"""Checks that the mode of a file is what we expect.
@type file_name: str
@param file_name: the file whose contents we should check
@type expected_mode: int
@param expected_mode: the mode we expect
"""
st = os.stat(file_name)
actual_mode = stat.S_IMODE(st.st_mode)
self.assertEqual(actual_mode, expected_mode)
def assertFileUid(self, file_name, expected_uid):
"""Checks that the user id of a file is what we expect.
@type file_name: str
@param file_name: the file whose contents we should check
@type expected_uid: int
@param expected_uid: the user id we expect
"""
st = os.stat(file_name)
actual_uid = st.st_uid
self.assertEqual(actual_uid, expected_uid)
def assertFileGid(self, file_name, expected_gid):
"""Checks that the group id of a file is what we expect.
@type file_name: str
@param file_name: the file whose contents we should check
@type expected_gid: int
@param expected_gid: the group id we expect
"""
st = os.stat(file_name)
actual_gid = st.st_gid
self.assertEqual(actual_gid, expected_gid)
def assertEqualValues(self, first, second, msg=None):
"""Compares two values whether they're equal.
Tuples are automatically converted to lists before comparing.
"""
return self.assertEqual(UnifyValueType(first),
UnifyValueType(second),
msg=msg)
def _CreateTempFile(self):
"""Creates a temporary file and adds it to the internal cleanup list.
This method simplifies the creation and cleanup of temporary files
during tests.
"""
fh, fname = tempfile.mkstemp(prefix="ganeti-test", suffix=".tmp")
os.close(fh)
self._temp_files.append(fname)
return fname
# pylint: enable=R0904
def patch_object(*args, **kwargs):
"""Unified patch_object for various versions of Python Mock."""
return _patcher(*args, **kwargs)
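# Illustrative sketch of how the helpers above fit together (a hypothetical test,
# not part of ganeti). The class is defined inside a function so that merely
# importing testutils does not register an extra test case; `return_value` is
# forwarded by patch_object to the underlying mock.
def _ExampleTestCaseSketch():
    class DemoTest(GanetiTestCase):
        def setUp(self):
            GanetiTestCase.setUp(self)  # parent setup is mandatory

        def tearDown(self):
            GanetiTestCase.tearDown(self)  # parent cleanup is mandatory

        def testTempFileContent(self):
            fname = self._CreateTempFile()  # removed again by tearDown
            with open(fname, "w") as fh:
                fh.write("hello")
            self.assertFileContent(fname, "hello")
            self.assertEqualValues(("a", "b"), ["a", "b"])

        def testPatchedReadFile(self):
            patcher = patch_object(utils, "ReadFile", return_value="stub")
            patcher.start()
            try:
                self.assertEqual(utils.ReadFile("/ignored"), "stub")
            finally:
                patcher.stop()

    return DemoTest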
def UnifyValueType(data):
"""Converts all tuples into lists.
This is useful for unittests where an external library doesn't keep types.
"""
if isinstance(data, (tuple, list)):
return [UnifyValueType(i) for i in data]
elif isinstance(data, dict):
return dict([(UnifyValueType(key), UnifyValueType(value))
for (key, value) in data.items()])
return data
class CallCounter(object):
"""Utility class to count number of calls to a function/method.
"""
def __init__(self, fn):
"""Initializes this class.
@type fn: Callable
"""
self._fn = fn
self._count = 0
def __call__(self, *args, **kwargs):
"""Calls wrapped function with given parameters.
"""
self._count += 1
return self._fn(*args, **kwargs)
def Count(self):
"""Returns number of calls.
@rtype: number
"""
return self._count
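# Small illustrative sketch (not from the original module): wrap a callable to
# see how many times other code ends up invoking it.
def _ExampleCallCounter():
    counter = CallCounter(len)
    counter("abc")
    counter([1, 2, 3])
    return counter.Count()  # returns 2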
def b64encode_string(text, encoding="utf-8"):
"""Utility to base64-encode a string
This exposes a string interface for Python3's b64encode
@type text: string
@param text: input string
"""
return base64.b64encode(text.encode(encoding)).decode("ascii").strip()
| 27.02994
| 79
| 0.70381
|
1820f220f73d62eea8ee358d300094f08f4c2d54
| 5,008
|
py
|
Python
|
towhee/engine/operator_context.py
|
ThyeeZz/towhee
|
4e1ace5e24f995b6f0f9b9bfac46d28ba8e0ce1d
|
[
"Apache-2.0"
] | null | null | null |
towhee/engine/operator_context.py
|
ThyeeZz/towhee
|
4e1ace5e24f995b6f0f9b9bfac46d28ba8e0ce1d
|
[
"Apache-2.0"
] | null | null | null |
towhee/engine/operator_context.py
|
ThyeeZz/towhee
|
4e1ace5e24f995b6f0f9b9bfac46d28ba8e0ce1d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from enum import Enum, auto
from collections import defaultdict
from towhee.dag.operator_repr import OperatorRepr
from towhee.engine.operator_runner.runner_base import RunnerStatus
from towhee.dataframe import DataFrame
from towhee.engine.operator_io import create_reader, create_writer
from towhee.engine.operator_runner import create_runner
from towhee.engine.thread_pool_task_executor import ThreadPoolTaskExecutor
class OpStatus(Enum):
NOT_RUNNING = auto()
RUNNING = auto()
FINISHED = auto()
FAILED = auto()
STOPPED = auto()
class OperatorContext:
"""
The OperatorContext manages an operator's input data and output data at runtime,
as well as the operators' dependency within a GraphContext.
The abstraction of OperatorContext hides the complexity of Dataframe management,
input iteration, and data dependency between Operators. It offers a Task-based
scheduling context.
Args:
op_repr: (OperatorRepr)
The operator representation
dataframes: (`dict` of `DataFrame`)
All the `DataFrames` in `GraphContext`
"""
def __init__(
self,
op_repr: OperatorRepr,
dataframes: Dict[str, DataFrame]
):
self._repr = op_repr
self._readers = OperatorContext._create_reader(op_repr, dataframes)
self._writer = OperatorContext._create_writer(op_repr, dataframes)
self._op_runners = []
self._op_status = OpStatus.NOT_RUNNING
self._err_msg = None
@staticmethod
def _create_reader(op_repr, dataframes):
inputs_index = defaultdict(dict)
for item in op_repr.inputs:
inputs_index[item['df']][item['name']] = item['col']
iter_type = op_repr.iter_info['type']
inputs = dict((item['df'], dataframes[item['df']]) for item in op_repr.inputs)
readers = []
for df_name, indexs in inputs_index.items():
readers.append(create_reader(inputs[df_name], iter_type, indexs))
return readers
@staticmethod
def _create_writer(op_repr, dataframes):
outputs = list({dataframes[output['df']]
for output in op_repr.outputs})
iter_type = op_repr.iter_info['type']
return create_writer(iter_type, outputs)
@property
def name(self):
return self._repr.name
@property
def err_msg(self):
return self._err_msg
@property
def status(self):
"""
Calc op-ctx status by checking all runners of this op-ctx
"""
if self._op_status in [OpStatus.FINISHED, OpStatus.FAILED]:
return self._op_status
if len(self._op_runners) == 0:
return self._op_status
finished_count = 0
for runner in self._op_runners:
if runner.status == RunnerStatus.FAILED:
self._op_status = OpStatus.FAILED
self._err_msg = runner.msg
else:
if runner.status == RunnerStatus.FINISHED:
finished_count += 1
if finished_count == len(self._op_runners):
self._op_status = OpStatus.FINISHED
return self._op_status
def start(self, executor: ThreadPoolTaskExecutor, count: int = 1) -> None:
if self._op_status != OpStatus.NOT_RUNNING:
raise RuntimeError('OperatorContext can only be started once')
self._op_status = OpStatus.RUNNING
try:
for i in range(count):
self._op_runners.append(
create_runner(self._repr.iter_info['type'],
self._repr.name, i, self._repr.name,
self._repr.function, self._repr.init_args,
self._readers, self._writer)
)
except AttributeError as e:
self._err_msg = str(e)
self._op_status = OpStatus.FAILED
return
for runner in self._op_runners:
executor.push_task(runner)
def stop(self):
if self.status != OpStatus.RUNNING:
raise RuntimeError('Op ctx is already not running.')
for runner in self._op_runners:
runner.set_stop()
def join(self):
# Wait all runners finished.
for runner in self._op_runners:
runner.join()
self._writer.close()
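# Illustrative lifecycle sketch (comments only, because constructing an
# OperatorRepr, the GraphContext dataframes and a ThreadPoolTaskExecutor happens
# elsewhere in the engine and is assumed here):
#
#   op_ctx = OperatorContext(op_repr, dataframes)
#   op_ctx.start(executor, count=2)   # push two runners onto the task executor
#   op_ctx.join()                     # block until every runner has finished
#   if op_ctx.status == OpStatus.FAILED:
#       print(op_ctx.err_msg)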
| 34.30137
| 86
| 0.642572
|
7fa4b0b4e24c6b75293ee7ed0c36fbb8c9f2eb16
| 3,071
|
py
|
Python
|
src/rnn/load_pretrained_word_embeddings.py
|
acoli-repo/OpenIE_Stanovsky_Dagan
|
0d4387b584914d118b487c6984d9655ef04f2abd
|
[
"MIT"
] | 117
|
2018-05-24T01:38:54.000Z
|
2022-03-31T09:48:34.000Z
|
src/rnn/load_pretrained_word_embeddings.py
|
acoli-repo/OpenIE_Stanovsky_Dagan
|
0d4387b584914d118b487c6984d9655ef04f2abd
|
[
"MIT"
] | 23
|
2018-07-26T07:47:45.000Z
|
2022-03-31T09:50:30.000Z
|
src/rnn/load_pretrained_word_embeddings.py
|
acoli-repo/OpenIE_Stanovsky_Dagan
|
0d4387b584914d118b487c6984d9655ef04f2abd
|
[
"MIT"
] | 25
|
2018-05-24T01:46:05.000Z
|
2022-03-25T17:35:27.000Z
|
""" Usage:
load_pretrained_word_embeddings [--glove=GLOVE_FN]
"""
from docopt import docopt
import numpy as np
from word_index import Word_index
import logging
logging.basicConfig(level = logging.DEBUG)
import sys
sys.path.append("./common")
from symbols import UNK_INDEX, UNK_SYMBOL, UNK_VALUE
from keras.layers import Embedding
class Glove:
"""
Stores pretrained word embeddings for GloVe, and
outputs a Keras Embeddings layer.
"""
def __init__(self, fn, dim = None):
"""
Load a GloVe pretrained embeddings model.
fn - Filename from which to load the embeddings
dim - Dimension of expected word embeddings, used for verification;
None avoids this check.
"""
self.fn = fn
self.dim = dim
logging.debug("Loading GloVe embeddings from: {} ...".format(self.fn))
self._load(self.fn)
logging.debug("Done!")
def _load(self, fn):
"""
Load glove embedding from a given filename
"""
self.word_index = {UNK_SYMBOL : UNK_INDEX}
emb = []
for line in open(fn):
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
if self.dim:
assert(len(coefs) == self.dim)
else:
self.dim = len(coefs)
# Record mapping from word to index
self.word_index[word] = len(emb) + 1
emb.append(coefs)
# Add UNK at the first index in the table
self.emb = np.array([UNK_VALUE(self.dim)] + emb)
# Set the vocabulary size
self.vocab_size = len(self.emb)
def get_word_index(self, word, lower = True):
"""
Get the index of a given word (int).
If the word doesn't exist, returns the UNK index.
lower - controls whether the word should be lowered before checking map
"""
if lower:
word = word.lower()
return self.word_index[word] \
if (word in self.word_index) else UNK_INDEX
def get_embedding_matrix(self):
"""
Return an embedding matrix for use in a Keras Embedding layer
https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html
word_index - Maps words in the dictionary to their index (one-hot encoding)
"""
return self.emb
def get_keras_embedding(self, **args):
"""
Get a Keras Embedding layer, loading this embedding as pretrained weights
The additional arguments given to this function are passed to the Keras Embedding constructor.
"""
return Embedding(self.vocab_size,
self.dim,
weights = [self.get_embedding_matrix()],
**args)
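# Illustrative helper (a sketch, not part of the original script): map a sentence
# to word indices and build the matching Keras layer. The example sentence and
# the `trainable=False` choice are assumptions made purely for illustration.
def example_glove_usage(glove_fn):
    g = Glove(glove_fn)
    # Out-of-vocabulary words fall back to UNK_INDEX.
    indices = [g.get_word_index(w) for w in "the quick brown fox".split()]
    emb_layer = g.get_keras_embedding(trainable=False)
    return indices, emb_layer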
if __name__ == "__main__":
args = docopt(__doc__)
if args["--glove"] is not None:
glove_fn = args["--glove"]
g = Glove(glove_fn)
emb = g.get_keras_embedding()
else:
logging.info(__doc__)
sys.exit(1)
| 32.326316
| 102
| 0.591664
|
ebb3259ab036436000c911c87f74c6006ac3ca13
| 4,740
|
py
|
Python
|
decora_wifi/models/omni_notifier_email.py
|
balloob/python-decora_wifi
|
47900ad67002f3655fc4c799518bc4e73293ceb4
|
[
"MIT"
] | 33
|
2017-09-02T16:37:15.000Z
|
2021-12-28T15:24:39.000Z
|
decora_wifi/models/omni_notifier_email.py
|
balloob/python-decora_wifi
|
47900ad67002f3655fc4c799518bc4e73293ceb4
|
[
"MIT"
] | 17
|
2017-09-12T04:53:07.000Z
|
2022-01-25T03:31:45.000Z
|
decora_wifi/models/omni_notifier_email.py
|
balloob/python-decora_wifi
|
47900ad67002f3655fc4c799518bc4e73293ceb4
|
[
"MIT"
] | 21
|
2018-01-29T22:50:06.000Z
|
2022-01-06T02:30:47.000Z
|
# Leviton Cloud Services API model OmniNotifierEmail.
# Auto-generated by api_scraper.py.
#
# Copyright 2017 Tim Lyakhovetskiy <tlyakhov@gmail.com>
#
# This code is released under the terms of the MIT license. See the LICENSE
# file for more details.
from decora_wifi.base_model import BaseModel
class OmniNotifierEmail(BaseModel):
def __init__(self, session, model_id=None):
super(OmniNotifierEmail, self).__init__(session, model_id)
@classmethod
def count(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/OmniNotifierEmails/count"
return session.call_api(api, attribs, 'get')
@classmethod
def create(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/OmniNotifierEmails"
return session.call_api(api, attribs, 'post')
@classmethod
def create_change_stream(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/OmniNotifierEmails/change-stream"
return session.call_api(api, attribs, 'post')
@classmethod
def create_many(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/OmniNotifierEmails"
return session.call_api(api, attribs, 'post')
def delete_by_id(self, attribs=None):
if attribs is None:
attribs = {}
api = "/OmniNotifierEmails/{0}".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def exists(self, attribs=None):
if attribs is None:
attribs = {}
api = "/OmniNotifierEmails/{0}/exists".format(self._id)
return self._session.call_api(api, attribs, 'get')
@classmethod
def find(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/OmniNotifierEmails"
items = session.call_api(api, attribs, 'get')
result = []
if items is not None:
for data in items:
model = OmniNotifierEmail(session, data['id'])
model.data = data
result.append(model)
return result
def find_by_id(self, attribs=None):
if attribs is None:
attribs = {}
api = "/OmniNotifierEmails/{0}".format(self._id)
data = self._session.call_api(api, attribs, 'get')
self.data.update(data)
return self
@classmethod
def find_one(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/OmniNotifierEmails/findOne"
return session.call_api(api, attribs, 'get')
def refresh(self):
api = "/OmniNotifierEmails/{0}".format(self._id)
result = self._session.call_api(api, {}, 'get')
if result is not None:
self.data.update(result)
return self
def get_omni_notifier(self, attribs=None):
if attribs is None:
attribs = {}
api = "/OmniNotifierEmails/{0}/omniNotifier".format(self._id)
data = self._session.call_api(api, attribs, 'get')
from .omni_notifier import OmniNotifier
model = OmniNotifier(self._session, data['id'])
model.data = data
return model
def replace_by_id(self, attribs=None):
if attribs is None:
attribs = {}
api = "/OmniNotifierEmails/{0}/replace".format(self._id)
return self._session.call_api(api, attribs, 'post')
@classmethod
def replace_or_create(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/OmniNotifierEmails/replaceOrCreate"
return session.call_api(api, attribs, 'post')
@classmethod
def update_all(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/OmniNotifierEmails/update"
return session.call_api(api, attribs, 'post')
def update_attributes(self, attribs=None):
if attribs is None:
attribs = {}
api = "/OmniNotifierEmails/{0}".format(self._id)
data = self._session.call_api(api, attribs, 'put')
self.data.update(attribs)
return self
@classmethod
def upsert(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/OmniNotifierEmails"
data = session.call_api(api, attribs, 'put')
model = OmniNotifierEmail(session, data['id'])
model.data = data
return model
@classmethod
def upsert_with_where(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/OmniNotifierEmails/upsertWithWhere"
return session.call_api(api, attribs, 'post')
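# --- Usage sketch (comment added; illustrative only) ---
# All methods above assume an authenticated session object exposing call_api();
# in this package that is normally a DecoraWiFiSession (its construction and
# login call are assumed here):
#
#   session = DecoraWiFiSession()
#   session.login("user@example.com", "password")
#   for notifier_email in OmniNotifierEmail.find(session):
#       print(notifier_email.data)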
| 31.390728
| 75
| 0.605696
|
2b600676b71bf90d83df5833a2ffb4afe9d36422
| 8,929
|
py
|
Python
|
cash.py
|
gaoming714/cashDIFF
|
14f39d2fb3f4f627b5390072662e7312ebd3301b
|
[
"Apache-2.0"
] | null | null | null |
cash.py
|
gaoming714/cashDIFF
|
14f39d2fb3f4f627b5390072662e7312ebd3301b
|
[
"Apache-2.0"
] | null | null | null |
cash.py
|
gaoming714/cashDIFF
|
14f39d2fb3f4f627b5390072662e7312ebd3301b
|
[
"Apache-2.0"
] | null | null | null |
"""
Market code: Shanghai is 0, Shenzhen is 1. A Shenzhen ticker is recognised by a code starting with 0 or 3.
Shenzhen is parsed with the re module, Shanghai via XPath; the results are stored in ticker_list & ticker_detail.
ticker_list is the base: the list scraped from the web page.
replaceFlag (cash substitution, "现金替代"): True means substitution is allowed ("允许"), False means it is mandatory ("必须"), which implies reportAmount must be set to 0.
essential:
config.json
geckodriver.exe
run:
poetry run python cash.py
build for exe-onefile:
poetry run pyinstaller cash.py --onefile
"""
import re
import json
import time
import pendulum
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.expected_conditions import presence_of_element_located
software_build = "20210720A"
ticker_list = []
ticker_detail = {}
ticker_real_json = {}
ticker_market = ""
report_str = ""
welcome_msg = ""
debug_flag = None
quary_str = ""
market_time = ""
def tickerInit():
global report_str
global welcome_msg
global debug_flag
global ticker_real_json
global ticker_market
# get config
data = None
with open("config.json", 'r', encoding='utf-8') as f:
data = json.load(f)
ETF_code= data["ETFcode"]
welcome_msg = data["welcome"]
debug_flag = data["debug"]
ticker_real_json = data["realAmount"]
ticker_market = data["market"]
# create query report
date_str = str(pendulum.now("Asia/Shanghai").format("YYYYMMDD"))
if ticker_market == "SZ" or ticker_market == "1":
report_str = "http://reportdocs.static.szse.cn/files/text/etf/ETF" + ETF_code + date_str + ".txt?random=0.1"
else:
report_str = "http://www.sse.com.cn/disclosure/fund/etflist/detail.shtml?type=004&fundid=" + ETF_code + "&etfClass=1"
if debug_flag == True:
print("申购赎回清单 访问地址:")
print(report_str)
print()
def tickerWelcome():
print("软件版本\t",software_build)
print("当前运行时间\t", pendulum.now("Asia/Shanghai"))
print(welcome_msg)
print("\n申购赎回清单 访问地址:" , report_str, "\n")
if ticker_market == "SZ" or ticker_market == "1":
options = Options()
options.add_argument('-headless') # headless
with webdriver.Firefox(executable_path='geckodriver', options=options) as driver:
wait = WebDriverWait(driver, 10)
driver.get(report_str)
# driver.get("http://reportdocs.static.szse.cn/files/text/etf/ETF15978320210706.txt")
page_source = driver.page_source
prettytableSZ(page_source)
else:
options = Options()
options.add_argument('-headless') # headless
with webdriver.Firefox(executable_path='geckodriver', options=options) as driver:
wait = WebDriverWait(driver, 10)
driver.get(report_str)
xpath_str = "/html/body/div[8]/div[2]/div[2]/div[2]/div/div/div/div[1]/div[2]/div[4]/div[2]/div/div[2]/table/tbody"
table_element = driver.find_element(By.XPATH,xpath_str)
prettytableSH(table_element)
# get detail for market
prettyquary()
# show init message
# print("全部申购赎回组合证券只数(不含 159900 证券) : ", len(ticker_list), "\n")
if debug_flag == True:
print("有效持仓数量")
print(len(ticker_list))
print()
print("持仓列表")
print(ticker_list)
print()
print("申购赎回清单json")
print(ticker_detail)
print()
def prettytableSZ(raw):
global ticker_list
global ticker_detail
if debug_flag == True:
print("申赎清单原始")
print(raw)
print()
raw_list = re.split("---*\n",raw)[7]
# report_list = re.split("挂牌市场\n|深圳市场\n|上海市场\n",report_raw)
report_list = re.split("市场\n\s*",raw_list)
report_list.pop(0)
report_list.pop()
for record in report_list:
record_list = re.split("\s{3,}",record)
code = record_list[0].strip()
if code == "159900" :
continue
name = record_list[1].strip()
realAmount =ticker_real_json[code]
reportAmount = record_list[2].strip().replace(",","")
if record_list[3].strip() == "允许":
replaceFlag = True
else:
replaceFlag = False
marketName = record_list[8].strip()
if marketName == "深圳":
marketFlag = "1"
else:
marketFlag = "0"
ticker_list.append(code)
ticker_detail[code]={}
ticker_detail[code]["code"] = code
ticker_detail[code]["name"] = name
ticker_detail[code]["realAmount"] = realAmount
ticker_detail[code]["reportAmount"] = reportAmount
ticker_detail[code]["replaceFlag"] = replaceFlag
ticker_detail[code]["marketName"] = marketName
ticker_detail[code]["marketFlag"] = marketFlag
ticker_detail[code]["codeAPI"] = marketFlag+code
if replaceFlag == False:
ticker_detail[code]["reportAmount"] = 0
def prettytableSH(element):
global ticker_list
global ticker_detail
if debug_flag == True:
print("申赎清单原始")
print(element.text)
print()
code_els = element.find_elements(By.XPATH, "./tr/td[1]")
name_els = element.find_elements(By.XPATH, "./tr/td[2]")
reportAmount_els = element.find_elements(By.XPATH, "./tr/td[3]")
replaceFlag_els = element.find_elements(By.XPATH, "./tr/td[4]")
# code_els = element.find_elements(By.XPATH, "//tr/td[5]")
# code_els = element.find_elements(By.XPATH, "//tr/td[6]")
# code_els = element.find_elements(By.XPATH, "//tr/td[7]")
for index, code_el in enumerate(code_els):
code = code_el.text
name = name_els[index].text
realAmount = ticker_real_json[code]
reportAmount = reportAmount_els[index].text
if replaceFlag_els[index].text == "允许":
replaceFlag = True
else:
replaceFlag = False
if code[0] == "0" or code[0] == "3":
marketFlag = "1"
else:
marketFlag = "0"
ticker_list.append(code)
ticker_detail[code]={}
ticker_detail[code]["code"] = code
ticker_detail[code]["name"] = name
ticker_detail[code]["realAmount"] = realAmount
ticker_detail[code]["reportAmount"] = reportAmount
ticker_detail[code]["replaceFlag"] = replaceFlag
# ticker_detail[code]["marketName"] = marketName
ticker_detail[code]["marketFlag"] = marketFlag
ticker_detail[code]["codeAPI"] = marketFlag+code
if replaceFlag == False:
ticker_detail[code]["reportAmount"] = 0
# print(ticker_list)
# print(ticker_detail)
def prettyquary():
global quary_str
quary_str += "http://api.money.126.net/data/feed/"
for ticker in ticker_list:
quary_str += ticker_detail[ticker]["codeAPI"]
quary_str += ","
quary_str += "0000001"
def tickerSelenium():
global market_time
global ticker_detail
#This example requires Selenium WebDriver 3.13 or newer
options = Options()
options.add_argument('-headless') # headless
with webdriver.Firefox(executable_path='geckodriver', options=options) as driver:
wait = WebDriverWait(driver, 10)
# driver.get("http://api.money.126.net/data/feed/"+ticker_raw)
driver.get(quary_str)
detail_raw = driver.find_element(By.XPATH, "/html/body/pre").text
# print(ticker_raw)
# print(driver.find_element(By.XPATH, "/html/body/pre").text)
json_raw = re.split('[()]',detail_raw)[1]
json_dict = json.loads(json_raw)
for key, value in json_dict.items():
tmp_key = value["symbol"]
tmp_dict = {}
# tmp_dict["code"] = value["symbol"]
# tmp_dict["name"] = value["name"]
tmp_dict["type"] = value["type"]
tmp_dict["time"] = value["time"]
tmp_dict["yestclose"] = value["yestclose"]
tmp_dict["price"] = value["price"]
tmp_dict["increase"] = value["price"] - value["yestclose"]
if key != "0000001":
ticker_detail[tmp_key].update(tmp_dict)
else:
market_time = value["time"]
if debug_flag == True:
print("带行情的详细信息")
print(ticker_detail)
print()
def tickerAddMargin():
for code in ticker_list:
realone = float(ticker_detail[code]["realAmount"])
reportone = float(ticker_detail[code]["reportAmount"])
increaseone = float(ticker_detail[code]["increase"])
margin = (realone - reportone) * increaseone
ticker_detail[code]["margin"] = margin
def tickerShow():
total = 0
for ticker in ticker_list:
total = total + ticker_detail[ticker]["margin"]
print(market_time, " ", round(total,2))
if __name__=="__main__":
tickerInit()
tickerWelcome()
# tickerInput()
while True:
tickerSelenium()
tickerAddMargin()
tickerShow()
time.sleep(3)
| 33.69434
| 127
| 0.628066
|
1c4af53588d57359ae6d05b519a1dc4f2f7ca79b
| 1,981
|
py
|
Python
|
ampel/mongo/query/general.py
|
mafn/Ampel-core
|
744acbf36f0a2ceae7230ceab1350236c1501b57
|
[
"BSD-3-Clause"
] | null | null | null |
ampel/mongo/query/general.py
|
mafn/Ampel-core
|
744acbf36f0a2ceae7230ceab1350236c1501b57
|
[
"BSD-3-Clause"
] | null | null | null |
ampel/mongo/query/general.py
|
mafn/Ampel-core
|
744acbf36f0a2ceae7230ceab1350236c1501b57
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: Ampel-core/ampel/mongo/query/general.py
# License: BSD-3-Clause
# Author: valery brinnel <firstname.lastname@gmail.com>
# Date: 11.12.2019
# Last Modified Date: 17.02.2021
# Last Modified By: valery brinnel <firstname.lastname@gmail.com>
from bson.int64 import Int64
from typing import Any, Literal
from ampel.types import Tag, ChannelId, StockId, StrictIterable
from ampel.model.operator.AnyOf import AnyOf
from ampel.model.operator.AllOf import AllOf
from ampel.model.operator.OneOf import OneOf
from ampel.mongo.utils import maybe_match_array
from ampel.mongo.schema import apply_schema, apply_excl_schema
type_stock_id = (int, Int64, bytes, str)
def build_general_query(
stock: None | StockId | StrictIterable[StockId] = None,
channel: None | ChannelId | dict | AllOf[ChannelId] | AnyOf[ChannelId] | OneOf[ChannelId] = None,
tag: None | dict[Literal['with', 'without'], Tag | dict | AllOf[Tag] | AnyOf[Tag] | OneOf[Tag]] = None
) -> dict[str, Any]:
"""
Builds a query usable with the ampel "stock", "t0" (with channel=None), "t1" and "t2" collections
:param stock: matching multiple ids with a single query is possible
	:param channel: None (no criterion) means all channels are considered.
:param tag: tags to be (or not to be) matched by query
:returns: query dict with matching criteria
:raises ValueError: apply_schema can raise ValueError in case the provided dict schema structure is unsupported
"""
query = {}
if stock:
query['stock'] = stock if isinstance(stock, type_stock_id) \
else maybe_match_array(stock) # type: ignore[arg-type]
if channel:
apply_schema(query, 'channel', channel)
if tag:
if 'with' in tag:
apply_schema(query, 'tag', tag['with'])
# Order matters, parse_dict(...) must be called *after* parse_excl_dict(...)
if 'without' in tag:
apply_excl_schema(query, 'tag', tag['without'])
return query
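# --- Usage sketch (comment added; illustrative only, channel/tag names are made up) ---
#
#   query = build_general_query(
#       stock=12345678,
#       channel="MY_CHANNEL",
#       tag={'with': "SOME_TAG", 'without': "BAD_TAG"},
#   )
#   # `query` can then be passed to a pymongo find() on the stock/t1/t2 collections.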
| 36.018182
| 112
| 0.710752
|
ed3fa86ab29192da1327df5b717fa078ffa78372
| 2,762
|
py
|
Python
|
read_in_data.py
|
ChenhaoLiu-SeasPenn/FCN.tensorflow
|
30d646f500b563b3df37eab24903a3aeaed35300
|
[
"MIT"
] | null | null | null |
read_in_data.py
|
ChenhaoLiu-SeasPenn/FCN.tensorflow
|
30d646f500b563b3df37eab24903a3aeaed35300
|
[
"MIT"
] | null | null | null |
read_in_data.py
|
ChenhaoLiu-SeasPenn/FCN.tensorflow
|
30d646f500b563b3df37eab24903a3aeaed35300
|
[
"MIT"
] | 1
|
2018-08-08T19:20:58.000Z
|
2018-08-08T19:20:58.000Z
|
__author__ = 'charlie'
import numpy as np
import os
import random
from six.moves import cPickle as pickle
from tensorflow.python.platform import gfile
import glob
import TensorflowUtils as utils
# DATA_URL = 'http://sceneparsing.csail.mit.edu/data/ADEChallengeData2016.zip'
#DATA_URL = 'http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip'
def read_prediction_set(data_dir):
if not gfile.Exists(data_dir):
print("Image directory '" + data_dir + "' not found.")
return None
file_list = []
image_list = []
file_glob = os.path.join(data_dir, '*.' + 'png')
file_list.extend(glob.glob(file_glob))
if not file_list:
print('No files found')
else:
image_list = [{'image': f, 'filename': os.path.splitext(f.split("/")[-1])[0]} for f in file_list]
print ('No. of files: %d' % len(image_list))
return image_list
def read_dataset(data_dir):
pickle_filename = "dataset.pickle"
pickle_filepath = os.path.join(data_dir, pickle_filename)
if not os.path.exists(pickle_filepath):
result = create_image_lists(data_dir)
print ("Pickling ...")
with open(pickle_filepath, 'wb') as f:
pickle.dump(result, f, pickle.HIGHEST_PROTOCOL)
else:
print ("Found pickle file!")
with open(pickle_filepath, 'rb') as f:
result = pickle.load(f)
training_records = result['training']
validation_records = result['validation']
del result
return training_records, validation_records
def create_image_lists(image_dir):
if not gfile.Exists(image_dir):
print("Image directory '" + image_dir + "' not found.")
return None
directories = ['training', 'validation']
image_list = {}
for directory in directories:
file_list = []
image_list[directory] = []
file_glob = os.path.join(image_dir, "images", directory, '*.' + 'png')
file_list.extend(glob.glob(file_glob))
if not file_list:
print('No files found')
else:
for f in file_list:
filename = os.path.splitext(f.split("/")[-1])[0]
annotation_file = os.path.join(image_dir, "annotations", directory, "label_" + filename + '.png')
if os.path.exists(annotation_file):
record = {'image': f, 'annotation': annotation_file, 'filename': filename}
image_list[directory].append(record)
else:
print("Annotation file not found for %s - Skipping" % filename)
random.shuffle(image_list[directory])
no_of_images = len(image_list[directory])
print ('No. of %s files: %d' % (directory, no_of_images))
return image_list
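# --- Usage sketch (comment added; illustrative only, the directory name is assumed) ---
# Expects <data_dir>/images/{training,validation}/*.png plus matching
# <data_dir>/annotations/{training,validation}/label_<name>.png files,
# as implied by create_image_lists() above:
#
#   train_records, val_records = read_dataset("Data_zoo/MySceneParsing")
#   print(len(train_records), len(val_records))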
| 34.525
| 113
| 0.630702
|
1310bcc1dbdd5d8521e0c4fd6c57bab5dd5758a8
| 7,010
|
py
|
Python
|
benchmark/d3pe/d3pe/utils/tools.py
|
ssimonc/NeoRL
|
098c58c8e4c3e43e67803f6384619d3bfe7fce5d
|
[
"Apache-2.0"
] | 50
|
2021-02-07T08:10:28.000Z
|
2022-03-25T09:10:26.000Z
|
benchmark/d3pe/d3pe/utils/tools.py
|
ssimonc/NeoRL
|
098c58c8e4c3e43e67803f6384619d3bfe7fce5d
|
[
"Apache-2.0"
] | 7
|
2021-07-29T14:58:31.000Z
|
2022-02-01T08:02:54.000Z
|
benchmark/d3pe/d3pe/utils/tools.py
|
ssimonc/NeoRL
|
098c58c8e4c3e43e67803f6384619d3bfe7fce5d
|
[
"Apache-2.0"
] | 4
|
2021-04-01T16:30:15.000Z
|
2022-03-31T17:38:05.000Z
|
''' This file contains common tools shared across different OPE algorithms '''
import torch
import torch.utils.tensorboard  # SummaryWriter is referenced as torch.utils.tensorboard.SummaryWriter in FQE()
import numpy as np
from tqdm import tqdm
from copy import deepcopy
from typing import Optional, Union
from d3pe.utils.data import OPEDataset, to_torch
from d3pe.evaluator import Policy
from d3pe.utils.net import MLP, DistributionalCritic, TanhGaussianActor
from d3pe.utils.func import hard_clamp
def bc(dataset : OPEDataset,
min_actions : Optional[Union[torch.Tensor, np.ndarray, float]] = None,
max_actions : Optional[Union[torch.Tensor, np.ndarray, float]] = None,
policy_features : int = 256,
policy_layers : int = 2,
val_ratio : float = 0.2,
batch_size : int = 256,
epoch : int = 20,
lr : float = 3e-4,
weight_decay : float = 1e-5,
device : str = "cuda" if torch.cuda.is_available() else "cpu",
verbose : bool = False) -> TanhGaussianActor:
''' clone the policy in the dataset '''
data = dataset[0]
if min_actions is None: min_actions = dataset.get_action_boundary()[0]
if max_actions is None: max_actions = dataset.get_action_boundary()[1]
policy = TanhGaussianActor(data['obs'].shape[-1], data['action'].shape[-1], policy_features, policy_layers, min_actions, max_actions).to(device)
max_actions = torch.as_tensor(max_actions, dtype=torch.float32, device=device)
min_actions = torch.as_tensor(min_actions, dtype=torch.float32, device=device)
best_parameters = deepcopy(policy.state_dict())
best_loss = float('inf')
dataset_size = len(dataset)
val_size = int(dataset_size * val_ratio)
train_size = dataset_size - val_size
train_dataset, val_dataset = torch.utils.data.random_split(dataset, [train_size, val_size])
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4, drop_last=True)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=4)
optim = torch.optim.AdamW(policy.parameters(), lr=lr, weight_decay=weight_decay)
device = next(policy.parameters()).device
if verbose: timer = tqdm(total=epoch)
for _ in range(epoch):
policy.train()
train_losses = []
for data in iter(train_loader):
data = to_torch(data, device=device)
action_dist = policy(data['obs'])
loss = - action_dist.log_prob(hard_clamp(data['action'], min_actions, max_actions, shrink=5e-5)).mean()
train_losses.append(loss.item())
optim.zero_grad()
loss.backward()
optim.step()
policy.eval()
with torch.no_grad():
val_loss = 0
for data in iter(val_loader):
data = to_torch(data, device=device)
action_dist = policy(data['obs'])
val_loss += - action_dist.log_prob(hard_clamp(data['action'], min_actions, max_actions, shrink=5e-5)).sum().item()
val_loss /= len(val_dataset) * data['action'].shape[-1]
if val_loss < best_loss:
best_loss = val_loss
best_parameters = deepcopy(policy.state_dict())
if verbose:
timer.update(1)
timer.set_description('train : %.3f, val : %.3f, best : %.3f' % (np.mean(train_losses), val_loss, best_loss))
if verbose: timer.close()
policy.load_state_dict(best_parameters)
return policy
def FQE(dataset : OPEDataset,
policy : Policy,
num_steps : int = 500000,
batch_size : int = 256,
lr : float = 1e-4,
weight_decay : float = 1e-5,
init_critic : Optional[Union[MLP, DistributionalCritic]] = None,
critic_hidden_features : int = 1024,
critic_hidden_layers : int = 4,
critic_type : str = 'distributional',
atoms : int = 51,
gamma : float = 0.99,
device : str = "cuda" if torch.cuda.is_available() else "cpu",
log : str = None,
verbose : bool = False,
*args, **kwargs) -> Union[MLP, DistributionalCritic]:
''' solve the value function of the policy given the dataset '''
writer = torch.utils.tensorboard.SummaryWriter(log) if log is not None else None
min_value, max_value = dataset.get_value_boundary(gamma)
policy = deepcopy(policy)
policy = policy.to(device)
data = dataset.sample(batch_size)
if init_critic is not None:
critic = deepcopy(init_critic)
else:
if critic_type == 'mlp':
critic = MLP(data['obs'].shape[-1] + data['action'].shape[-1], 1, critic_hidden_features, critic_hidden_layers).to(device)
elif critic_type == 'distributional':
critic = DistributionalCritic(data['obs'].shape[-1], data['action'].shape[-1],
critic_hidden_features, critic_hidden_layers,
min_value, max_value, atoms).to(device)
critic_optimizer = torch.optim.Adam(critic.parameters(), lr=lr, weight_decay=weight_decay)
target_critic = deepcopy(critic).to(device)
target_critic.requires_grad_(False)
if verbose:
counter = tqdm(total=num_steps)
for t in range(num_steps):
batch = dataset.sample(batch_size)
data = to_torch(batch, torch.float32, device=device)
r = data['reward']
terminals = data['done']
o = data['obs']
a = data['action']
o_ = data['next_obs']
a_ = torch.as_tensor(policy.get_action(o_), dtype=torch.float32, device=device)
if isinstance(critic, MLP):
q_target = target_critic(torch.cat((o_, a_), -1)).detach()
current_discount = gamma * (1 - terminals)
backup = r + current_discount * q_target
backup = torch.clamp(backup, min_value, max_value) # prevent explosion
q = critic(torch.cat((o, a), -1))
critic_loss = ((q - backup) ** 2).mean()
elif isinstance(critic, DistributionalCritic):
q, p = critic(o, a, with_p=True)
target_p = target_critic.get_target(o_, a_, r, gamma * (1 - terminals))
critic_loss = - (target_p * torch.log(p + 1e-8)).mean()
critic_optimizer.zero_grad()
critic_loss.backward()
critic_optimizer.step()
if writer is not None:
writer.add_scalar('q', scalar_value=q.mean().item(), global_step=t)
if t % 100 == 0:
with torch.no_grad():
target_critic.load_state_dict(critic.state_dict())
if verbose:
counter.update(1)
counter.set_description('loss : %.3f, q : %.3f' % (critic_loss.item(), q.mean().item()))
if verbose: counter.close()
return critic
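# --- Usage sketch (comment added; illustrative only) ---
# `dataset` must be an OPEDataset and `policy` any object implementing the
# Policy interface (get_action); both are supplied by the caller:
#
#   cloned_policy = bc(dataset, verbose=True)            # behaviour cloning
#   critic = FQE(dataset, policy, num_steps=100000)      # fitted Q evaluation
#   # critic(obs_tensor, act_tensor) then estimates the policy's Q-values.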
| 40.755814
| 148
| 0.605849
|
918b31c1dfb33a74906a280e5b28dcd4614dfaa7
| 151
|
py
|
Python
|
PythonCode/1866.py
|
CrystianPrintes20/ProjetoUri
|
92a88ae2671a556f4d418c3605e9a2c6933dc9d8
|
[
"MIT"
] | null | null | null |
PythonCode/1866.py
|
CrystianPrintes20/ProjetoUri
|
92a88ae2671a556f4d418c3605e9a2c6933dc9d8
|
[
"MIT"
] | null | null | null |
PythonCode/1866.py
|
CrystianPrintes20/ProjetoUri
|
92a88ae2671a556f4d418c3605e9a2c6933dc9d8
|
[
"MIT"
] | null | null | null |
n = int(input())
c = []
for i in range(n):
x = int(input())
if x % 2 == 1:
c += [1]
else:
c += [0]
for j in c:
print(j)
| 15.1
| 20
| 0.377483
|
84ca93f439893e14df9c1533af4d4407d6cde80f
| 3,210
|
py
|
Python
|
Django/src/members/forms.py
|
RocketSoftware/PythonBetaProject
|
8a99bc0e264e3bb0169f557110fad25820e8ff09
|
[
"MIT"
] | 11
|
2015-06-04T21:54:20.000Z
|
2019-06-21T01:05:01.000Z
|
Django/src/members/forms.py
|
RocketSoftware/PythonBetaProject
|
8a99bc0e264e3bb0169f557110fad25820e8ff09
|
[
"MIT"
] | 1
|
2015-06-04T13:24:10.000Z
|
2015-06-04T13:24:10.000Z
|
Django/src/members/forms.py
|
RocketSoftware/PythonBetaProject
|
8a99bc0e264e3bb0169f557110fad25820e8ff09
|
[
"MIT"
] | 8
|
2015-06-03T21:27:52.000Z
|
2020-06-11T01:10:09.000Z
|
from django import forms
# get the UniVerse imports
import os
os.chdir("C:\\U2\\XDEMO\\pythonbetarocket")
import u2py
def validateid(check_id):
sub = u2py.Subroutine('VALIDATE_MEMBERS_ID', 4)
sub.args[0] = check_id
sub.call()
if str(sub.args[2]) != '0':
raise forms.ValidationError('Error! ' + str(sub.args[3]))
class MembersSearchForm(forms.Form):
# UniVerse dict names
uvfields = {
1: "ID",
2: "FIRST_NAME",
3: "LAST_NAME",
4: "ADDRESS",
5: "CITY",
6: "STATE_CODE",
}
# form field names, matching the UniVerse dict names
formfields = {
1: "member_id",
2: "member_first_name",
3: "member_last_name",
4: "member_address",
5: "member_city",
6: "member_state_code",
}
# setting up the form
member_id = forms.CharField(label='ID', required=False)
member_first_name = forms.CharField(label='Name', required=False)
member_last_name = forms.CharField(label='Surname', required=False)
member_address = forms.CharField(label='Address', required=False)
member_city = forms.CharField(label='City', required=False)
member_state_code = forms.CharField(label='State Code', required=False)
class MembersResultForm(forms.Form):
member_results = forms.ChoiceField(label='Results')
class MembersAdminForm(forms.Form):
# UniVerse dict names
uvfields = {
1: "FIRST_NAME",
2: "LAST_NAME",
3: "ADDRESS",
4: "CITY",
5: "STATE_CODE",
}
# form field names, matching the UniVerse dict names
formfields = {
1: "member_first_name",
2: "member_last_name",
3: "member_address",
4: "member_city",
5: "member_state_code",
}
# setting up the form
member_id = forms.CharField(label='ID')
member_first_name = forms.CharField(label='Name')
member_last_name = forms.CharField(label='Surname')
member_address = forms.CharField(label='Address')
member_city = forms.CharField(label='City')
member_state_code = forms.CharField(label='State Code')
def clean(self):
cleaned_data = super(MembersAdminForm, self).clean()
member_id = cleaned_data.get('member_id')
# anything else than NEW must already exist
if member_id != 'NEW':
validateid(member_id)
return self.cleaned_data
class MembersBankDetailForm(forms.Form):
card_choices = (
('AMEX', 'American Express'),
('V', 'Visa'),
('MC', 'Mastercard'),
)
uvfields = {
1: "CARDNUM",
2: "CARDTYPE",
3: "CARDEXP",
4: "CARDSEC",
}
formfields = {
1: "member_card_number",
2: "member_card_type",
3: "member_card_expiry",
4: "member_card_cvv",
}
#
member_id = forms.CharField(label='ID', widget=forms.TextInput(attrs={'readonly': 'readonly'}))
member_card_number = forms.CharField(label='Card Number', max_length=16)
member_card_type = forms.ChoiceField(label='Card Type', choices=card_choices)
member_card_expiry = forms.CharField(label='Expiry Date')
member_card_cvv = forms.CharField(label='CVV')
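# --- Usage sketch (comment added; illustrative only) ---
# In a view these forms are used like any Django form; the uvfields/formfields
# maps above tie each form field to its UniVerse dictionary name:
#
#   form = MembersSearchForm(request.GET)
#   if form.is_valid():
#       last_name = form.cleaned_data["member_last_name"]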
| 30
| 99
| 0.626168
|
436700b18e63e6926cabb123659169cb120f4872
| 50,174
|
py
|
Python
|
input/eg01-eg30/flatpower_d3ploy.py
|
robfairh/transition-scenarios
|
1602e5e8c8c6914ad8879b0cdc409e73444d0f5f
|
[
"BSD-3-Clause"
] | 2
|
2019-03-11T12:27:40.000Z
|
2019-12-05T08:09:02.000Z
|
input/eg01-eg30/flatpower_d3ploy.py
|
robfairh/transition-scenarios
|
1602e5e8c8c6914ad8879b0cdc409e73444d0f5f
|
[
"BSD-3-Clause"
] | 119
|
2016-09-14T16:15:54.000Z
|
2022-03-16T20:28:18.000Z
|
input/eg01-eg30/flatpower_d3ploy.py
|
gwenchee/transition-scenarios
|
17edc74bd7f6b7391930907447465c7dc2212ef7
|
[
"BSD-3-Clause"
] | 11
|
2017-02-21T19:56:44.000Z
|
2021-06-09T19:57:07.000Z
|
"""
Running this script generates .xml files and runs them, producing the .sqlite
files for all the prediction methods.
The user can choose a demand equation (demand_eq), a buffer size
(buff_size), and the number of time steps forward (steps).
The buffer plays its role one time step before the transition
starts. The transition starts at after 960 time steps (80 years).
"""
import json
import re
import subprocess
import os
import sqlite3 as lite
import copy
import glob
import sys
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import d3ploy.tester as tester
import d3ploy.plotter as plotter
import collections
direc = os.listdir('./')
ENV = dict(os.environ)
ENV['PYTHONPATH'] = ".:" + ENV.get('PYTHONPATH', '')
calc_methods = ["ma", "arma", "arch", "poly", "exp_smoothing", "holt_winters",
"fft", "sw_seasonal"]
name = 'eg01-eg30-flatpower-d3ploy-buffer'
demand_eq = "60000"
buff_size = "0"
thro_frmixer = 50e3
buffer_frTR = thro_frmixer / (1.0 + 0.8854 / 0.1146)
buffer_frU = (buffer_frTR * 0.8854 / 0.1146) * 1.05
thro_frmixer = str(thro_frmixer)
buffer_frTR = str(buffer_frTR)
buffer_frU = str(buffer_frU)
thro_moxmixer = 100e3
buffer_moxTR = thro_moxmixer / (1.0 + 0.896 / 0.104)
buffer_moxU = (buffer_moxTR * 0.896 / 0.104) * 1.05
thro_moxmixer = str(thro_moxmixer)
buffer_moxTR = str(buffer_moxTR)
buffer_moxU = str(buffer_moxU)
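# Note (comment added): the buffer sizes follow from the mixer stream ratios used
# in the input file below -- for the FR mixer the TRU stream is 0.1146 of the blend,
# so buffer_frTR = 50e3 / (1 + 0.8854/0.1146) ~= 5.73e3, and buffer_frU is the
# complementary 0.8854 share with a 5% margin; the MOX mixer values are computed
# the same way with the 0.104/0.896 split.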
control = """
<control>
<duration>1440</duration>
<startmonth>1</startmonth>
<startyear>2000</startyear>
<decay>lazy</decay>
</control>
<archetypes>
<spec>
<lib>cycamore</lib>
<name>Source</name>
</spec>
<spec>
<lib>cycamore</lib>
<name>Enrichment</name>
</spec>
<spec>
<lib>cycamore</lib>
<name>Reactor</name>
</spec>
<spec>
<lib>cycamore</lib>
<name>Storage</name>
</spec>
<spec>
<lib>cycamore</lib>
<name>Separations</name>
</spec>
<spec>
<lib>cycamore</lib>
<name>Mixer</name>
</spec>
<spec>
<lib>cycamore</lib>
<name>Sink</name>
</spec>
<spec>
<lib>agents</lib>
<name>NullRegion</name>
</spec>
<spec>
<lib>agents</lib>
<name>NullInst</name>
</spec>
<spec>
<lib>cycamore</lib>
<name>DeployInst</name>
</spec>
<spec>
<lib>d3ploy.demand_driven_deployment_inst</lib>
<name>DemandDrivenDeploymentInst</name>
</spec>
<spec>
<lib>d3ploy.supply_driven_deployment_inst</lib>
<name>SupplyDrivenDeploymentInst</name>
</spec>
</archetypes>
<facility>
<name>source</name>
<config>
<Source>
<outcommod>sourceout</outcommod>
<outrecipe>sourceoutrecipe</outrecipe>
<throughput>1e8</throughput>
</Source>
</config>
</facility>
<facility>
<name>enrichment</name>
<config>
<Enrichment>
<feed_commod>sourceout</feed_commod>
<feed_recipe>sourceoutrecipe</feed_recipe>
<product_commod>enrichmentout</product_commod>
<tails_assay>0.0025</tails_assay>
<tails_commod>enrichmentwaste</tails_commod>
<swu_capacity>1e100</swu_capacity>
<initial_feed>5e7</initial_feed>
</Enrichment>
</config>
</facility>
<facility>
<name>lwr</name>
<config>
<Reactor>
<fuel_inrecipes> <val>lwrinrecipe</val> </fuel_inrecipes>
<fuel_outrecipes> <val>lwroutrecipe</val> </fuel_outrecipes>
<fuel_incommods> <val>enrichmentout</val> </fuel_incommods>
<fuel_outcommods> <val>lwrout</val> </fuel_outcommods>
<cycle_time>18</cycle_time>
<refuel_time>0</refuel_time>
<assem_size>29863.3</assem_size>
<n_assem_core>3</n_assem_core>
<n_assem_batch>1</n_assem_batch>
<power_cap>1000</power_cap>
</Reactor>
</config>
</facility>
<facility>
<name>lwr1</name>
<lifetime>960</lifetime>
<config>
<Reactor>
<fuel_inrecipes>
<val>lwrinrecipe</val>
</fuel_inrecipes>
<fuel_outrecipes>
<val>lwroutrecipe</val>
</fuel_outrecipes>
<fuel_incommods>
<val>enrichmentout</val>
</fuel_incommods>
<fuel_outcommods>
<val>lwrout</val>
</fuel_outcommods>
<cycle_time>18</cycle_time>
<refuel_time>0</refuel_time>
<assem_size>29863.3</assem_size>
<n_assem_core>3</n_assem_core>
<n_assem_batch>1</n_assem_batch>
<power_cap>1000</power_cap>
</Reactor>
</config>
</facility>
<facility>
<name>lwr2</name>
<lifetime>980</lifetime>
<config>
<Reactor>
<fuel_inrecipes>
<val>lwrinrecipe</val>
</fuel_inrecipes>
<fuel_outrecipes>
<val>lwroutrecipe</val>
</fuel_outrecipes>
<fuel_incommods>
<val>enrichmentout</val>
</fuel_incommods>
<fuel_outcommods>
<val>lwrout</val>
</fuel_outcommods>
<cycle_time>18</cycle_time>
<refuel_time>0</refuel_time>
<assem_size>29863.3</assem_size>
<n_assem_core>3</n_assem_core>
<n_assem_batch>1</n_assem_batch>
<power_cap>1000</power_cap>
</Reactor>
</config>
</facility>
<facility>
<name>lwr3</name>
<lifetime>1000</lifetime>
<config>
<Reactor>
<fuel_inrecipes>
<val>lwrinrecipe</val>
</fuel_inrecipes>
<fuel_outrecipes>
<val>lwroutrecipe</val>
</fuel_outrecipes>
<fuel_incommods>
<val>enrichmentout</val>
</fuel_incommods>
<fuel_outcommods>
<val>lwrout</val>
</fuel_outcommods>
<cycle_time>18</cycle_time>
<refuel_time>0</refuel_time>
<assem_size>29863.3</assem_size>
<n_assem_core>3</n_assem_core>
<n_assem_batch>1</n_assem_batch>
<power_cap>1000</power_cap>
</Reactor>
</config>
</facility>
<facility>
<name>lwr4</name>
<lifetime>1020</lifetime>
<config>
<Reactor>
<fuel_inrecipes>
<val>lwrinrecipe</val>
</fuel_inrecipes>
<fuel_outrecipes>
<val>lwroutrecipe</val>
</fuel_outrecipes>
<fuel_incommods>
<val>enrichmentout</val>
</fuel_incommods>
<fuel_outcommods>
<val>lwrout</val>
</fuel_outcommods>
<cycle_time>18</cycle_time>
<refuel_time>0</refuel_time>
<assem_size>29863.3</assem_size>
<n_assem_core>3</n_assem_core>
<n_assem_batch>1</n_assem_batch>
<power_cap>1000</power_cap>
</Reactor>
</config>
</facility>
<facility>
<name>lwr5</name>
<lifetime>1040</lifetime>
<config>
<Reactor>
<fuel_inrecipes>
<val>lwrinrecipe</val>
</fuel_inrecipes>
<fuel_outrecipes>
<val>lwroutrecipe</val>
</fuel_outrecipes>
<fuel_incommods>
<val>enrichmentout</val>
</fuel_incommods>
<fuel_outcommods>
<val>lwrout</val>
</fuel_outcommods>
<cycle_time>18</cycle_time>
<refuel_time>0</refuel_time>
<assem_size>29863.3</assem_size>
<n_assem_core>3</n_assem_core>
<n_assem_batch>1</n_assem_batch>
<power_cap>1000</power_cap>
</Reactor>
</config>
</facility>
<facility>
<name>lwr6</name>
<lifetime>1060</lifetime>
<config>
<Reactor>
<fuel_inrecipes>
<val>lwrinrecipe</val>
</fuel_inrecipes>
<fuel_outrecipes>
<val>lwroutrecipe</val>
</fuel_outrecipes>
<fuel_incommods>
<val>enrichmentout</val>
</fuel_incommods>
<fuel_outcommods>
<val>lwrout</val>
</fuel_outcommods>
<cycle_time>18</cycle_time>
<refuel_time>0</refuel_time>
<assem_size>29863.3</assem_size>
<n_assem_core>3</n_assem_core>
<n_assem_batch>1</n_assem_batch>
<power_cap>1000</power_cap>
</Reactor>
</config>
</facility>
<facility>
<name>fr</name>
<lifetime>720</lifetime>
<config>
<Reactor>
<fuel_inrecipes> <val>frinrecipe</val> </fuel_inrecipes>
<fuel_outrecipes> <val>froutrecipe</val> </fuel_outrecipes>
<fuel_incommods> <val>frmixerout</val> </fuel_incommods>
<fuel_outcommods> <val>frout</val> </fuel_outcommods>
<cycle_time>12</cycle_time>
<refuel_time>0</refuel_time>
<assem_size>3950</assem_size>
<n_assem_core>1</n_assem_core>
<n_assem_batch>1</n_assem_batch>
<power_cap>333.34</power_cap>
</Reactor>
</config>
</facility>
<facility>
<name>moxlwr</name>
<lifetime>960</lifetime>
<config>
<Reactor>
<fuel_inrecipes> <val>moxinrecipe</val> </fuel_inrecipes>
<fuel_outrecipes> <val>moxoutrecipe</val> </fuel_outrecipes>
<fuel_incommods> <val>moxmixerout</val> </fuel_incommods>
<fuel_outcommods> <val>moxout</val> </fuel_outcommods>
<cycle_time>18</cycle_time>
<refuel_time>0</refuel_time>
<assem_size>33130</assem_size>
<n_assem_core>1</n_assem_core>
<n_assem_batch>1</n_assem_batch>
<power_cap>1000</power_cap>
</Reactor>
</config>
</facility>
<facility>
<name>lwrstorage</name>
<config>
<Storage>
<in_commods>
<val>lwrout</val>
</in_commods>
<residence_time>36</residence_time>
<out_commods>
<val>lwrstorageout</val>
</out_commods>
<max_inv_size>1e8</max_inv_size>
</Storage>
</config>
</facility>
<facility>
<name>frstorage</name>
<config>
<Storage>
<in_commods>
<val>frout</val>
</in_commods>
<residence_time>36</residence_time>
<out_commods>
<val>frstorageout</val>
</out_commods>
<max_inv_size>1e8</max_inv_size>
</Storage>
</config>
</facility>
<facility>
<name>moxstorage</name>
<config>
<Storage>
<in_commods>
<val>moxout</val>
</in_commods>
<residence_time>36</residence_time>
<out_commods>
<val>moxstorageout</val>
</out_commods>
<max_inv_size>1e8</max_inv_size>
</Storage>
</config>
</facility>
<facility>
<name>lwrreprocessing</name>
<config>
<Separations>
<feed_commods>
<val>lwrstorageout</val>
</feed_commods>
<feedbuf_size>1e8</feedbuf_size>
<throughput>1e8</throughput>
<leftover_commod>lwrreprocessingwaste</leftover_commod>
<leftoverbuf_size>1e8</leftoverbuf_size>
<streams>
<item>
<commod>lwrtru</commod>
<info>
<buf_size>1e8</buf_size>
<efficiencies>
<item>
<comp>Np</comp>
<eff>1.0</eff>
</item>
<item>
<comp>Am</comp>
<eff>1.0</eff>
</item>
<item>
<comp>Cm</comp>
<eff>1.0</eff>
</item>
<item>
<comp>Pu</comp>
<eff>1.0</eff>
</item>
</efficiencies>
</info>
</item>
<item>
<commod>lwru</commod>
<info>
<buf_size>1e8</buf_size>
<efficiencies>
<item>
<comp>U</comp>
<eff>1.0</eff>
</item>
</efficiencies>
</info>
</item>
</streams>
</Separations>
</config>
</facility>
<facility>
<name>frreprocessing</name>
<config>
<Separations>
<feed_commods>
<val>frstorageout</val>
</feed_commods>
<feedbuf_size>1e8</feedbuf_size>
<throughput>1e8</throughput>
<leftover_commod>frreprocessingwaste</leftover_commod>
<leftoverbuf_size>1e8</leftoverbuf_size>
<streams>
<item>
<commod>frtru</commod>
<info>
<buf_size>1e8</buf_size>
<efficiencies>
<item>
<comp>Np</comp>
<eff>1.0</eff>
</item>
<item>
<comp>Am</comp>
<eff>1.0</eff>
</item>
<item>
<comp>Cm</comp>
<eff>1.0</eff>
</item>
<item>
<comp>Pu</comp>
<eff>1.0</eff>
</item>
</efficiencies>
</info>
</item>
<item>
<commod>fru</commod>
<info>
<buf_size>1e8</buf_size>
<efficiencies>
<item>
<comp>U</comp>
<eff>1.0</eff>
</item>
</efficiencies>
</info>
</item>
</streams>
</Separations>
</config>
</facility>
<facility>
<name>moxreprocessing</name>
<config>
<Separations>
<feed_commods>
<val>moxstorageout</val>
</feed_commods>
<feedbuf_size>1e8</feedbuf_size>
<throughput>1e8</throughput>
<leftover_commod>moxreprocessingwaste</leftover_commod>
<leftoverbuf_size>1e8</leftoverbuf_size>
<streams>
<item>
<commod>moxtru</commod>
<info>
<buf_size>1e8</buf_size>
<efficiencies>
<item>
<comp>Np</comp>
<eff>1.0</eff>
</item>
<item>
<comp>Am</comp>
<eff>1.0</eff>
</item>
<item>
<comp>Cm</comp>
<eff>1.0</eff>
</item>
<item>
<comp>Pu</comp>
<eff>1.0</eff>
</item>
</efficiencies>
</info>
</item>
<item>
<commod>moxu</commod>
<info>
<buf_size>1e8</buf_size>
<efficiencies>
<item>
<comp>U</comp>
<eff>1.0</eff>
</item>
</efficiencies>
</info>
</item>
</streams>
</Separations>
</config>
</facility>
<facility>
<name>frmixer</name>
<config>
<Mixer>
<in_streams>
<stream>
<info>
<mixing_ratio>0.1146</mixing_ratio>
<buf_size>%s</buf_size>
</info>
<commodities>
<item>
<commodity>lwrtru</commodity>
<pref>3.0</pref>
</item>
<item>
<commodity>frtru</commodity>
<pref>2.0</pref>
</item>
<item>
<commodity>moxtru</commodity>
<pref>1.0</pref>
</item>
</commodities>
</stream>
<stream>
<info>
<mixing_ratio>0.8854</mixing_ratio>
<buf_size>%s</buf_size>
</info>
<commodities>
<item>
<commodity>lwru</commodity>
<pref>3.0</pref>
</item>
<item>
<commodity>fru</commodity>
<pref>2.0</pref>
</item>
<item>
<commodity>moxu</commodity>
<pref>1.0</pref>
</item>
<item>
<commodity>tailings</commodity>
<pref>1.0</pref>
</item>
</commodities>
</stream>
</in_streams>
<out_commod>frmixerout</out_commod>
<out_buf_size>%s</out_buf_size>
<throughput>%s</throughput>
</Mixer>
</config>
</facility>
<facility>
<name>moxmixer</name>
<config>
<Mixer>
<in_streams>
<stream>
<info>
<mixing_ratio>0.104</mixing_ratio>
<buf_size>%s</buf_size>
</info>
<commodities>
<item>
<commodity>lwrtru</commodity>
<pref>3.0</pref>
</item>
<item>
<commodity>frtru</commodity>
<pref>1.0</pref>
</item>
<item>
<commodity>moxtru</commodity>
<pref>2.0</pref>
</item>
</commodities>
</stream>
<stream>
<info>
<mixing_ratio>0.896</mixing_ratio>
<buf_size>%s</buf_size>
</info>
<commodities>
<item>
<commodity>lwru</commodity>
<pref>3.0</pref>
</item>
<item>
<commodity>fru</commodity>
<pref>1.0</pref>
</item>
<item>
<commodity>moxu</commodity>
<pref>2.0</pref>
</item>
<item>
<commodity>tailings</commodity>
<pref>1.0</pref>
</item>
</commodities>
</stream>
</in_streams>
<out_commod>moxmixerout</out_commod>
<out_buf_size>%s</out_buf_size>
<throughput>%s</throughput>
</Mixer>
</config>
</facility>
<facility>
<name>lwrsink</name>
<config>
<Sink>
<in_commods>
<val>lwrreprocessingwaste</val>
</in_commods>
<max_inv_size>1e20</max_inv_size>
</Sink>
</config>
</facility>
<facility>
<name>frsink</name>
<config>
<Sink>
<in_commods>
<val>frreprocessingwaste</val>
</in_commods>
<max_inv_size>1e20</max_inv_size>
</Sink>
</config>
</facility>
<facility>
<name>moxsink</name>
<config>
<Sink>
<in_commods>
<val>moxreprocessingwaste</val>
</in_commods>
<max_inv_size>1e20</max_inv_size>
</Sink>
</config>
</facility>
""" % (buffer_frTR, buffer_frU, thro_frmixer, thro_frmixer,
buffer_moxTR, buffer_moxU, thro_moxmixer, thro_moxmixer)
recipes = """
<recipe>
<name>sourceoutrecipe</name>
<basis>mass</basis>
<nuclide> <id>U235</id> <comp>0.711</comp> </nuclide>
<nuclide> <id>U238</id> <comp>99.289</comp> </nuclide>
</recipe>
<recipe>
<name>lwrinrecipe</name>
<basis>mass</basis>
<nuclide> <id>U234</id> <comp>0.0002558883</comp> </nuclide>
<nuclide> <id>U235</id> <comp>0.0319885317</comp> </nuclide>
<nuclide> <id>U238</id> <comp>0.96775558</comp> </nuclide>
</recipe>
<recipe>
<name>lwroutrecipe</name>
<basis>mass</basis>
<nuclide> <id>He4</id> <comp>9.47457840128509E-07</comp> </nuclide>
<nuclide> <id>Ra226</id> <comp>9.78856442957042E-14</comp> </nuclide>
<nuclide> <id>Ra228</id> <comp>2.75087759176098E-20</comp> </nuclide>
<nuclide> <id>Pb206</id> <comp>5.57475193532078E-18</comp> </nuclide>
<nuclide> <id>Pb207</id> <comp>1.68592497990149E-15</comp> </nuclide>
<nuclide> <id>Pb208</id> <comp>3.6888358546006E-12</comp> </nuclide>
<nuclide> <id>Pb210</id> <comp>3.02386544437848E-19</comp> </nuclide>
<nuclide> <id>Th228</id> <comp>8.47562285269577E-12</comp> </nuclide>
<nuclide> <id>Th229</id> <comp>2.72787861516683E-12</comp> </nuclide>
<nuclide> <id>Th230</id> <comp>2.6258831537493E-09</comp> </nuclide>
<nuclide> <id>Th232</id> <comp>4.17481422959E-10</comp> </nuclide>
<nuclide> <id>Bi209</id> <comp>6.60770597104927E-16</comp> </nuclide>
<nuclide> <id>Ac227</id> <comp>3.0968621961773E-14</comp> </nuclide>
<nuclide> <id>Pa231</id> <comp>9.24658854635179E-10</comp> </nuclide>
<nuclide> <id>U232</id> <comp>0.000000001</comp> </nuclide>
<nuclide> <id>U233</id> <comp>2.21390148606282E-09</comp> </nuclide>
<nuclide> <id>U234</id> <comp>0.0001718924</comp> </nuclide>
<nuclide> <id>U235</id> <comp>0.0076486597</comp> </nuclide>
<nuclide> <id>U236</id> <comp>0.0057057461</comp> </nuclide>
<nuclide> <id>U238</id> <comp>0.9208590237</comp> </nuclide>
<nuclide> <id>Np237</id> <comp>0.0006091729</comp> </nuclide>
<nuclide> <id>Pu238</id> <comp>0.000291487</comp> </nuclide>
<nuclide> <id>Pu239</id> <comp>0.0060657301</comp> </nuclide>
<nuclide> <id>Pu240</id> <comp>0.0029058707</comp> </nuclide>
<nuclide> <id>Pu241</id> <comp>0.0017579218</comp> </nuclide>
<nuclide> <id>Pu242</id> <comp>0.0008638616</comp> </nuclide>
<nuclide> <id>Pu244</id> <comp>2.86487251922763E-08</comp> </nuclide>
<nuclide> <id>Am241</id> <comp>6.44271331287386E-05</comp> </nuclide>
<nuclide> <id>Am242m</id> <comp>8.53362027193319E-07</comp> </nuclide>
<nuclide> <id>Am243</id> <comp>0.0001983912</comp> </nuclide>
<nuclide> <id>Cm242</id> <comp>2.58988475560194E-05</comp> </nuclide>
<nuclide> <id>Cm243</id> <comp>0.000000771</comp> </nuclide>
<nuclide> <id>Cm244</id> <comp>8.5616190260478E-05</comp> </nuclide>
<nuclide> <id>Cm245</id> <comp>5.72174539442251E-06</comp> </nuclide>
<nuclide> <id>Cm246</id> <comp>7.29567535786554E-07</comp> </nuclide>
<nuclide> <id>Cm247</id> <comp>0.00000001</comp> </nuclide>
<nuclide> <id>Cm248</id> <comp>7.69165773748653E-10</comp> </nuclide>
<nuclide> <id>Cm250</id> <comp>4.2808095130239E-18</comp> </nuclide>
<nuclide> <id>Cf249</id> <comp>1.64992658175413E-12</comp> </nuclide>
<nuclide> <id>Cf250</id> <comp>2.04190913935875E-12</comp> </nuclide>
<nuclide> <id>Cf251</id> <comp>9.86556100338561E-13</comp> </nuclide>
<nuclide> <id>Cf252</id> <comp>6.57970721693466E-13</comp> </nuclide>
<nuclide> <id>H3</id> <comp>8.58461800264195E-08</comp> </nuclide>
<nuclide> <id>C14</id> <comp>4.05781943561107E-11</comp> </nuclide>
<nuclide> <id>Kr81</id> <comp>4.21681236076192E-11</comp> </nuclide>
<nuclide> <id>Kr85</id> <comp>3.44484671160181E-05</comp> </nuclide>
<nuclide> <id>Sr90</id> <comp>0.0007880649</comp> </nuclide>
<nuclide> <id>Tc99</id> <comp>0.0011409492</comp> </nuclide>
<nuclide> <id>I129</id> <comp>0.0002731878</comp> </nuclide>
<nuclide> <id>Cs134</id> <comp>0.0002300898</comp> </nuclide>
<nuclide> <id>Cs135</id> <comp>0.0006596706</comp> </nuclide>
<nuclide> <id>Cs137</id> <comp>0.0018169192</comp> </nuclide>
<nuclide> <id>H1</id> <comp>0.0477938151</comp> </nuclide>
</recipe>
<recipe>
<name>frinrecipe</name>
<basis>mass</basis>
<nuclide> <id>He4</id> <comp>8.12E-11</comp> </nuclide>
<nuclide> <id>U232</id> <comp>4.71E-09</comp> </nuclide>
<nuclide> <id>U234</id> <comp>0.0003</comp> </nuclide>
<nuclide> <id>U235</id> <comp>0.0004</comp> </nuclide>
<nuclide> <id>U236</id> <comp>0.0003</comp> </nuclide>
<nuclide> <id>U238</id> <comp>0.83</comp> </nuclide>
<nuclide> <id>Np237</id> <comp>0.0007</comp> </nuclide>
<nuclide> <id>Pu238</id> <comp>0.0022</comp> </nuclide>
<nuclide> <id>Pu239</id> <comp>0.0947</comp> </nuclide>
<nuclide> <id>Pu240</id> <comp>0.0518</comp> </nuclide>
<nuclide> <id>Pu241</id> <comp>0.0072</comp> </nuclide>
<nuclide> <id>Pu242</id> <comp>0.0057</comp> </nuclide>
<nuclide> <id>Am241</id> <comp>0.0031</comp> </nuclide>
<nuclide> <id>Am242m</id> <comp>0.0002</comp> </nuclide>
<nuclide> <id>Am243</id> <comp>0.0016</comp> </nuclide>
<nuclide> <id>Cm242</id> <comp>0.0000</comp> </nuclide>
<nuclide> <id>Cm243</id> <comp>0.0000</comp> </nuclide>
<nuclide> <id>Cm244</id> <comp>0.0011</comp> </nuclide>
<nuclide> <id>Cm245</id> <comp>0.0003</comp> </nuclide>
<nuclide> <id>Cm246</id> <comp>0.0001</comp> </nuclide>
</recipe>
<recipe>
<name>froutrecipe</name>
<basis>mass</basis>
<nuclide> <id>H1</id> <comp>1.8902589607549852e-06</comp> </nuclide>
<nuclide> <id>H2</id> <comp>5.790966758382324e-07</comp> </nuclide>
<nuclide> <id>H3</id> <comp>7.757710185757454e-06</comp> </nuclide>
<nuclide> <id>He3</id> <comp>7.757710185757454e-06</comp> </nuclide>
<nuclide> <id>He4</id> <comp>0.00011964355849865369</comp> </nuclide>
<nuclide> <id>Br85</id> <comp>0.00033707797074734847</comp> </nuclide>
<nuclide> <id>Kr82</id> <comp>3.004746902934225e-07</comp> </nuclide>
<nuclide> <id>Kr85</id> <comp>7.539183138271328e-05</comp> </nuclide>
<nuclide> <id>Kr85m</id> <comp>0.00033707797074734847</comp> </nuclide>
<nuclide> <id>Sr90</id> <comp>0.001109571083610802</comp> </nuclide>
<nuclide> <id>Zr95</id> <comp>0.0025578590908250983</comp> </nuclide>
<nuclide> <id>Nb94</id> <comp>1.3931099277240498e-09</comp> </nuclide>
<nuclide> <id>Nb95</id> <comp>0.0025567664555876677</comp> </nuclide>
<nuclide> <id>Nb95m</id> <comp>2.7643671506994872e-05</comp> </nuclide>
<nuclide> <id>Mo94</id> <comp>2.6223245698335053e-12</comp> </nuclide>
<nuclide> <id>Mo96</id> <comp>9.287399518160331e-07</comp> </nuclide>
<nuclide> <id>Mo99</id> <comp>0.0031795685409231255</comp> </nuclide>
<nuclide> <id>Tc99</id> <comp>0.0031795685409231255</comp> </nuclide>
<nuclide> <id>Ru103</id> <comp>0.003600233107333917</comp> </nuclide>
<nuclide> <id>Ru106</id> <comp>0.002256291765294245</comp> </nuclide>
<nuclide> <id>Rh106</id> <comp>0.002256291765294245</comp> </nuclide>
<nuclide> <id>Sn121m</id> <comp>2.8954833791911622e-06</comp> </nuclide>
<nuclide> <id>Sb122</id> <comp>8.358659566344297e-09</comp> </nuclide>
<nuclide> <id>Sb124</id> <comp>8.41329132821583e-07</comp> </nuclide>
<nuclide> <id>Sb125</id> <comp>7.539183138271328e-05</comp> </nuclide>
<nuclide> <id>Te132</id> <comp>0.002687882684079343</comp> </nuclide>
<nuclide> <id>I129</id> <comp>0.0007156760805170608</comp> </nuclide>
<nuclide> <id>I131</id> <comp>0.0022344390605456327</comp> </nuclide>
<nuclide> <id>I133</id> <comp>0.0038187601548200422</comp> </nuclide>
<nuclide> <id>I135</id> <comp>0.003409021940783557</comp> </nuclide>
<nuclide> <id>Xe128</id> <comp>1.365794046788284e-09</comp> </nuclide>
<nuclide> <id>Xe130</id> <comp>1.2619936992323744e-06</comp> </nuclide>
<nuclide> <id>Xe131m</id> <comp>2.4256502270959927e-05</comp> </nuclide>
<nuclide> <id>Xe133</id> <comp>0.0038406128595686547</comp> </nuclide>
<nuclide> <id>Xe133m</id> <comp>0.00012182882897351494</comp> </nuclide>
<nuclide> <id>Xe135</id> <comp>0.004097382140364852</comp> </nuclide>
<nuclide> <id>Xe135m</id> <comp>0.0010762457088691678</comp> </nuclide>
<nuclide> <id>Cs134</id> <comp>6.282652615226107e-07</comp> </nuclide>
<nuclide> <id>Cs137</id> <comp>0.0034691168788422416</comp> </nuclide>
<nuclide> <id>Ba140</id> <comp>0.0028971223320473083</comp> </nuclide>
<nuclide> <id>La140</id> <comp>0.00290859500204033</comp> </nuclide>
<nuclide> <id>Ce141</id> <comp>0.0027370512697637212</comp> </nuclide>
<nuclide> <id>Ce144</id> <comp>0.001914296935978459</comp> </nuclide>
<nuclide> <id>Pr144</id> <comp>0.0019148432535971744</comp> </nuclide>
<nuclide> <id>Nd142</id> <comp>1.3712572229754374e-09</comp> </nuclide>
<nuclide> <id>Nd144</id> <comp>0.0019148432535971744</comp> </nuclide>
<nuclide> <id>Nd147</id> <comp>0.0010538466865018402</comp> </nuclide>
<nuclide> <id>Pm147</id> <comp>0.0010538466865018402</comp> </nuclide>
<nuclide> <id>Pm148</id> <comp>6.5558114245837635e-09</comp> </nuclide>
<nuclide> <id>Pm148m</id> <comp>1.5843210942744093e-08</comp> </nuclide>
<nuclide> <id>Pm149</id> <comp>0.0006965549638620248</comp> </nuclide>
<nuclide> <id>Pm151</id> <comp>0.0004348688244973896</comp> </nuclide>
<nuclide> <id>Sm148</id> <comp>2.1306387129897234e-08</comp> </nuclide>
<nuclide> <id>Sm150</id> <comp>2.7862198554480997e-06</comp> </nuclide>
<nuclide> <id>Sm151</id> <comp>0.000435415142116105</comp> </nuclide>
<nuclide> <id>Sm153</id> <comp>0.00021852704748612548</comp> </nuclide>
<nuclide> <id>Eu151</id> <comp>0.000435415142116105</comp> </nuclide>
<nuclide> <id>Eu152</id> <comp>2.622324569833505e-10</comp> </nuclide>
<nuclide> <id>Eu154</id> <comp>6.938233757684484e-08</comp> </nuclide>
<nuclide> <id>Eu155</id> <comp>9.342031280031864e-05</comp> </nuclide>
<nuclide> <id>Pu238</id> <comp>0.0009592342315400002</comp> </nuclide>
<nuclide> <id>Pu239</id> <comp>0.08159736522620001</comp> </nuclide>
<nuclide> <id>Pu240</id> <comp>0.026759037549500004</comp> </nuclide>
<nuclide> <id>Pu241</id> <comp>0.0029846904673000003</comp> </nuclide>
<nuclide> <id>Pu242</id> <comp>0.00149966729066</comp> </nuclide>
<nuclide> <id>Pu244</id> <comp>5.234800000000001e-09</comp> </nuclide>
<nuclide> <id>U232</id> <comp>7.93244176110115e-09</comp> </nuclide>
<nuclide> <id>U233</id> <comp>5.4819338672768064e-09</comp> </nuclide>
<nuclide> <id>U234</id> <comp>0.00016133585380000002</comp> </nuclide>
<nuclide> <id>U235</id> <comp>0.00019790111900000001</comp> </nuclide>
<nuclide> <id>U236</id> <comp>0.00031121273160000004</comp> </nuclide>
<nuclide> <id>U238</id> <comp>0.8013295369022</comp> </nuclide>
<nuclide> <id>Am241</id> <comp>0.372034217349</comp> </nuclide>
<nuclide> <id>Am242m</id> <comp>0.02422538847</comp> </nuclide>
<nuclide> <id>Am243</id> <comp>0.10954349109900001</comp> </nuclide>
<nuclide> <id>Cm242</id> <comp>0.016223903343</comp> </nuclide>
<nuclide> <id>Cm243</id> <comp>0.000986552217</comp> </nuclide>
<nuclide> <id>Cm244</id> <comp>0.054130938228</comp> </nuclide>
<nuclide> <id>Cm245</id> <comp>0.011979036378</comp> </nuclide>
<nuclide> <id>Cm246</id> <comp>0.004365667044</comp> </nuclide>
<nuclide> <id>Cm247</id> <comp>0.000161795151</comp> </nuclide>
<nuclide> <id>Cm248</id> <comp>1.1562525796316895e-05</comp> </nuclide>
<nuclide> <id>Cm250</id> <comp>1.9498012888129913e-12</comp> </nuclide>
<nuclide> <id>Cf249</id> <comp>1.9616003888965777e-07</comp> </nuclide>
<nuclide> <id>Cf250</id> <comp>2.0397694269503573e-08</comp> </nuclide>
<nuclide> <id>Cf251</id> <comp>5.791883253531473e-10</comp> </nuclide>
<nuclide> <id>Cf252</id> <comp>1.2216493249045363e-11</comp> </nuclide>
<nuclide> <id>Np237</id> <comp>0.21633723097200003</comp> </nuclide>
</recipe>
<recipe>
<name>moxinrecipe</name>
<basis>mass</basis>
<nuclide> <id>U234</id> <comp>0.0001387087</comp> </nuclide>
<nuclide> <id>U235</id> <comp>0.0041782621</comp> </nuclide>
<nuclide> <id>U236</id> <comp>0.003077193</comp> </nuclide>
<nuclide> <id>U238</id> <comp>0.5029315439</comp> </nuclide>
<nuclide> <id>Np237</id> <comp>0.0232914608</comp> </nuclide>
<nuclide> <id>Pu238</id> <comp>0.0115607251</comp> </nuclide>
<nuclide> <id>Pu239</id> <comp>0.2333146335</comp> </nuclide>
<nuclide> <id>Pu240</id> <comp>0.1104069247</comp> </nuclide>
<nuclide> <id>Pu241</id> <comp>0.0519432579</comp> </nuclide>
<nuclide> <id>Pu242</id> <comp>0.0323320279</comp> </nuclide>
<nuclide> <id>Am241</id> <comp>0.0165510381</comp> </nuclide>
<nuclide> <id>Am242m</id> <comp>2.97718672915165E-05</comp> </nuclide>
<nuclide> <id>Am243</id> <comp>0.0073874633</comp> </nuclide>
<nuclide> <id>Cm243</id> <comp>2.49115624531971E-05</comp> </nuclide>
<nuclide> <id>Cm244</id> <comp>0.0026211644</comp> </nuclide>
<nuclide> <id>Cm245</id> <comp>0.0002109132</comp> </nuclide>
</recipe>
<recipe>
<name>moxoutrecipe</name>
<basis>mass</basis>
<nuclide> <id>H1</id> <comp>1.6592893412427388e-06</comp> </nuclide>
<nuclide> <id>H2</id> <comp>5.490295614406121e-07</comp> </nuclide>
<nuclide> <id>H3</id> <comp>5.774977609227179e-06</comp> </nuclide>
<nuclide> <id>He3</id> <comp>5.774977609227179e-06</comp> </nuclide>
<nuclide> <id>He4</id> <comp>8.914613323539418e-05</comp> </nuclide>
<nuclide> <id>Br85</id> <comp>0.0002334392357532676</comp> </nuclide>
<nuclide> <id>Kr82</id> <comp>7.117049870526452e-07</comp> </nuclide>
<nuclide> <id>Kr85</id> <comp>5.530964470809129e-05</comp> </nuclide>
<nuclide> <id>Kr85m</id> <comp>0.0002342526128813278</comp> </nuclide>
<nuclide> <id>Sr90</id> <comp>0.000818664079392557</comp> </nuclide>
<nuclide> <id>Zr95</id> <comp>0.0020127017033848807</comp> </nuclide>
<nuclide> <id>Nb94</id> <comp>6.832367875705393e-09</comp> </nuclide>
<nuclide> <id>Nb95</id> <comp>0.0020114816376927904</comp> </nuclide>
<nuclide> <id>Nb95m</id> <comp>2.175783817560944e-05</comp> </nuclide>
<nuclide> <id>Mo94</id> <comp>1.4640788305082986e-11</comp> </nuclide>
<nuclide> <id>Mo96</id> <comp>2.0741116765534236e-06</comp> </nuclide>
<nuclide> <id>Mo99</id> <comp>0.002515368768526063</comp> </nuclide>
<nuclide> <id>Tc99</id> <comp>0.002514962079962033</comp> </nuclide>
<nuclide> <id>Ru103</id> <comp>0.002825672142881017</comp> </nuclide>
<nuclide> <id>Ru106</id> <comp>0.0017032117061579876</comp> </nuclide>
<nuclide> <id>Rh106</id> <comp>0.0017032117061579876</comp> </nuclide>
<nuclide> <id>Sn121m</id> <comp>2.1147805329564317e-06</comp> </nuclide>
<nuclide> <id>Sb122</id> <comp>9.760525536721992e-09</comp> </nuclide>
<nuclide> <id>Sb124</id> <comp>9.272499259885892e-07</comp> </nuclide>
<nuclide> <id>Sb125</id> <comp>4.758256199151971e-05</comp> </nuclide>
<nuclide> <id>Te132</id> <comp>0.002072078233733273</comp> </nuclide>
<nuclide> <id>I129</id> <comp>0.0005722108095903268</comp> </nuclide>
<nuclide> <id>I131</id> <comp>0.0015145082124480291</comp> </nuclide>
<nuclide> <id>I133</id> <comp>0.002834619291289678</comp> </nuclide>
<nuclide> <id>I135</id> <comp>0.0025743386103104253</comp> </nuclide>
<nuclide> <id>Xe128</id> <comp>9.516512398303943e-10</comp> </nuclide>
<nuclide> <id>Xe130</id> <comp>6.751030162899378e-07</comp> </nuclide>
<nuclide> <id>Xe131m</id> <comp>1.647088684321836e-05</comp> </nuclide>
<nuclide> <id>Xe133</id> <comp>0.0028427530625702805</comp> </nuclide>
<nuclide> <id>Xe133m</id> <comp>8.784472983049792e-05</comp> </nuclide>
<nuclide> <id>Xe135</id> <comp>0.002993227831261411</comp> </nuclide>
<nuclide> <id>Xe135m</id> <comp>0.0007239056439735477</comp> </nuclide>
<nuclide> <id>Cs134</id> <comp>2.724813379001556e-07</comp> </nuclide>
<nuclide> <id>Cs137</id> <comp>0.0026792642598301867</comp> </nuclide>
<nuclide> <id>Ba140</id> <comp>0.0021643965377681016</comp> </nuclide>
<nuclide> <id>La140</id> <comp>0.0021688701119724325</comp> </nuclide>
<nuclide> <id>Ce141</id> <comp>0.002116813975776582</comp> </nuclide>
<nuclide> <id>Ce144</id> <comp>0.0015271155579329617</comp> </nuclide>
<nuclide> <id>Pr144</id> <comp>0.0015275222464969918</comp> </nuclide>
<nuclide> <id>Nd142</id> <comp>5.896984178436203e-10</comp> </nuclide>
<nuclide> <id>Nd144</id> <comp>0.0015275222464969918</comp> </nuclide>
<nuclide> <id>Nd147</id> <comp>0.0008312714248774897</comp> </nuclide>
<nuclide> <id>Pm147</id> <comp>0.0008312714248774897</comp> </nuclide>
<nuclide> <id>Pm148</id> <comp>2.2774559585684646e-09</comp> </nuclide>
<nuclide> <id>Pm148m</id> <comp>4.798925055554979e-09</comp> </nuclide>
<nuclide> <id>Pm149</id> <comp>0.0005136476563699948</comp> </nuclide>
<nuclide> <id>Pm151</id> <comp>0.0003155903256873444</comp> </nuclide>
<nuclide> <id>Sm148</id> <comp>6.832367875705393e-09</comp> </nuclide>
<nuclide> <id>Sm150</id> <comp>9.231830403482883e-07</comp> </nuclide>
<nuclide> <id>Sm151</id> <comp>0.0003155903256873444</comp> </nuclide>
<nuclide> <id>Sm153</id> <comp>0.00015454165433143155</comp> </nuclide>
<nuclide> <id>Eu151</id> <comp>0.0003155903256873444</comp> </nuclide>
<nuclide> <id>Eu152</id> <comp>7.93042699858662e-11</comp> </nuclide>
<nuclide> <id>Eu154</id> <comp>1.9927739637474066e-08</comp> </nuclide>
<nuclide> <id>Eu155</id> <comp>7.076381014123444e-05</comp> </nuclide>
<nuclide> <id>Pu238</id> <comp>0.0044566634772000005</comp> </nuclide>
<nuclide> <id>Pu239</id> <comp>0.03025956446406</comp> </nuclide>
<nuclide> <id>Pu240</id> <comp>0.020955172652820004</comp> </nuclide>
<nuclide> <id>Pu241</id> <comp>0.010839136847400002</comp> </nuclide>
<nuclide> <id>Pu242</id> <comp>0.00728930104722</comp> </nuclide>
<nuclide> <id>Pu244</id> <comp>1.61517158245631e-07</comp> </nuclide>
<nuclide> <id>U232</id> <comp>6.073368072739612e-10</comp> </nuclide>
<nuclide> <id>U233</id> <comp>1.0603318711422634e-08</comp> </nuclide>
<nuclide> <id>U234</id> <comp>0.00027194035646</comp> </nuclide>
<nuclide> <id>U235</id> <comp>0.00444193081428</comp> </nuclide>
<nuclide> <id>U236</id> <comp>0.005230015263780001</comp> </nuclide>
<nuclide> <id>U238</id> <comp>0.84785610241408</comp> </nuclide>
<nuclide> <id>Am241</id> <comp>0.16971099736</comp> </nuclide>
<nuclide> <id>Am242m</id> <comp>0.00401628096</comp> </nuclide>
<nuclide> <id>Am243</id> <comp>0.1661221824</comp> </nuclide>
<nuclide> <id>Cm242</id> <comp>0.02195556216</comp> </nuclide>
<nuclide> <id>Cm243</id> <comp>0.00100805784</comp> </nuclide>
<nuclide> <id>Cm244</id> <comp>0.08118693216</comp> </nuclide>
<nuclide> <id>Cm245</id> <comp>0.010176278</comp> </nuclide>
<nuclide> <id>Cm246</id> <comp>0.0004897534400000001</comp> </nuclide>
<nuclide> <id>Cm247</id> <comp>9.618019982286001e-06</comp> </nuclide>
<nuclide> <id>Cm248</id> <comp>7.304431593512233e-07</comp> </nuclide>
<nuclide> <id>Cm250</id> <comp>2.97791762967296e-15</comp> </nuclide>
<nuclide> <id>Cf249</id> <comp>3.2000000000000005e-09</comp> </nuclide>
<nuclide> <id>Cf250</id> <comp>2.339108839804712e-09</comp> </nuclide>
<nuclide> <id>Cf251</id> <comp>1.154800409150096e-09</comp> </nuclide>
<nuclide> <id>Cf252</id> <comp>6.009268703691961e-10</comp> </nuclide>
<nuclide> <id>Np237</id> <comp>0.34532360008</comp> </nuclide>
</recipe>
"""
region = {}
for calc_method in calc_methods:
region[calc_method] = """
<region>
<config>
<NullRegion>
</NullRegion>
</config>
<institution>
<config>
<DemandDrivenDeploymentInst>
<calc_method>%s</calc_method>
<demand_eq>%s</demand_eq>
<installed_cap>1</installed_cap>
<steps>1</steps>
<back_steps>2</back_steps>
<facility_commod>
<item>
<facility>source</facility>
<commod>sourceout</commod>
</item>
<item>
<facility>enrichment</facility>
<commod>enrichmentout</commod>
</item>
<item>
<facility>fr</facility>
<commod>POWER</commod>
</item>
<item>
<facility>moxlwr</facility>
<commod>POWER</commod>
</item>
<item>
<facility>lwr1</facility>
<commod>POWER</commod>
</item>
<item>
<facility>lwr2</facility>
<commod>POWER</commod>
</item>
<item>
<facility>lwr3</facility>
<commod>POWER</commod>
</item>
<item>
<facility>lwr4</facility>
<commod>POWER</commod>
</item>
<item>
<facility>lwr5</facility>
<commod>POWER</commod>
</item>
<item>
<facility>lwr6</facility>
<commod>POWER</commod>
</item>
<item>
<facility>frmixer</facility>
<commod>frmixerout</commod>
</item>
<item>
<facility>moxmixer</facility>
<commod>moxmixerout</commod>
</item>
</facility_commod>
<facility_capacity>
<item>
<facility>source</facility>
<capacity>1e8</capacity>
</item>
<item>
<facility>enrichment</facility>
<capacity>1e100</capacity>
</item>
<item>
<facility>fr</facility>
<capacity>333.34</capacity>
</item>
<item>
<facility>moxlwr</facility>
<capacity>1000</capacity>
</item>
<item>
<facility>lwr1</facility>
<capacity>1000</capacity>
</item>
<item>
<facility>lwr2</facility>
<capacity>1000</capacity>
</item>
<item>
<facility>lwr3</facility>
<capacity>1000</capacity>
</item>
<item>
<facility>lwr4</facility>
<capacity>1000</capacity>
</item>
<item>
<facility>lwr5</facility>
<capacity>1000</capacity>
</item>
<item>
<facility>lwr6</facility>
<capacity>1000</capacity>
</item>
<item>
<facility>frmixer</facility>
<capacity>%s</capacity>
</item>
<item>
<facility>moxmixer</facility>
<capacity>%s</capacity>
</item>
</facility_capacity>
<facility_pref>
<item>
<facility>fr</facility>
<pref>(t-959)/np.abs(t-959)</pref>
</item>
<item>
<facility>moxlwr</facility>
<pref>(t-959)/np.abs(t-959)</pref>
</item>
<item>
<facility>lwr1</facility>
<pref>-1</pref>
</item>
<item>
<facility>lwr2</facility>
<pref>-1</pref>
</item>
<item>
<facility>lwr3</facility>
<pref>-1</pref>
</item>
<item>
<facility>lwr4</facility>
<pref>-1</pref>
</item>
<item>
<facility>lwr5</facility>
<pref>-1</pref>
</item>
<item>
<facility>lwr6</facility>
<pref>-1</pref>
</item>
</facility_pref>
<facility_sharing>
<item>
<facility>fr</facility>
<percentage>85</percentage>
</item>
<item>
<facility>moxlwr</facility>
<percentage>15</percentage>
</item>
</facility_sharing>
<buffer_type>
<item>
<commod>POWER</commod>
<type>abs</type>
</item>
</buffer_type>
<supply_buffer>
<item>
<commod>POWER</commod>
<buffer>%s</buffer>
</item>
</supply_buffer>
</DemandDrivenDeploymentInst>
</config>
<name>timeseriesinst</name>
<initialfacilitylist>
<entry>
<number>10</number>
<prototype>lwr1</prototype>
</entry>
<entry>
<number>10</number>
<prototype>lwr2</prototype>
</entry>
<entry>
<number>10</number>
<prototype>lwr3</prototype>
</entry>
<entry>
<number>10</number>
<prototype>lwr4</prototype>
</entry>
<entry>
<number>10</number>
<prototype>lwr5</prototype>
</entry>
<entry>
<number>10</number>
<prototype>lwr6</prototype>
</entry>
</initialfacilitylist>
</institution>
<institution>
<config>
<SupplyDrivenDeploymentInst>
<calc_method>%s</calc_method>
<steps>1</steps>
<back_steps>2</back_steps>
<facility_commod>
<item>
<facility>lwrstorage</facility>
<commod>lwrout</commod>
</item>
<item>
<facility>frstorage</facility>
<commod>frout</commod>
</item>
<item>
<facility>moxstorage</facility>
<commod>moxout</commod>
</item>
<item>
<facility>lwrreprocessing</facility>
<commod>lwrstorageout</commod>
</item>
<item>
<facility>frreprocessing</facility>
<commod>frstorageout</commod>
</item>
<item>
<facility>moxreprocessing</facility>
<commod>moxstorageout</commod>
</item>
<item>
<facility>lwrsink</facility>
<commod>lwrreprocessingwaste</commod>
</item>
<item>
<facility>frsink</facility>
<commod>frreprocessingwaste</commod>
</item>
<item>
<facility>moxsink</facility>
<commod>moxreprocessingwaste</commod>
</item>
</facility_commod>
<facility_capacity>
<item>
<facility>lwrstorage</facility>
<capacity>1e8</capacity>
</item>
<item>
<facility>frstorage</facility>
<capacity>1e8</capacity>
</item>
<item>
<facility>moxstorage</facility>
<capacity>1e8</capacity>
</item>
<item>
<facility>lwrreprocessing</facility>
<capacity>1e8</capacity>
</item>
<item>
<facility>frreprocessing</facility>
<capacity>1e8</capacity>
</item>
<item>
<facility>moxreprocessing</facility>
<capacity>1e8</capacity>
</item>
<item>
<facility>lwrsink</facility>
<capacity>1e20</capacity>
</item>
<item>
<facility>frsink</facility>
<capacity>1e20</capacity>
</item>
<item>
<facility>moxsink</facility>
<capacity>1e20</capacity>
</item>
</facility_capacity>
</SupplyDrivenDeploymentInst>
</config>
<name>supplydrivendeploymentinst</name>
</institution>
<name>SingleRegion</name>
</region>""" % (calc_method, demand_eq, thro_frmixer, thro_moxmixer,
buff_size, calc_method)
for calc_method in calc_methods:
input_file = name + buff_size + '-' + calc_method + '.xml'
output_file = name + buff_size + '-' + calc_method + '.sqlite'
with open(input_file, 'w') as f:
f.write('<simulation>\n')
f.write(control)
f.write(region[calc_method])
f.write(recipes)
f.write('</simulation>')
s = subprocess.check_output(['cyclus', '-o', output_file, input_file],
universal_newlines=True, env=ENV)
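# Optional sanity check (a sketch, not part of the original workflow): list the
# tables cyclus wrote into each output database, using only the standard library.
# Assumes the .sqlite files above were produced successfully.
#
#   import sqlite3
#   conn = sqlite3.connect(output_file)
#   tables = [row[0] for row in
#             conn.execute("SELECT name FROM sqlite_master WHERE type='table'")]
#   print('%s: %d tables' % (output_file, len(tables)))
#   conn.close()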
| 38.506523
| 78
| 0.532547
|
e41d57bf8e729641fb12777f3e64e64168272313
| 280
|
py
|
Python
|
6 kyu/Triangle type.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | 6
|
2020-09-03T09:32:25.000Z
|
2020-12-07T04:10:01.000Z
|
6 kyu/Triangle type.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | 1
|
2021-12-13T15:30:21.000Z
|
2021-12-13T15:30:21.000Z
|
6 kyu/Triangle type.py
|
mwk0408/codewars_solutions
|
9b4f502b5f159e68024d494e19a96a226acad5e5
|
[
"MIT"
] | null | null | null |
def triangle_type(a, b, c):
    sides = sorted([a, b, c])
    hyp, legs = sides[-1], sides[:2]     # longest side vs. the other two
    if hyp >= sum(legs):
        return 0                         # degenerate: not a valid triangle
    legs_sq = sum(s ** 2 for s in legs)
    return 1 if legs_sq > hyp ** 2 else 2 if legs_sq == hyp ** 2 else 3  # acute / right / obtuse
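# A minimal usage sketch.  The return convention (0 = not a triangle, 1 = acute,
# 2 = right, 3 = obtuse) is inferred from the comparisons above.
if __name__ == "__main__":
    assert triangle_type(7, 3, 2) == 0   # degenerate: 7 >= 3 + 2
    assert triangle_type(3, 4, 5) == 2   # right: 3**2 + 4**2 == 5**2
    assert triangle_type(5, 7, 8) == 1   # acute: 25 + 49 > 64
    assert triangle_type(2, 3, 4) == 3   # obtuse: 4 + 9 < 16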
| 56
| 181
| 0.603571
|
a5340ceb8a96afd3ce04ae600afcf419d265289e
| 3,218
|
py
|
Python
|
tensorflow/lite/testing/op_tests/gather.py
|
carchrae/tensorflow
|
6a69a6b2e286b14ac9ae813998bb0d78b6fee440
|
[
"Apache-2.0"
] | 12
|
2020-12-28T18:42:10.000Z
|
2022-03-24T17:34:21.000Z
|
tensorflow/lite/testing/op_tests/gather.py
|
sagol/tensorflow
|
04f2870814d2773e09dcfa00cbe76a66a2c4de88
|
[
"Apache-2.0"
] | 58
|
2021-11-22T05:41:28.000Z
|
2022-01-19T01:33:40.000Z
|
tensorflow/lite/testing/op_tests/gather.py
|
sagol/tensorflow
|
04f2870814d2773e09dcfa00cbe76a66a2c4de88
|
[
"Apache-2.0"
] | 3
|
2020-11-13T09:01:58.000Z
|
2021-08-02T08:04:14.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for gather."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_gather_tests(options):
"""Make a set of tests to do gather."""
test_parameters = [
{
"params_dtype": [tf.float32, tf.int32, tf.int64],
"params_shape": [[1, 2, 20]],
"indices_dtype": [tf.int32, tf.int64],
"indices_shape": [[3], [5]],
"axis": [-1, 0, 1],
"constant_params": [False, True],
},
{
"params_dtype": [tf.string],
"params_shape": [[8]],
"indices_dtype": [tf.int32],
"indices_shape": [[3], [3, 2]],
"axis": [0],
"constant_params": [False, True],
}
]
def build_graph(parameters):
"""Build the gather op testing graph."""
inputs = []
if parameters["constant_params"]:
params = create_tensor_data(parameters["params_dtype"],
parameters["params_shape"])
else:
params = tf.compat.v1.placeholder(
dtype=parameters["params_dtype"],
name="params",
shape=parameters["params_shape"])
inputs.append(params)
indices = tf.compat.v1.placeholder(
dtype=parameters["indices_dtype"],
name="indices",
shape=parameters["indices_shape"])
inputs.append(indices)
axis = min(len(parameters["params_shape"]), parameters["axis"])
out = tf.gather(params, indices, axis=axis)
return inputs, [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = []
if not parameters["constant_params"]:
params = create_tensor_data(parameters["params_dtype"],
parameters["params_shape"])
input_values.append(params)
indices = create_tensor_data(parameters["indices_dtype"],
parameters["indices_shape"], 0,
parameters["params_shape"][0] - 1)
input_values.append(indices)
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=0)
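# For reference, a minimal sketch of the gather semantics exercised above
# (assumes eager execution; not part of the generated zip tests):
#
#   params = tf.constant([[1, 2, 3],
#                         [4, 5, 6]])
#   indices = tf.constant([1, 0])
#   tf.gather(params, indices, axis=0)  # -> [[4, 5, 6], [1, 2, 3]]
#   tf.gather(params, indices, axis=1)  # -> [[2, 1], [5, 4]]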
| 35.362637
| 80
| 0.63238
|
814df019d1f823063ac72fb938da1ce8eb8a8f21
| 5,635
|
py
|
Python
|
tests/forecast/test_baggingtrees.py
|
cnll0075/Merlion
|
37fb75ccb204d128fde8ad4230f7893da724cf7c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/forecast/test_baggingtrees.py
|
cnll0075/Merlion
|
37fb75ccb204d128fde8ad4230f7893da724cf7c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/forecast/test_baggingtrees.py
|
cnll0075/Merlion
|
37fb75ccb204d128fde8ad4230f7893da724cf7c
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Copyright (c) 2022 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
import logging
from os.path import abspath, dirname, join
import sys
import unittest
from merlion.utils import TimeSeries
from ts_datasets.forecast import SeattleTrail
from merlion.transform.normalize import MinMaxNormalize
from merlion.transform.sequence import TransformSequence
from merlion.transform.resample import TemporalResample
from merlion.transform.bound import LowerUpperClip
from merlion.transform.moving_average import DifferenceTransform
from merlion.models.forecast.trees import RandomForestForecaster, RandomForestForecasterConfig
from merlion.models.forecast.seq_ar_common import gen_next_seq_label_pairs
logger = logging.getLogger(__name__)
rootdir = dirname(dirname(dirname(abspath(__file__))))
class TestRandomForestForecaster(unittest.TestCase):
"""
    We test data loading, model instantiation, and forecasting consistency. In particular:
    (1) load a test dataset
    (2) transform the data
    (3) instantiate the model and train it
    (4) forecast, and check that the forecast agrees with the reference
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.max_forecast_steps = 2
self.maxlags = 6
self.i = 0
# t = int(datetime(2019, 1, 1, 0, 0, 0).timestamp())
dataset = "seattle_trail"
d, md = SeattleTrail(rootdir=join(rootdir, "data", "multivariate", dataset))[0]
d_uni = d["BGT North of NE 70th Total"]
t = int(d[md["trainval"]].index[-1].to_pydatetime().timestamp())
data = TimeSeries.from_pd(d)
cleanup_transform = TransformSequence(
[TemporalResample(missing_value_policy="FFill"), LowerUpperClip(upper=300), DifferenceTransform()]
)
cleanup_transform.train(data)
data = cleanup_transform(data)
train_data, test_data = data.bisect(t)
minmax_transform = MinMaxNormalize()
minmax_transform.train(train_data)
self.train_data_norm = minmax_transform(train_data)
self.test_data_norm = minmax_transform(test_data)
data_uni = TimeSeries.from_pd(d_uni)
cleanup_transform = TransformSequence(
[TemporalResample(missing_value_policy="FFill"), LowerUpperClip(upper=300), DifferenceTransform()]
)
cleanup_transform.train(data_uni)
data_uni = cleanup_transform(data_uni)
train_data_uni, test_data_uni = data_uni.bisect(t)
minmax_transform = MinMaxNormalize()
minmax_transform.train(train_data_uni)
self.train_data_uni_norm = minmax_transform(train_data_uni)
self.test_data_uni_norm = minmax_transform(test_data_uni)
self.model = RandomForestForecaster(
RandomForestForecasterConfig(
max_forecast_steps=self.max_forecast_steps,
maxlags=self.maxlags,
target_seq_index=self.i,
sampling_mode="stats",
prediction_stride=1,
n_estimators=20,
)
)
def test_forecast_multi(self):
logger.info("Training model...")
yhat, _ = self.model.train(self.train_data_norm)
name = self.model.target_name
self.assertAlmostEqual(yhat.univariates[name].np_values.mean(), 0.50, 1)
self.assertEqual(len(self.model._forecast), self.max_forecast_steps)
self.assertAlmostEqual(self.model._forecast.mean(), 0.50, 1)
testing_data_gen = gen_next_seq_label_pairs(self.test_data_norm, self.i, self.maxlags, self.max_forecast_steps)
testing_instance, testing_label = next(testing_data_gen)
pred, _ = self.model.forecast(testing_label.time_stamps, testing_instance)
self.assertEqual(len(pred), self.max_forecast_steps)
pred = pred.univariates[name].np_values
self.assertAlmostEqual(pred.mean(), 0.50, 1)
# save and load
self.model.save(dirname=join(rootdir, "tmp", "randomforestforecaster"))
loaded_model = RandomForestForecaster.load(dirname=join(rootdir, "tmp", "randomforestforecaster"))
new_pred, _ = loaded_model.forecast(testing_label.time_stamps, testing_instance)
self.assertEqual(len(new_pred), self.max_forecast_steps)
new_pred = new_pred.univariates[name].np_values
self.assertAlmostEqual(pred.mean(), new_pred.mean(), 5)
def test_forecast_uni(self):
logger.info("Training model...")
self.model.config.prediction_stride = 2
yhat, _ = self.model.train(self.train_data_uni_norm)
name = self.model.target_name
self.assertAlmostEqual(yhat.univariates[name].np_values.mean(), 0.50, 1)
self.assertEqual(len(self.model._forecast), self.max_forecast_steps)
self.assertAlmostEqual(self.model._forecast.mean(), 0.50, 1)
testing_data_gen = gen_next_seq_label_pairs(
self.test_data_uni_norm, self.i, self.maxlags, self.max_forecast_steps
)
testing_instance, testing_label = next(testing_data_gen)
pred, _ = self.model.forecast(testing_label.time_stamps, testing_instance)
self.assertEqual(len(pred), self.max_forecast_steps)
pred = pred.univariates[name].np_values
self.assertAlmostEqual(pred.mean(), 0.50, 1)
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", stream=sys.stdout, level=logging.DEBUG
)
unittest.main()
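# To run this module on its own (a sketch; assumes the SeattleTrail data has
# been placed under <repo>/data/multivariate/seattle_trail):
#
#   python -m pytest tests/forecast/test_baggingtrees.py -v
#   # or, via the unittest entry point above:
#   python tests/forecast/test_baggingtrees.py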
| 42.368421
| 119
| 0.701331
|
c2e76b3344f31d19b4e3dfb380849c69a281fc4d
| 65,421
|
py
|
Python
|
osh/cmd_exec.py
|
adisbladis/oil
|
8ae78500da543dfa899404bdca830b90277d17ad
|
[
"Apache-2.0"
] | null | null | null |
osh/cmd_exec.py
|
adisbladis/oil
|
8ae78500da543dfa899404bdca830b90277d17ad
|
[
"Apache-2.0"
] | null | null | null |
osh/cmd_exec.py
|
adisbladis/oil
|
8ae78500da543dfa899404bdca830b90277d17ad
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2
# Copyright 2016 Andy Chu. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
"""
cmd_exec.py -- Interpreter for the command language.
Problems:
$ < Makefile cat | < NOTES.txt head
This just does head? Last one wins.
"""
from __future__ import print_function
import resource
import time
import sys
from _devbuild.gen.id_kind_asdl import Id
from _devbuild.gen.syntax_asdl import (
command_e, command__Proc, redir_e, assign_op_e, source, proc_sig_e,
)
from _devbuild.gen.syntax_asdl import word, command_t
from _devbuild.gen.runtime_asdl import (
lvalue, lvalue_e,
value, value_e, value_t,
redirect, scope_e, var_flags_e, builtin_e,
arg_vector, cmd_value, cmd_value_e,
)
from _devbuild.gen.types_asdl import redir_arg_type_e
from asdl import runtime
from core import main_loop
from core import process
from core import ui
from core import util
from core.util import log, e_die
from core.meta import REDIR_ARG_TYPES, REDIR_DEFAULT_FD
from frontend import args
from frontend import reader
from oil_lang import builtin_oil
from oil_lang import objects
from osh import braces
from osh import builtin
from osh import builtin_pure
from osh import expr_eval
from osh import state
from osh import word_
import posix_ as posix
try:
import libc # for fnmatch
except ImportError:
from benchmarks import fake_libc as libc # type: ignore
from typing import List, Dict, Any
# These are nodes that execute more than one COMMAND. DParen doesn't
# count because there are no commands.
# - AndOr has multiple commands, but uses exit code in boolean way
_DISALLOWED = (
command_e.DoGroup, # covers ForEach and ForExpr, but not WhileUntil/If
command_e.BraceGroup, command_e.Subshell,
command_e.WhileUntil, command_e.If, command_e.Case,
command_e.TimeBlock,
command_e.CommandList, # Happens in $(command sub)
)
def _DisallowErrExit(node):
# type: (command_t) -> bool
if node.tag in _DISALLOWED:
return True
# '! foo' is a pipeline according to the POSIX shell grammar, but it's NOT
# disallowed! It's not more than one command.
if node.tag == command_e.Pipeline and len(node.children) > 1:
return True
return False
class _ControlFlow(RuntimeError):
"""Internal execption for control flow.
break and continue are caught by loops, return is caught by functions.
NOTE: I tried representing this in ASDL, but in Python the base class has to
be BaseException. Also, 'token' is in syntax.asdl but not runtime.asdl.
cflow =
-- break, continue, return, exit
Shell(token keyword, int arg)
-- break, continue
| OilLoop(token keyword)
-- return
| OilReturn(token keyword, value val)
"""
def __init__(self, token, arg):
"""
Args:
token: the keyword token
"""
self.token = token
self.arg = arg
def IsReturn(self):
return self.token.id == Id.ControlFlow_Return
def IsBreak(self):
return self.token.id == Id.ControlFlow_Break
def IsContinue(self):
return self.token.id == Id.ControlFlow_Continue
def StatusCode(self):
assert self.IsReturn()
return self.arg
def __repr__(self):
return '<_ControlFlow %s>' % self.token
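# Illustration (a sketch): the loop constructs in _Dispatch below consume this
# exception with a handler of this shape.
#
#   try:
#     status = self._Execute(node.body)
#   except _ControlFlow as e:
#     if e.IsBreak():
#       status = 0
#       break
#     elif e.IsContinue():
#       status = 0
#     else:  # 'return' needs to pop up further, to the enclosing function
#       raise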
class Deps(object):
def __init__(self):
self.splitter = None
self.word_ev = None
self.arith_ev = None
self.bool_ev = None
self.expr_ev = None # for Oil expressions
self.ex = None
self.prompt_ev = None
self.search_path = None
self.ext_prog = None
self.dumper = None
self.tracer = None
self.errfmt = None
self.debug_f = None
self.trace_f = None
self.traps = None # signal/hook name -> callable
self.trap_nodes = None # list of nodes, appended to by signal handlers
self.job_state = None
self.waiter = None
def _PyObjectToVal(py_val):
"""
Maintain the 'value' invariant in osh/runtime.asdl.
TODO: Move this to Mem and combine with LookupVar in oil_lang/expr_eval.py.
They are opposites.
"""
if isinstance(py_val, str): # var s = "hello $name"
val = value.Str(py_val)
elif isinstance(py_val, objects.StrArray): # var a = @(a b)
# It's safe to convert StrArray to MaybeStrArray.
val = value.MaybeStrArray(py_val)
elif isinstance(py_val, dict): # var d = {name: "bob"}
# TODO: Is this necessary? Shell assoc arrays aren't nested and don't have
# arbitrary values.
val = value.AssocArray(py_val)
else:
val = value.Obj(py_val)
return val
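# Examples of the mapping above (a sketch):
#   'hello $name'             -> value.Str(...)
#   objects.StrArray([...])   -> value.MaybeStrArray([...])
#   {'name': 'bob'}           -> value.AssocArray({...})
#   anything else, e.g. 42    -> value.Obj(42)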
class Executor(object):
"""Executes the program by tree-walking.
It also does some double-dispatch by passing itself into Eval() for
Compound/WordPart.
"""
def __init__(self, mem, fd_state, procs, builtins, exec_opts, parse_ctx,
exec_deps):
"""
Args:
mem: Mem instance for storing variables
fd_state: FdState() for managing descriptors
procs: dict of SHELL functions or 'procs'
builtins: dict of builtin callables (TODO: migrate all builtins here)
exec_opts: ExecOpts
parse_ctx: for instantiating parsers
exec_deps: A bundle of stateless code
"""
self.mem = mem
self.fd_state = fd_state
self.procs = procs
self.builtins = builtins
# This is for shopt and set -o. They are initialized by flags.
self.exec_opts = exec_opts
self.parse_ctx = parse_ctx
self.arena = parse_ctx.arena
self.aliases = parse_ctx.aliases # alias name -> string
self.dumper = exec_deps.dumper
self.errfmt = exec_deps.errfmt
self.debug_f = exec_deps.debug_f # Used by ShellFuncAction too
self.word_ev = exec_deps.word_ev
self.arith_ev = exec_deps.arith_ev
self.bool_ev = exec_deps.bool_ev
self.expr_ev = exec_deps.expr_ev
self.search_path = exec_deps.search_path
self.ext_prog = exec_deps.ext_prog
self.traps = exec_deps.traps
self.trap_nodes = exec_deps.trap_nodes
self.targets = [] # make syntax enters stuff here -- Target()
# metaprogramming or regular target syntax
# Whether argv[0] is make determines if it is executed
# sleep 5 & puts a (PID, job#) entry here. And then "jobs" displays it.
self.job_state = exec_deps.job_state
self.waiter = exec_deps.waiter
self.tracer = exec_deps.tracer
self.loop_level = 0 # for detecting bad top-level break/continue
self.check_command_sub_status = False # a hack
def _EvalHelper(self, c_parser, src):
self.arena.PushSource(src)
try:
return main_loop.Batch(self, c_parser, self.arena)
finally:
self.arena.PopSource()
def _Eval(self, arg_vec):
if self.exec_opts.strict_eval_builtin:
# To be less confusing, eval accepts EXACTLY one string arg.
n = len(arg_vec.strs)
if n != 2:
raise args.UsageError('requires exactly 1 argument, got %d' % (n-1))
code_str = arg_vec.strs[1]
else:
code_str = ' '.join(arg_vec.strs[1:])
eval_spid = arg_vec.spids[0]
line_reader = reader.StringLineReader(code_str, self.arena)
c_parser = self.parse_ctx.MakeOshParser(line_reader)
src = source.EvalArg(eval_spid)
return self._EvalHelper(c_parser, src)
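  # Example (a sketch of the branch above): with strict_eval_builtin enabled,
  #   eval 'echo hi'      # OK: exactly one string argument
  #   eval 'echo' 'hi'    # usage error (2 arguments)
  # and with it disabled, the arguments are joined: code_str == 'echo hi'.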
def ParseTrapCode(self, code_str):
"""
Returns:
A node, or None if the code is invalid.
"""
line_reader = reader.StringLineReader(code_str, self.arena)
c_parser = self.parse_ctx.MakeOshParser(line_reader)
# TODO: the SPID should be passed through argv
self.arena.PushSource(source.Trap(runtime.NO_SPID))
try:
try:
node = main_loop.ParseWholeFile(c_parser)
except util.ParseError as e:
ui.PrettyPrintError(e, self.arena)
return None
finally:
self.arena.PopSource()
return node
def _Source(self, arg_vec):
argv = arg_vec.strs
call_spid = arg_vec.spids[0]
try:
path = argv[1]
except IndexError:
raise args.UsageError('missing required argument')
resolved = self.search_path.Lookup(path, exec_required=False)
if resolved is None:
resolved = path
try:
f = self.fd_state.Open(resolved) # Shell can't use descriptors 3-9
except OSError as e:
self.errfmt.Print('source %r failed: %s', path, posix.strerror(e.errno),
span_id=arg_vec.spids[1])
return 1
try:
line_reader = reader.FileLineReader(f, self.arena)
c_parser = self.parse_ctx.MakeOshParser(line_reader)
# A sourced module CAN have a new arguments array, but it always shares
# the same variable scope as the caller. The caller could be at either a
# global or a local scope.
source_argv = argv[2:]
self.mem.PushSource(path, source_argv)
try:
status = self._EvalHelper(c_parser, source.SourcedFile(path, call_spid))
finally:
self.mem.PopSource(source_argv)
return status
except _ControlFlow as e:
if e.IsReturn():
return e.StatusCode()
else:
raise
finally:
f.close()
def _Exec(self, arg_vec):
# Apply redirects in this shell. # NOTE: Redirects were processed earlier.
if len(arg_vec.strs) == 1:
return 0
environ = self.mem.GetExported()
cmd = arg_vec.strs[1]
argv0_path = self.search_path.CachedLookup(cmd)
if argv0_path is None:
self.errfmt.Print('exec: %r not found', cmd,
span_id=arg_vec.spids[1])
sys.exit(127) # exec never returns
# shift off 'exec'
arg_vec2 = arg_vector(arg_vec.strs[1:], arg_vec.spids[1:])
self.ext_prog.Exec(argv0_path, arg_vec2, environ) # NEVER RETURNS
def _RunBuiltinAndRaise(self, builtin_id, cmd_val, fork_external):
"""
Raises:
args.UsageError
"""
# Shift one arg. Builtins don't need to know their own name.
argv = cmd_val.argv[1:]
# STUB for compatibility
arg_vec = arg_vector(cmd_val.argv, cmd_val.arg_spids)
# TODO: For now, hard-code the builtins that take a block, and pass them
# cmd_val.
# Later, we should give builtins signatures like this and check them:
#
# proc cd(argv Array[Str], b Block) {
# do evaluate(b, locals, globals)
# }
# Most builtins dispatch with a dictionary
builtin_func = self.builtins.get(builtin_id)
if builtin_func is not None:
# Pass the block
if isinstance(builtin_func,
(builtin.Cd, builtin_oil.Use, builtin_oil.Json)):
status = builtin_func(cmd_val)
else:
status = builtin_func(arg_vec)
# Some builtins "belong" to the executor.
elif builtin_id == builtin_e.EXEC:
status = self._Exec(arg_vec) # may never return
# But if it returns, then we want to permanently apply the redirects
# associated with it.
self.fd_state.MakePermanent()
elif builtin_id == builtin_e.EVAL:
status = self._Eval(arg_vec)
elif builtin_id in (builtin_e.SOURCE, builtin_e.DOT):
status = self._Source(arg_vec)
elif builtin_id == builtin_e.COMMAND:
# TODO: How do we handle fork_external? It doesn't fit the common
# signature. We also don't handle 'command local', etc.
b = builtin_pure.Command(self, self.procs, self.aliases,
self.search_path)
status = b(arg_vec, fork_external)
elif builtin_id == builtin_e.BUILTIN: # NOTE: uses early return style
if not argv:
return 0 # this could be an error in strict mode?
name = arg_vec.strs[1]
# Run regular builtin or special builtin
to_run = builtin.Resolve(name)
if to_run == builtin_e.NONE:
to_run = builtin.ResolveSpecial(name)
if to_run == builtin_e.NONE:
span_id = arg_vec.spids[1]
if builtin.ResolveAssign(name) != builtin_e.NONE:
# NOTE: There's a similar restriction for 'command'
self.errfmt.Print("Can't run assignment builtin recursively",
span_id=span_id)
else:
self.errfmt.Print("%r isn't a shell builtin", span_id=span_id)
return 1
cmd_val2 = cmd_value.Argv(cmd_val.argv[1:], cmd_val.arg_spids[1:],
cmd_val.block)
status = self._RunBuiltinAndRaise(to_run, cmd_val2, fork_external)
else:
raise AssertionError('Unhandled builtin: %s' % builtin_id)
assert isinstance(status, int)
return status
def _RunBuiltin(self, builtin_id, cmd_val, fork_external):
self.errfmt.PushLocation(cmd_val.arg_spids[0])
try:
status = self._RunBuiltinAndRaise(builtin_id, cmd_val, fork_external)
except args.UsageError as e:
arg0 = cmd_val.argv[0]
# fill in default location. e.g. osh/state.py raises UsageError without
# span_id.
if e.span_id == runtime.NO_SPID:
e.span_id = self.errfmt.CurrentLocation()
# e.g. 'type' doesn't accept flag '-x'
self.errfmt.Print(e.msg, prefix='%r ' % arg0, span_id=e.span_id)
status = 2 # consistent error code for usage error
except KeyboardInterrupt:
if self.exec_opts.interactive:
print() # newline after ^C
status = 130 # 128 + 2 for SIGINT
else:
# Abort a batch script
raise
finally:
# Flush stdout after running ANY builtin. This is very important!
# Silence errors like we did from 'echo'.
try:
sys.stdout.flush()
except IOError as e:
pass
self.errfmt.PopLocation()
return status
def _RunAssignBuiltin(self, cmd_val):
"""Run an assignment builtin. Except blocks copied from _RunBuiltin above."""
self.errfmt.PushLocation(cmd_val.arg_spids[0]) # defult
builtin_func = self.builtins[cmd_val.builtin_id] # must be there
try:
status = builtin_func(cmd_val)
except args.UsageError as e: # Copied from _RunBuiltin
arg0 = cmd_val.argv[0]
if e.span_id == runtime.NO_SPID: # fill in default location.
e.span_id = self.errfmt.CurrentLocation()
self.errfmt.Print(e.msg, prefix='%r ' % arg0, span_id=e.span_id)
status = 2 # consistent error code for usage error
except KeyboardInterrupt:
if self.exec_opts.interactive:
print() # newline after ^C
status = 130 # 128 + 2 for SIGINT
else:
raise
finally:
try:
sys.stdout.flush()
except IOError as e:
pass
self.errfmt.PopLocation()
return status
def _PushErrExit(self, span_id):
self.exec_opts.errexit.Push(span_id)
def _PopErrExit(self):
self.exec_opts.errexit.Pop()
# TODO: Also change to BareAssign (set global or mutate local) and
# KeywordAssign. The latter may have flags too.
def _SpanIdForShAssignment(self, node):
# TODO: Share with tracing (SetCurrentSpanId) and _CheckStatus
return node.spids[0]
def _CheckStatus(self, status, node):
"""Raises ErrExitFailure, maybe with location info attached."""
if self.exec_opts.ErrExit() and status != 0:
# NOTE: Sometimes location info is duplicated, like on UsageError, or a
# bad redirect. Also, pipelines can fail twice.
if node.tag == command_e.Simple:
reason = 'command in '
span_id = word_.LeftMostSpanForWord(node.words[0])
elif node.tag == command_e.ShAssignment:
reason = 'assignment in '
span_id = self._SpanIdForShAssignment(node)
elif node.tag == command_e.Subshell:
reason = 'subshell invoked from '
span_id = node.spids[0]
elif node.tag == command_e.Pipeline:
# The whole pipeline can fail separately
reason = 'pipeline invoked from '
span_id = node.spids[0] # only one spid
else:
# NOTE: The fallback of CurrentSpanId() fills this in.
reason = ''
span_id = runtime.NO_SPID
raise util.ErrExitFailure(
'Exiting with status %d (%sPID %d)', status, reason, posix.getpid(),
span_id=span_id, status=status)
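  # Example (a sketch): under 'set -e', a failing simple command such as
  #   grep foo missing.txt
  # reaches _CheckStatus with a non-zero status and raises ErrExitFailure,
  # blaming the span of the first word of the command.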
def _EvalRedirect(self, n):
fd = REDIR_DEFAULT_FD[n.op.id] if n.fd == runtime.NO_SPID else n.fd
if n.tag == redir_e.Redir:
redir_type = REDIR_ARG_TYPES[n.op.id] # could be static in the LST?
if redir_type == redir_arg_type_e.Path:
# NOTES
# - no globbing. You can write to a file called '*.py'.
# - set -o strict-array prevents joining by spaces
val = self.word_ev.EvalWordToString(n.arg_word)
filename = val.s
if not filename:
# Whether this is fatal depends on errexit.
raise util.RedirectEvalError(
"Redirect filename can't be empty", word=n.arg_word)
return redirect.Path(n.op.id, fd, filename, n.op.span_id)
elif redir_type == redir_arg_type_e.Desc: # e.g. 1>&2
val = self.word_ev.EvalWordToString(n.arg_word)
t = val.s
if not t:
raise util.RedirectEvalError(
"Redirect descriptor can't be empty", word=n.arg_word)
return None
try:
target_fd = int(t)
except ValueError:
raise util.RedirectEvalError(
"Redirect descriptor should look like an integer, got %s", val,
word=n.arg_word)
return None
return redirect.FileDesc(n.op.id, fd, target_fd, n.op.span_id)
elif redir_type == redir_arg_type_e.Here: # here word
val = self.word_ev.EvalWordToString(n.arg_word)
assert val.tag == value_e.Str, val
# NOTE: bash and mksh both add \n
return redirect.HereDoc(fd, val.s + '\n', n.op.span_id)
else:
raise AssertionError('Unknown redirect op')
elif n.tag == redir_e.HereDoc:
# HACK: Wrap it in a word to evaluate.
w = word.Compound(n.stdin_parts)
val = self.word_ev.EvalWordToString(w)
assert val.tag == value_e.Str, val
return redirect.HereDoc(fd, val.s, n.op.span_id)
else:
raise AssertionError('Unknown redirect type')
def _EvalRedirects(self, node):
"""Evaluate redirect nodes to concrete objects.
We have to do this every time, because you could have something like:
for i in a b c; do
echo foo >$i
done
    Does it make sense to just have RedirectNode.Eval?  Nah I think the
Redirect() abstraction in process.py is useful. It has a lot of methods.
Raises:
RedirectEvalError
"""
return [self._EvalRedirect(redir) for redir in node.redirects]
def _MakeProcess(self, node, parent_pipeline=None, inherit_errexit=True):
"""
Assume we will run the node in another process. Return a process.
"""
if node.tag == command_e.ControlFlow:
# Pipeline or subshells with control flow are invalid, e.g.:
# - break | less
# - continue | less
# - ( return )
# NOTE: This could be done at parse time too.
e_die('Invalid control flow %r in pipeline / subshell / background',
node.token.val, token=node.token)
# NOTE: If ErrExit(), we could be verbose about subprogram errors? This
# only really matters when executing 'exit 42', because the child shell
# inherits errexit and will be verbose. Other notes:
#
# - We might want errors to fit on a single line so they don't get
# interleaved.
# - We could turn the `exit` builtin into a FatalRuntimeError exception and
# get this check for "free".
thunk = process.SubProgramThunk(self, node,
inherit_errexit=inherit_errexit)
p = process.Process(thunk, self.job_state, parent_pipeline=parent_pipeline)
return p
def _RunSimpleCommand(self, cmd_val, fork_external):
"""Private interface to run a simple command (including assignment)."""
if cmd_val.tag == cmd_value_e.Argv:
return self.RunSimpleCommand(cmd_val, fork_external)
elif cmd_val.tag == cmd_value_e.Assign:
return self._RunAssignBuiltin(cmd_val)
else:
raise AssertionError
def RunSimpleCommand(self, cmd_val, fork_external, funcs=True):
"""Public interface to run a simple command (excluding assignment)
Args:
fork_external: for subshell ( ls / ) or ( command ls / )
"""
assert cmd_val.tag == cmd_value_e.Argv
argv = cmd_val.argv
span_id = cmd_val.arg_spids[0] if cmd_val.arg_spids else runtime.NO_SPID
# This happens when you write "$@" but have no arguments.
if not argv:
if self.exec_opts.strict_argv:
e_die("Command evaluated to an empty argv array",
span_id=span_id)
else:
return 0 # status 0, or skip it?
arg0 = argv[0]
builtin_id = builtin.ResolveAssign(arg0)
if builtin_id != builtin_e.NONE:
# command readonly is disallowed, for technical reasons. Could relax it
# later.
self.errfmt.Print("Can't run assignment builtin recursively",
span_id=span_id)
return 1
builtin_id = builtin.ResolveSpecial(arg0)
if builtin_id != builtin_e.NONE:
status = self._RunBuiltin(builtin_id, cmd_val, fork_external)
# TODO: Enable this and fix spec test failures.
# Also update _SPECIAL_BUILTINS in osh/builtin.py.
#if status != 0:
# e_die('special builtin failed', status=status)
return status
# Builtins like 'true' can be redefined as functions.
if funcs:
# TODO: if shopt -s namespaces, then look up in current namespace FIRST.
#
      # Then fall back on self.procs.
      #
      # honestly there is no real chance of collision because
# foo-bar() {} can't be accessed anyway
# functions can have hyphens, but variables can't
func_node = self.procs.get(arg0)
if func_node is not None:
eo = self.exec_opts
if eo.strict_errexit and eo.errexit.SpidIfDisabled() != runtime.NO_SPID:
# NOTE: This would be checked below, but this gives a better error
# message.
e_die("can't disable errexit running a function. "
"Maybe wrap the function in a process with the at-splice "
"pattern.", span_id=span_id)
# NOTE: Functions could call 'exit 42' directly, etc.
status = self._RunProc(func_node, argv[1:])
return status
# TODO:
# look up arg0 in global namespace? And see if the type is value.Obj
# And it's a proc?
# isinstance(val.obj, objects.Proc)
val = self.mem.GetVar(arg0)
if val.tag == value_e.Obj and isinstance(val.obj, objects.Proc):
status = self._RunOilProc(val.obj, argv[1:])
return status
builtin_id = builtin.Resolve(arg0)
if builtin_id != builtin_e.NONE:
return self._RunBuiltin(builtin_id, cmd_val, fork_external)
environ = self.mem.GetExported() # Include temporary variables
if cmd_val.block:
e_die('Unexpected block passed to external command %r', arg0,
span_id=cmd_val.block.spids[0])
# Resolve argv[0] BEFORE forking.
argv0_path = self.search_path.CachedLookup(arg0)
if argv0_path is None:
self.errfmt.Print('%r not found', arg0, span_id=span_id)
return 127
arg_vec = arg_vector(cmd_val.argv, cmd_val.arg_spids)
if fork_external:
thunk = process.ExternalThunk(self.ext_prog, argv0_path, arg_vec, environ)
p = process.Process(thunk, self.job_state)
status = p.Run(self.waiter)
return status
self.ext_prog.Exec(argv0_path, arg_vec, environ) # NEVER RETURNS
def _RunPipeline(self, node):
pi = process.Pipeline()
# First n-1 processes (which is empty when n == 1)
n = len(node.children)
for i in xrange(n - 1):
p = self._MakeProcess(node.children[i], parent_pipeline=pi)
pi.Add(p)
# Last piece of code is in THIS PROCESS. 'echo foo | read line; echo $line'
pi.AddLast((self, node.children[n-1]))
pipe_status = pi.Run(self.waiter, self.fd_state)
self.mem.SetPipeStatus(pipe_status)
if self.exec_opts.pipefail:
# The status is that of the last command that is non-zero.
status = 0
for st in pipe_status:
if st != 0:
status = st
else:
status = pipe_status[-1] # status of last one is pipeline status
return status
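  # Example of the status logic above (a sketch):
  #   pipe_status = [0, 2, 0]
  #   with 'set -o pipefail' the pipeline exits 2 (the last non-zero status);
  #   otherwise it exits pipe_status[-1], i.e. 0.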
def _RunJobInBackground(self, node):
# Special case for pipeline. There is some evidence here:
# https://www.gnu.org/software/libc/manual/html_node/Launching-Jobs.html#Launching-Jobs
#
# "You can either make all the processes in the process group be children
# of the shell process, or you can make one process in group be the
# ancestor of all the other processes in that group. The sample shell
# program presented in this chapter uses the first approach because it
# makes bookkeeping somewhat simpler."
if node.tag == command_e.Pipeline:
pi = process.Pipeline()
for child in node.children:
pi.Add(self._MakeProcess(child, parent_pipeline=pi))
pi.Start(self.waiter)
last_pid = pi.LastPid()
self.mem.last_bg_pid = last_pid # for $!
job_id = self.job_state.AddJob(pi) # show in 'jobs' list
log('[%%%d] Started Pipeline with PID %d', job_id, last_pid)
else:
# Problem: to get the 'set -b' behavior of immediate notifications, we
# have to register SIGCHLD. But then that introduces race conditions.
# If we haven't called Register yet, then we won't know who to notify.
#log('job state %s', self.job_state)
p = self._MakeProcess(node)
pid = p.Start()
self.mem.last_bg_pid = pid # for $!
job_id = self.job_state.AddJob(p) # show in 'jobs' list
log('[%%%d] Started PID %d', job_id, pid)
return 0
def _EvalTempEnv(self, more_env, flags):
"""For FOO=1 cmd."""
for env_pair in more_env:
val = self.word_ev.EvalWordToString(env_pair.val)
# Set each var so the next one can reference it. Example:
# FOO=1 BAR=$FOO ls /
self.mem.SetVar(lvalue.Named(env_pair.name), val, flags,
scope_e.LocalOnly)
def _Dispatch(self, node, fork_external):
# If we call RunCommandSub in a recursive call to the executor, this will
# be set true (if strict-errexit is false). But it only lasts for one
# command.
self.check_command_sub_status = False
#argv0 = None # for error message
check_errexit = False # for errexit
if node.tag == command_e.Simple:
check_errexit = True
# Find span_id for a basic implementation of $LINENO, e.g.
# PS4='+$SOURCE_NAME:$LINENO:'
# NOTE: osh2oil uses node.more_env, but we don't need that.
span_id = runtime.NO_SPID
if node.words:
span_id = word_.LeftMostSpanForWord(node.words[0])
elif node.redirects:
span_id = node.redirects[0].op # note: this could be a here doc?
self.mem.SetCurrentSpanId(span_id)
# PROBLEM: We want to log argv in 'xtrace' mode, but we may have already
# redirected here, which screws up logging. For example, 'echo hi
# >/dev/null 2>&1'. We want to evaluate argv and log it BEFORE applying
# redirects.
# Another problem:
# - tracing can be called concurrently from multiple processes, leading
# to overlap. Maybe have a mode that creates a file per process.
# xtrace-proc
# - line numbers for every command would be very nice. But then you have
# to print the filename too.
words = braces.BraceExpandWords(node.words)
cmd_val = self.word_ev.EvalWordSequence2(words, allow_assign=True)
# STUB for compatibility.
if cmd_val.tag == cmd_value_e.Argv:
argv = cmd_val.argv
cmd_val.block = node.block # may be None
else:
argv = ['TODO: trace string for assignment']
if node.block:
e_die("ShAssignment builtins don't accept blocks",
span_id=node.block.spids[0])
# This comes before evaluating env, in case there are problems evaluating
# it. We could trace the env separately? Also trace unevaluated code
# with set-o verbose?
self.tracer.OnSimpleCommand(argv)
# NOTE: RunSimpleCommand never returns when fork_external=False!
if node.more_env: # I think this guard is necessary?
is_other_special = False # TODO: There are other special builtins too!
if cmd_val.tag == cmd_value_e.Assign or is_other_special:
# Special builtins have their temp env persisted.
self._EvalTempEnv(node.more_env, ())
status = self._RunSimpleCommand(cmd_val, fork_external)
else:
self.mem.PushTemp()
try:
self._EvalTempEnv(node.more_env, (var_flags_e.Exported,))
status = self._RunSimpleCommand(cmd_val, fork_external)
finally:
self.mem.PopTemp()
else:
status = self._RunSimpleCommand(cmd_val, fork_external)
elif node.tag == command_e.ExpandedAlias:
# Expanded aliases need redirects and env bindings from the calling
# context, as well as redirects in the expansion!
# TODO: SetCurrentSpanId to OUTSIDE? Don't bother with stuff inside
      # expansion, since aliases are discouraged.
if node.more_env:
self.mem.PushTemp()
try:
self._EvalTempEnv(node.more_env, (var_flags_e.Exported,))
status = self._Execute(node.child)
finally:
self.mem.PopTemp()
else:
status = self._Execute(node.child)
elif node.tag == command_e.Sentence:
# Don't check_errexit since this isn't a real node!
if node.terminator.id == Id.Op_Semi:
status = self._Execute(node.child)
else:
status = self._RunJobInBackground(node.child)
elif node.tag == command_e.Pipeline:
check_errexit = True
if node.stderr_indices:
e_die("|& isn't supported", span_id=node.spids[0])
if node.negated:
self._PushErrExit(node.spids[0]) # ! spid
try:
status2 = self._RunPipeline(node)
finally:
self._PopErrExit()
# errexit is disabled for !.
check_errexit = False
status = 1 if status2 == 0 else 0
else:
status = self._RunPipeline(node)
elif node.tag == command_e.Subshell:
check_errexit = True
# This makes sure we don't waste a process if we'd launch one anyway.
p = self._MakeProcess(node.command_list)
status = p.Run(self.waiter)
elif node.tag == command_e.DBracket:
span_id = node.spids[0]
self.mem.SetCurrentSpanId(span_id)
check_errexit = True
result = self.bool_ev.Eval(node.expr)
status = 0 if result else 1
elif node.tag == command_e.DParen:
span_id = node.spids[0]
self.mem.SetCurrentSpanId(span_id)
check_errexit = True
i = self.arith_ev.Eval(node.child)
status = 0 if i != 0 else 1
elif node.tag == command_e.OilCondition:
      # TODO: Do we need location information? Yes, probably for exceptions in
# Oil expressions.
#span_id = node.spids[0]
#self.mem.SetCurrentSpanId(span_id)
obj = self.expr_ev.EvalExpr(node.e)
status = 0 if obj else 1
# TODO: Change x = 1 + 2*3 into its own Decl node.
elif node.tag == command_e.VarDecl and node.keyword is None:
self.mem.SetCurrentSpanId(node.lhs[0].name.span_id) # point to var name
# Note: there's only one LHS
lval = lvalue.Named(node.lhs[0].name.val)
py_val = self.expr_ev.EvalExpr(node.rhs)
val = _PyObjectToVal(py_val)
self.mem.SetVar(lval, val, (), scope_e.LocalOnly, keyword_id=None)
status = 0
elif node.tag == command_e.VarDecl:
self.mem.SetCurrentSpanId(node.keyword.span_id) # point to var
py_val = self.expr_ev.EvalExpr(node.rhs)
lvals = []
vals = []
if len(node.lhs) == 1: # TODO: optimize this common case (but measure)
lval = lvalue.Named(node.lhs[0].name.val)
val = _PyObjectToVal(py_val)
lvals.append(lval)
vals.append(val)
else:
it = py_val.__iter__()
for lhs in node.lhs:
lval = lvalue.Named(lhs.name.val)
val = _PyObjectToVal(it.next())
lvals.append(lval)
vals.append(val)
for lval, val in zip(lvals, vals):
self.mem.SetVar(
lval, val, (), scope_e.LocalOnly, keyword_id=node.keyword.id)
status = 0
elif node.tag == command_e.PlaceMutation:
self.mem.SetCurrentSpanId(node.keyword.span_id) # point to setvar/set
if node.op.id == Id.Arith_Equal:
py_val = self.expr_ev.EvalExpr(node.rhs)
lvals = []
vals = []
if len(node.lhs) == 1: # TODO: Optimize this common case (but measure)
# See ShAssignment
# EvalLhs
# EvalLhsAndLookup for +=
lval = self.expr_ev.EvalPlaceExpr(node.lhs[0])
val = _PyObjectToVal(py_val)
lvals.append(lval)
vals.append(val)
else:
it = py_val.__iter__()
for lhs in node.lhs:
lval = self.expr_ev.EvalPlaceExpr(lhs)
val = _PyObjectToVal(it.next())
lvals.append(lval)
vals.append(val)
# TODO: Change this to LocalOrGlobal
lookup_mode = scope_e.Dynamic
        # TODO: Resolve the asymmetry between Named vs ObjIndex,ObjAttr.
for lval, val in zip(lvals, vals):
if lval.tag == lvalue_e.ObjIndex:
lval.obj[lval.index] = val.obj
elif lval.tag == lvalue_e.ObjAttr:
setattr(lval.obj, lval.attr, val.obj)
else:
# top level variable
self.mem.SetVar(lval, val, (), lookup_mode, keyword_id=node.keyword.id)
# TODO: Other augmented assignments
elif node.op.id == Id.Arith_PlusEqual:
# NOTE: x, y += 1 in Python is a SYNTAX error, but it's checked in the
# transformer and not the grammar. We should do that too.
lval = lvalue.Named(node.lhs[0].name.val)
py_val = self.expr_ev.EvalExpr(node.rhs)
new_py_val = self.expr_ev.EvalPlusEquals(lval, py_val)
# This should only be an int or float, so we don't need the logic above
val = value.Obj(new_py_val)
# TODO: This should be LocalOrGlobal too
self.mem.SetVar(lval, val, (), scope_e.LocalOnly,
keyword_id=node.keyword.id)
else:
raise NotImplementedError(node.op)
status = 0 # TODO: what should status be?
elif node.tag == command_e.ShAssignment: # Only unqualified assignment
lookup_mode = scope_e.Dynamic
for pair in node.pairs:
# Use the spid of each pair.
self.mem.SetCurrentSpanId(pair.spids[0])
if pair.op == assign_op_e.PlusEqual:
assert pair.rhs, pair.rhs # I don't think a+= is valid?
val = self.word_ev.EvalRhsWord(pair.rhs)
old_val, lval = expr_eval.EvalLhsAndLookup(pair.lhs, self.arith_ev,
self.mem, self.exec_opts,
lookup_mode=lookup_mode)
sig = (old_val.tag, val.tag)
if sig == (value_e.Undef, value_e.Str):
pass # val is RHS
elif sig == (value_e.Undef, value_e.MaybeStrArray):
pass # val is RHS
elif sig == (value_e.Str, value_e.Str):
val = value.Str(old_val.s + val.s)
elif sig == (value_e.Str, value_e.MaybeStrArray):
e_die("Can't append array to string")
elif sig == (value_e.MaybeStrArray, value_e.Str):
e_die("Can't append string to array")
elif sig == (value_e.MaybeStrArray, value_e.MaybeStrArray):
val = value.MaybeStrArray(old_val.strs + val.strs)
else: # plain assignment
spid = pair.spids[0] # Source location for tracing
lval = expr_eval.EvalLhs(pair.lhs, self.arith_ev, self.mem, spid,
lookup_mode)
# RHS can be a string or array.
if pair.rhs:
val = self.word_ev.EvalRhsWord(pair.rhs)
assert isinstance(val, value_t), val
else: # e.g. 'readonly x' or 'local x'
val = None
# NOTE: In bash and mksh, declare -a myarray makes an empty cell with
# Undef value, but the 'array' attribute.
#log('setting %s to %s with flags %s', lval, val, flags)
flags = ()
self.mem.SetVar(lval, val, flags, lookup_mode)
self.tracer.OnShAssignment(lval, pair.op, val, flags, lookup_mode)
# PATCH to be compatible with existing shells: If the assignment had a
# command sub like:
#
# s=$(echo one; false)
#
# then its status will be in mem.last_status, and we can check it here.
# If there was NOT a command sub in the assignment, then we don't want to
# check it.
# Only do this if there was a command sub? How? Look at node?
# Set a flag in mem? self.mem.last_status or
if self.check_command_sub_status:
last_status = self.mem.LastStatus()
self._CheckStatus(last_status, node)
status = last_status # A global assignment shouldn't clear $?.
else:
status = 0
elif node.tag == command_e.Return:
val = self.expr_ev.EvalExpr(node.e)
raise _ControlFlow(node.keyword, val)
elif node.tag == command_e.Expr:
obj = self.expr_ev.EvalExpr(node.e)
if node.keyword.id == Id.KW_Pp:
# NOTE: It would be nice to unify this with 'repr', but there isn't a
# good way to do it with the value/PyObject split.
print('(%s) %s' % (obj.__class__.__name__, repr(obj)))
# TODO: What about exceptions? They just throw?
status = 0
elif node.tag == command_e.ControlFlow:
tok = node.token
if node.arg_word: # Evaluate the argument
val = self.word_ev.EvalWordToString(node.arg_word)
assert val.tag == value_e.Str
try:
arg = int(val.s) # They all take integers
except ValueError:
e_die('%r expected a number, got %r',
node.token.val, val.s, word=node.arg_word)
else:
if tok.id in (Id.ControlFlow_Exit, Id.ControlFlow_Return):
arg = self.mem.LastStatus()
else:
arg = 0 # break 0 levels, nothing for continue
# NOTE: A top-level 'return' is OK, unlike in bash. If you can return
# from a sourced script, it makes sense to return from a main script.
ok = True
if (tok.id in (Id.ControlFlow_Break, Id.ControlFlow_Continue) and
self.loop_level == 0):
ok = False
if ok:
if tok.id == Id.ControlFlow_Exit:
raise util.UserExit(arg) # handled differently than other control flow
else:
raise _ControlFlow(tok, arg)
else:
msg = 'Invalid control flow at top level'
if self.exec_opts.strict_control_flow:
e_die(msg, token=tok)
else:
# Only print warnings, never fatal.
# Bash oddly only exits 1 for 'return', but no other shell does.
self.errfmt.Print(msg, prefix='warning: ', span_id=tok.span_id)
status = 0
# The only difference between these two is that CommandList has no
# redirects. We already took care of that above.
elif node.tag in (command_e.CommandList, command_e.BraceGroup):
status = self._ExecuteList(node.children)
check_errexit = False
elif node.tag == command_e.AndOr:
# NOTE: && and || have EQUAL precedence in command mode. See case #13
# in dbracket.test.sh.
left = node.children[0]
# Suppress failure for every child except the last one.
self._PushErrExit(node.spids[0])
try:
status = self._Execute(left)
finally:
self._PopErrExit()
i = 1
n = len(node.children)
while i < n:
#log('i %d status %d', i, status)
child = node.children[i]
op_id = node.ops[i-1]
#log('child %s op_id %s', child, op_id)
if op_id == Id.Op_DPipe and status == 0:
i += 1
continue # short circuit
elif op_id == Id.Op_DAmp and status != 0:
i += 1
continue # short circuit
if i == n - 1: # errexit handled differently for last child
status = self._Execute(child)
check_errexit = True
else:
self._PushErrExit(node.spids[i]) # blame the right && or ||
try:
status = self._Execute(child)
finally:
self._PopErrExit()
i += 1
elif node.tag == command_e.WhileUntil:
if node.keyword.id == Id.KW_While:
_DonePredicate = lambda status: status != 0
else:
_DonePredicate = lambda status: status == 0
status = 0
self.loop_level += 1
try:
while True:
self._PushErrExit(node.spids[0]) # while/until spid
try:
cond_status = self._ExecuteList(node.cond)
finally:
self._PopErrExit()
done = cond_status != 0
if _DonePredicate(cond_status):
break
try:
status = self._Execute(node.body) # last one wins
except _ControlFlow as e:
if e.IsBreak():
status = 0
break
elif e.IsContinue():
status = 0
continue
else: # return needs to pop up more
raise
finally:
self.loop_level -= 1
elif node.tag == command_e.ForEach:
self.mem.SetCurrentSpanId(node.spids[0]) # for x in $LINENO
iter_name = node.iter_name
if node.do_arg_iter:
iter_list = self.mem.GetArgv()
else:
words = braces.BraceExpandWords(node.iter_words)
iter_list = self.word_ev.EvalWordSequence(words)
# We need word splitting and so forth
# NOTE: This expands globs too. TODO: We should pass in a Globber()
# object.
status = 0 # in case we don't loop
self.loop_level += 1
try:
for x in iter_list:
#log('> ForEach setting %r', x)
state.SetLocalString(self.mem, iter_name, x)
#log('<')
try:
status = self._Execute(node.body) # last one wins
except _ControlFlow as e:
if e.IsBreak():
status = 0
break
elif e.IsContinue():
status = 0
else: # return needs to pop up more
raise
finally:
self.loop_level -= 1
elif node.tag == command_e.ForExpr:
status = 0
init, cond, body, update = node.init, node.cond, node.body, node.update
if init:
self.arith_ev.Eval(init)
self.loop_level += 1
try:
while True:
if cond:
b = self.arith_ev.Eval(cond)
if not b:
break
try:
status = self._Execute(body)
except _ControlFlow as e:
if e.IsBreak():
status = 0
break
elif e.IsContinue():
status = 0
else: # return needs to pop up more
raise
if update:
self.arith_ev.Eval(update)
finally:
self.loop_level -= 1
elif node.tag == command_e.OilForIn:
# NOTE: This is a metacircular implementation using the iterable
# protocol.
status = 0
obj = self.expr_ev.EvalExpr(node.iterable)
if isinstance(obj, str):
e_die("Strings aren't iterable")
else:
it = obj.__iter__()
body = node.body
iter_name = node.lhs[0].name.val # TODO: proper lvalue
while True:
try:
loop_val = it.next()
except StopIteration:
break
self.mem.SetVar(lvalue.Named(iter_name), _PyObjectToVal(loop_val), (),
scope_e.LocalOnly)
# Copied from above
try:
status = self._Execute(body)
except _ControlFlow as e:
if e.IsBreak():
status = 0
break
elif e.IsContinue():
status = 0
else: # return needs to pop up more
raise
elif node.tag == command_e.DoGroup:
status = self._ExecuteList(node.children)
check_errexit = False # not real statements
elif node.tag == command_e.ShFunction:
# TODO: if shopt -s namespaces, then enter it in self.mem
# self.mem.SetVar(value.Obj(...))
# NOTE: Would it make sense to evaluate the redirects BEFORE entering?
# It will save time on function calls.
self.procs[node.name] = node
status = 0
elif node.tag == command_e.Proc:
if node.sig.tag == proc_sig_e.Closed:
defaults = [None] * len(node.sig.params)
for i, param in enumerate(node.sig.params):
if param.default_val:
py_val = self.expr_ev.EvalExpr(param.default_val)
defaults[i] = _PyObjectToVal(py_val)
else:
defaults = None
obj = objects.Proc(node, defaults)
self.mem.SetVar(
lvalue.Named(node.name.val), value.Obj(obj), (), scope_e.GlobalOnly)
status = 0
elif node.tag == command_e.Func:
# Note: funcs have the Python pitfall where mutable objects shouldn't be
# used as default args.
pos_defaults = [None] * len(node.pos_params)
for i, param in enumerate(node.pos_params):
if param.default_val:
py_val = self.expr_ev.EvalExpr(param.default_val)
pos_defaults[i] = _PyObjectToVal(py_val)
named_defaults = {}
for i, param in enumerate(node.named_params):
if param.default_val:
obj = self.expr_ev.EvalExpr(param.default_val)
named_defaults[param.name.val] = value.Obj(obj)
obj = objects.Func(node, pos_defaults, named_defaults, self)
self.mem.SetVar(
lvalue.Named(node.name.val), value.Obj(obj), (), scope_e.GlobalOnly)
status = 0
elif node.tag == command_e.If:
done = False
for arm in node.arms:
self._PushErrExit(arm.spids[0]) # if/elif spid
try:
status = self._ExecuteList(arm.cond)
finally:
self._PopErrExit()
if status == 0:
status = self._ExecuteList(arm.action)
done = True
break
# TODO: The compiler should flatten this
if not done and node.else_action is not None:
status = self._ExecuteList(node.else_action)
elif node.tag == command_e.NoOp:
status = 0 # make it true
elif node.tag == command_e.Case:
val = self.word_ev.EvalWordToString(node.to_match)
to_match = val.s
status = 0 # If there are no arms, it should be zero?
done = False
for arm in node.arms:
for pat_word in arm.pat_list:
# NOTE: Is it OK that we're evaluating these as we go?
# TODO: case "$@") shouldn't succeed? That's a type error?
# That requires strict-array?
pat_val = self.word_ev.EvalWordToString(pat_word, do_fnmatch=True)
#log('Matching word %r against pattern %r', to_match, pat_val.s)
if libc.fnmatch(pat_val.s, to_match):
status = self._ExecuteList(arm.action)
            done = True  # TODO: Parse ;;& and ;& for fallthrough and such?
break # Only execute action ONCE
if done:
break
elif node.tag == command_e.TimeBlock:
# TODO:
# - When do we need RUSAGE_CHILDREN?
# - Respect TIMEFORMAT environment variable.
# "If this variable is not set, Bash acts as if it had the value"
# $'\nreal\t%3lR\nuser\t%3lU\nsys\t%3lS'
# "A trailing newline is added when the format string is displayed."
start_t = time.time() # calls gettimeofday() under the hood
start_u = resource.getrusage(resource.RUSAGE_SELF)
status = self._Execute(node.pipeline)
end_t = time.time()
end_u = resource.getrusage(resource.RUSAGE_SELF)
real = end_t - start_t
user = end_u.ru_utime - start_u.ru_utime
sys_ = end_u.ru_stime - start_u.ru_stime
libc.print_time(real, user, sys_)
else:
raise NotImplementedError(node.__class__.__name__)
return status, check_errexit
def _Execute(self, node, fork_external=True):
"""Apply redirects, call _Dispatch(), and performs the errexit check.
Args:
node: syntax_asdl.command_t
fork_external: if we get a SimpleCommand that is an external command,
should we fork first? This is disabled in the context of a pipeline
process and a subshell.
"""
# See core/builtin.py for the Python signal handler that appends to this
# list.
if self.trap_nodes:
# Make a copy and clear it so we don't cause an infinite loop.
to_run = list(self.trap_nodes)
del self.trap_nodes[:]
for trap_node in to_run: # NOTE: Don't call this 'node'!
self._Execute(trap_node)
# strict_errexit check for all compound commands.
# TODO: Speed this up with some kind of bit mask?
eo = self.exec_opts
if eo.strict_errexit and _DisallowErrExit(node):
span_id = eo.errexit.SpidIfDisabled()
if span_id != runtime.NO_SPID:
node_str = node.__class__.__name__.split('_')[-1] # e.g. BraceGroup
e_die("errexit is disabled here, but strict_errexit disallows it "
"with a compound command (%s)", node_str, span_id=span_id)
# These nodes have no redirects. NOTE: Function definitions have
# redirects, but we do NOT want to evaluate them yet! They're evaluated
# on every invocation.
# TODO: Speed this up with some kind of bit mask?
if node.tag in (
command_e.NoOp, command_e.ControlFlow, command_e.Pipeline,
command_e.AndOr, command_e.CommandList, command_e.Sentence,
command_e.TimeBlock, command_e.ShFunction, command_e.VarDecl,
command_e.PlaceMutation, command_e.OilCondition, command_e.Proc,
command_e.Func, command_e.Return, command_e.Expr):
redirects = []
else:
try:
redirects = self._EvalRedirects(node)
except util.RedirectEvalError as e:
ui.PrettyPrintError(e, self.arena)
redirects = None
check_errexit = True
if redirects is None: # evaluation error
status = 1
elif redirects:
if self.fd_state.Push(redirects, self.waiter):
try:
status, check_errexit = self._Dispatch(node, fork_external)
finally:
self.fd_state.Pop()
#log('_dispatch returned %d', status)
else: # Error applying redirects, e.g. bad file descriptor.
status = 1
else: # No redirects
status, check_errexit = self._Dispatch(node, fork_external)
self.mem.SetLastStatus(status)
# NOTE: Bash says that 'set -e' checking is done after each 'pipeline'.
# However, any bash construct can appear in a pipeline. So it's easier
# just to put it at the end, instead of after every node.
#
# Possible exceptions:
# - function def (however this always exits 0 anyway)
# - assignment - its result should be the result of the RHS?
# - e.g. arith sub, command sub? I don't want arith sub.
# - ControlFlow: always raises, it has no status.
if check_errexit:
self._CheckStatus(status, node)
return status
def _ExecuteList(self, children):
status = 0 # for empty list
for child in children:
status = self._Execute(child) # last status wins
return status
def LastStatus(self):
"""For main_loop.py to determine the exit code of the shell itself."""
return self.mem.LastStatus()
def ExecuteAndCatch(self, node, fork_external=True):
"""Execute a subprogram, handling _ControlFlow and fatal exceptions.
Args:
node: LST subtree
fork_external: whether external commands require forking
Returns:
TODO: use enum 'why' instead of the 2 booleans
Used by main_loop.py.
Also:
- SubProgramThunk for pipelines, subshell, command sub, process sub
- TODO: Signals besides EXIT trap
Most other clients call _Execute():
- _Source() for source builtin
- _Eval() for eval builtin
- _RunProc() for function call
"""
is_return = False
is_fatal = False
try:
status = self._Execute(node, fork_external=fork_external)
except _ControlFlow as e:
# Return at top level is OK, unlike in bash.
if e.IsReturn():
is_return = True
status = e.StatusCode()
else:
# Invalid control flow
self.errfmt.Print(
"Loop and control flow can't be in different processes",
span_id=e.token.span_id)
is_fatal = True
# All shells exit 0 here. It could be hidden behind
# strict-control-flow if the incompatibility causes problems.
status = 1
except util.ParseError as e:
self.dumper.MaybeCollect(self, e) # Do this before unwinding stack
raise
except util.FatalRuntimeError as e:
self.dumper.MaybeCollect(self, e) # Do this before unwinding stack
if not e.HasLocation(): # Last resort!
e.span_id = self.mem.CurrentSpanId()
ui.PrettyPrintError(e, self.arena, prefix='fatal: ')
is_fatal = True
status = e.exit_status if e.exit_status is not None else 1
self.dumper.MaybeDump(status)
self.mem.SetLastStatus(status)
return is_return, is_fatal
def MaybeRunExitTrap(self):
"""If an EXIT trap exists, run it.
Returns:
Whether we should use the status of the handler.
This is odd behavior, but all bash/dash/mksh seem to agree on it.
See cases 7-10 in builtin-trap.test.sh.
"""
handler = self.traps.get('EXIT')
if handler:
is_return, is_fatal = self.ExecuteAndCatch(handler.node)
return is_return # explicit 'return' in the trap handler!
else:
return False # nothing run, don't use its status
def RunCommandSub(self, node):
p = self._MakeProcess(node,
inherit_errexit=self.exec_opts.inherit_errexit)
r, w = posix.pipe()
p.AddStateChange(process.StdoutToPipe(r, w))
_ = p.Start()
#log('Command sub started %d', pid)
chunks = []
posix.close(w) # not going to write
while True:
byte_str = posix.read(r, 4096)
if not byte_str:
break
chunks.append(byte_str)
posix.close(r)
status = p.Wait(self.waiter)
# OSH has the concept of aborting in the middle of a WORD. We're not
# waiting until the command is over!
if self.exec_opts.more_errexit:
if self.exec_opts.ErrExit() and status != 0:
raise util.ErrExitFailure(
'Command sub exited with status %d (%r)', status,
node.__class__.__name__)
else:
# Set a flag so we check errexit at the same time as bash. Example:
#
# a=$(false)
# echo foo # no matter what comes here, the flag is reset
#
# Set ONLY until this command node has finished executing.
self.check_command_sub_status = True
self.mem.SetLastStatus(status)
# Runtime errors test case: # $("echo foo > $@")
# Why rstrip()?
# https://unix.stackexchange.com/questions/17747/why-does-shell-command-substitution-gobble-up-a-trailing-newline-char
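    # Illustrative shell behavior this mirrors (standard command substitution,
    # not OSH-specific):
    #   x=$(printf 'foo\n\n')   # captured text is 'foo\n\n'
    #   echo "[$x]"             # prints [foo] -- every trailing newline removed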
return ''.join(chunks).rstrip('\n')
def RunProcessSub(self, node, op_id):
"""Process sub creates a forks a process connected to a pipe.
The pipe is typically passed to another process via a /dev/fd/$FD path.
TODO:
sane-proc-sub:
- wait for all the words
Otherwise, set $! (mem.last_bg_pid)
strict-proc-sub:
- Don't allow it anywhere except SimpleCommand, any redirect, or
ShAssignment? And maybe not even assignment?
Should you put return codes in @PROCESS_SUB_STATUS? You need two of them.
"""
p = self._MakeProcess(node)
r, w = posix.pipe()
if op_id == Id.Left_ProcSubIn:
# Example: cat < <(head foo.txt)
#
# The head process should write its stdout to a pipe.
redir = process.StdoutToPipe(r, w)
elif op_id == Id.Left_ProcSubOut:
# Example: head foo.txt > >(tac)
#
# The tac process should read its stdin from a pipe.
#
# NOTE: This appears to hang in bash? At least when done interactively.
# It doesn't work at all in osh interactively?
redir = process.StdinFromPipe(r, w)
else:
raise AssertionError
p.AddStateChange(redir)
# Fork, letting the child inherit the pipe file descriptors.
pid = p.Start()
# After forking, close the end of the pipe we're not using.
if op_id == Id.Left_ProcSubIn:
posix.close(w)
elif op_id == Id.Left_ProcSubOut:
posix.close(r)
else:
raise AssertionError
# NOTE: Like bash, we never actually wait on it!
# TODO: At least set $! ?
# Is /dev Linux-specific?
if op_id == Id.Left_ProcSubIn:
return '/dev/fd/%d' % r
elif op_id == Id.Left_ProcSubOut:
return '/dev/fd/%d' % w
else:
raise AssertionError
def _RunProc(self, func_node, argv):
"""Run a shell "functions".
For SimpleCommand and registered completion hooks.
"""
# These are redirects at DEFINITION SITE. You can also have redirects at
# the CALL SITE. For example:
#
# f() { echo hi; } 1>&2
# f 2>&1
try:
def_redirects = self._EvalRedirects(func_node)
except util.RedirectEvalError as e:
ui.PrettyPrintError(e, self.arena)
return 1
if def_redirects:
if not self.fd_state.Push(def_redirects, self.waiter):
return 1 # error
self.mem.PushCall(func_node.name, func_node.spids[0], argv)
# Redirects still valid for functions.
# Here doc causes a pipe and Process(SubProgramThunk).
try:
status = self._Execute(func_node.body)
except _ControlFlow as e:
if e.IsReturn():
status = e.StatusCode()
else:
# break/continue used in the wrong place.
e_die('Unexpected %r (in function call)', e.token.val, token=e.token)
except (util.FatalRuntimeError, util.ParseError) as e:
self.dumper.MaybeCollect(self, e) # Do this before unwinding stack
raise
finally:
self.mem.PopCall()
if def_redirects:
self.fd_state.Pop()
return status
def _RunOilProc(self, proc, argv):
# type: (command__Proc, List[str]) -> int
"""
Run an oil proc foo { } or proc foo(x, y, @names) { }
"""
node = proc.node
sig = node.sig
if sig.tag == proc_sig_e.Closed:
# We're binding named params. User should use @rest. No 'shift'.
proc_argv = []
else:
proc_argv = argv
self.mem.PushCall(node.name.val, node.name.span_id, proc_argv)
n_args = len(argv)
if sig.tag == proc_sig_e.Closed: # proc is-closed []
for i, p in enumerate(sig.params):
if i < n_args:
val = value.Str(argv[i])
else:
val = proc.defaults[i]
if val is None:
e_die("No value provided for param %r", p.name.val)
self.mem.SetVar(lvalue.Named(p.name.val), val, (), scope_e.LocalOnly)
n_params = len(sig.params)
if sig.rest:
leftover = value.MaybeStrArray(argv[n_params:])
self.mem.SetVar(
lvalue.Named(sig.rest.val), leftover, (), scope_e.LocalOnly)
else:
if n_args > n_params:
raise TypeError(
"proc %r expected %d arguments, but got %d" %
(node.name.val, n_params, n_args))
# TODO:
# - Handle &block param? How to do that? It's really the
# syntax_asdl.command_t type? Or objects.Block probably.
try:
status = self._Execute(node.body)
except _ControlFlow as e:
if e.IsReturn():
status = e.StatusCode()
else:
# break/continue used in the wrong place.
e_die('Unexpected %r (in function call)', e.token.val, token=e.token)
except (util.FatalRuntimeError, util.ParseError) as e:
self.dumper.MaybeCollect(self, e) # Do this before unwinding stack
raise
finally:
self.mem.PopCall()
return status
def RunOilFunc(self, func, args, kwargs):
# type: (objects.Func, List[Any], Dict[str, Any]) -> Any
"""Run an Oil function.
var x = abs(y) do f(x) @split(mystr) @join(myarray)
"""
# TODO:
# - Return value register should be separate?
# - But how does 'return 345' work? Is that the return builtin
# raising an exception?
# Or is it setting the register?
# - I think the exception can have any type, but when you catch it
# you determine whether it's LastStatus() or it's something else.
# - Something declared with 'func' CANNOT have both?
#
# - Type checking
# - If the arguments are all strings, make them @ARGV?
# That isn't happening right now.
#log('RunOilFunc kwargs %s', kwargs)
#log('RunOilFunc named_defaults %s', func.named_defaults)
node = func.node
self.mem.PushTemp()
# Bind positional arguments
n_args = len(args)
n_params = len(node.pos_params)
for i, param in enumerate(node.pos_params):
if i < n_args:
py_val = args[i]
val = _PyObjectToVal(py_val)
else:
val = func.pos_defaults[i]
if val is None:
# Python raises TypeError. Should we do something else?
raise TypeError('No value provided for param %r', param.name)
self.mem.SetVar(lvalue.Named(param.name.val), val, (), scope_e.LocalOnly)
if node.pos_splat:
splat_name = node.pos_splat.val
# NOTE: This is a heterogeneous TUPLE, not list.
leftover = value.Obj(args[n_params:])
self.mem.SetVar(lvalue.Named(splat_name), leftover, (), scope_e.LocalOnly)
else:
if n_args > n_params:
raise TypeError(
"func %r expected %d arguments, but got %d" %
(node.name.val, n_params, n_args))
# Bind named arguments
for i, param in enumerate(node.named_params):
name = param.name
if name.val in kwargs:
py_val = kwargs.pop(name.val) # REMOVE it
val = _PyObjectToVal(py_val)
else:
if name.val in func.named_defaults:
val = func.named_defaults[name.val]
else:
raise TypeError(
"Named argument %r wasn't passed, and it doesn't have a default "
"value" % name.val)
self.mem.SetVar(lvalue.Named(name.val), val, (), scope_e.LocalOnly)
if node.named_splat:
splat_name = node.named_splat.val
# Note: this dict is not an AssocArray
leftover = value.Obj(kwargs)
self.mem.SetVar(lvalue.Named(splat_name), leftover, (), scope_e.LocalOnly)
else:
if kwargs:
raise TypeError(
'func %r got unexpected named arguments: %s' %
(node.name.val, ', '.join(kwargs.keys())))
return_val = None
try:
self._Execute(node.body)
except _ControlFlow as e:
if e.IsReturn():
# TODO: Rename this
return_val = e.StatusCode()
finally:
self.mem.PopTemp()
return return_val
def RunLambda(self, lambda_node, args, kwargs):
""" Run a lambda like |x| x+1 """
self.mem.PushTemp()
# Bind params. TODO: Reject kwargs, etc.
for i, param in enumerate(lambda_node.params):
val = value.Obj(args[i])
self.mem.SetVar(lvalue.Named(param.name.val), val, (), scope_e.LocalOnly)
return_val = None
try:
return_val = self.expr_ev.EvalExpr(lambda_node.body)
finally:
self.mem.PopTemp()
return return_val
def EvalBlock(self, block):
"""
Returns a namespace. For config files.
rule foo {
a = 1
}
is like:
foo = {a:1}
"""
status = None
self.mem.PushTemp() # So variables don't conflict
try:
self._Execute(block) # can raise FatalRuntimeError, etc.
except _ControlFlow as e: # A block is more like a function.
if e.IsReturn():
status = e.StatusCode()
else:
raise
finally:
namespace = self.mem.TopNamespace()
self.mem.PopTemp()
# This is the thing on self.mem?
# Filter out everything beginning with _ ?
# TODO: Return arbitrary values instead
namespace['_returned'] = status
return namespace
def RunFuncForCompletion(self, func_node, argv):
# TODO: Change this to run Oil procs and funcs too
try:
status = self._RunProc(func_node, argv)
except util.FatalRuntimeError as e:
ui.PrettyPrintError(e, self.arena)
status = e.exit_status if e.exit_status is not None else 1
except _ControlFlow as e:
# shouldn't be able to exit the shell from a completion hook!
# TODO: Avoid overwriting the prompt!
self.errfmt.Print('Attempted to exit from completion hook.',
span_id=e.token.span_id)
status = 1
# NOTE: (IOError, OSError) are caught in completion.py:ReadlineCallback
return status
| 33.107794
| 122
| 0.631372
|
67e5424ff0ab28ea1400fb0eb5ba5b8ec5a2941f
| 5,489
|
py
|
Python
|
codes/models/PPN.py
|
ZichengDuan/MVM3D
|
5242fa05afb6bff097908c88a8ef0fd9bc4a1fc5
|
[
"MIT"
] | 21
|
2021-09-14T19:11:29.000Z
|
2022-02-05T05:58:32.000Z
|
codes/models/PPN.py
|
Robert-Mar/MVM3D
|
b62c96de5894ae5fef615e2ee54fe975248a3df7
|
[
"MIT"
] | 1
|
2021-11-25T08:56:32.000Z
|
2021-12-04T07:40:23.000Z
|
codes/models/PPN.py
|
Robert-Mar/MVM3D
|
b62c96de5894ae5fef615e2ee54fe975248a3df7
|
[
"MIT"
] | 2
|
2021-09-13T04:07:10.000Z
|
2021-09-14T09:15:52.000Z
|
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import kornia
from codes.models.resnet import resnet18
import matplotlib
from codes.models.region_proposal_network import RegionProposalNetwork
import cv2
from codes.EX_CONST import Const
import matplotlib.pyplot as plt
matplotlib.use('Agg')
class Reshape(nn.Module):
def __init__(self, *args):
super(Reshape, self).__init__()
self.shape = args
def forward(self, x):
return x.view(1, self.shape[0])
class PerspTransDetector(nn.Module):
def __init__(self, dataset = None):
super().__init__()
if dataset is not None:
self.num_cam = dataset.num_cam
self.img_shape, self.reducedgrid_shape = dataset.img_shape, dataset.reducedgrid_shape
imgcoord2worldgrid_matrices = self.get_imgcoord2worldgrid_matrices(dataset.base.intrinsic_matrices,
dataset.base.extrinsic_matrices,
dataset.base.worldgrid2worldcoord_mat)
self.coord_map = self.create_coord_map(self.reducedgrid_shape + [1])
# img
self.upsample_shape = list(map(lambda x: int(x / Const.reduce), self.img_shape))
img_reduce = np.array(self.img_shape) / np.array(self.upsample_shape)
img_zoom_mat = np.diag(np.append(img_reduce, [1]))
# map
map_zoom_mat = np.diag(np.append(np.ones([2]) / Const.reduce, [1]))
self.proj_mats = [torch.from_numpy(map_zoom_mat @ imgcoord2worldgrid_matrices[cam] @ img_zoom_mat)
for cam in range(self.num_cam)]
self.backbone = nn.Sequential(*list(resnet18(pretrained=True, replace_stride_with_dilation=[False, False, True]).children())[:-2]).cuda()
self.rpn = RegionProposalNetwork(in_channels=1026, mid_channels=1026, ratios=[0.9, 1.1], anchor_scales=[4]).cuda()
    def forward(self, imgs, frame, gt_boxes=None, epoch=None, visualize=False, train=True, mark=None):
B, N, C, H, W = imgs.shape
world_features = []
img_featuremap = []
for cam in range(self.num_cam):
if hasattr(torch.cuda, 'empty_cache'):
torch.cuda.empty_cache()
            img_feature = self.backbone(imgs[:, cam].cuda())
img_feature = F.interpolate(img_feature, self.upsample_shape, mode='bilinear')
if cam == 0:
plt.imsave("img_norm_0.jpg", torch.norm(img_feature[0], dim=0).cpu().numpy())
else:
plt.imsave("img_norm_1.jpg", torch.norm(img_feature[0], dim=0).cpu().numpy())
img_featuremap.append(img_feature)
proj_mat = self.proj_mats[cam].repeat([B, 1, 1]).float().cuda()
world_feature = kornia.warp_perspective(img_feature.cuda(), proj_mat, self.reducedgrid_shape) # 0.0142 * 2 = 0.028
world_feature = kornia.vflip(world_feature)
if cam == 0:
plt.imsave("world_feature_0.jpg", torch.norm(world_feature[0], dim=0).cpu().numpy())
else:
plt.imsave("world_feature_1.jpg", torch.norm(world_feature[0], dim=0).cpu().numpy())
world_features.append(world_feature.cuda())
world_features = torch.cat(world_features + [self.coord_map.repeat([B, 1, 1, 1]).cuda()], dim=1)
plt.imsave("world_features.jpg", torch.norm(world_features[0], dim=0).cpu().numpy())
rpn_locs, rpn_scores, anchor, rois, roi_indices = self.rpn(world_features, Const.grid_size) # 0.08
return rpn_locs, rpn_scores, anchor, rois, roi_indices, img_featuremap, world_features
def get_imgcoord2worldgrid_matrices(self, intrinsic_matrices, extrinsic_matrices, worldgrid2worldcoord_mat):
projection_matrices = {}
for cam in range(self.num_cam):
worldcoord2imgcoord_mat = intrinsic_matrices[cam] @ np.delete(extrinsic_matrices[cam], 2, 1)
worldgrid2imgcoord_mat = worldcoord2imgcoord_mat @ worldgrid2worldcoord_mat
imgcoord2worldgrid_mat = np.linalg.inv(worldgrid2imgcoord_mat)
permutation_mat = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
projection_matrices[cam] = permutation_mat @ imgcoord2worldgrid_mat
return projection_matrices
def create_coord_map(self, img_size, with_r=False):
H, W, C = img_size
grid_x, grid_y = np.meshgrid(np.arange(W), np.arange(H))
grid_x = torch.from_numpy(grid_x / (W - 1) * 2 - 1).float()
grid_y = torch.from_numpy(grid_y / (H - 1) * 2 - 1).float()
ret = torch.stack([grid_x, grid_y], dim=0).unsqueeze(0)
if with_r:
rr = torch.sqrt(torch.pow(grid_x, 2) + torch.pow(grid_y, 2)).view([1, 1, H, W])
ret = torch.cat([ret, rr], dim=1)
return ret
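def _coord_map_shape_sketch():
    # Minimal usage sketch (not called anywhere in this module): create_coord_map
    # returns a (1, 2, H, W) tensor whose two channels are x/y coordinates
    # normalised to [-1, 1]; with_r=True appends a radius channel. The 120x360
    # grid size here is a hypothetical example.
    coord = PerspTransDetector.create_coord_map(None, [120, 360, 1])
    return coord.shape  # torch.Size([1, 2, 120, 360])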
def vis_feature(x, max_num=5, out_path='/home/dzc/Desktop/CASIA/proj/mvRPN-det/images/'):
for i in range(0, x.shape[1]):
if i >= max_num:
break
feature = x[0, i, :, :].view(x.shape[-2], x.shape[-1])
feature = feature.detach().cpu().numpy()
feature = 1.0 / (1 + np.exp(-1 * feature))
feature = np.round(feature * 255).astype(np.uint8)
feature_img = cv2.applyColorMap(feature, cv2.COLORMAP_JET)
dst_path = os.path.join(out_path, str(i) + '.jpg')
cv2.imwrite(dst_path, feature_img)
| 48.149123
| 145
| 0.62434
|
9ea8f734a2a1948cc5f1ffc39a3dce529ba75710
| 2,364
|
py
|
Python
|
jacket/api/compute/openstack/compute/legacy_v2/contrib/image_size.py
|
bopopescu/jacket
|
d7ad3147fcb43131098c2a5210847634ff5fb325
|
[
"Apache-2.0"
] | null | null | null |
jacket/api/compute/openstack/compute/legacy_v2/contrib/image_size.py
|
bopopescu/jacket
|
d7ad3147fcb43131098c2a5210847634ff5fb325
|
[
"Apache-2.0"
] | null | null | null |
jacket/api/compute/openstack/compute/legacy_v2/contrib/image_size.py
|
bopopescu/jacket
|
d7ad3147fcb43131098c2a5210847634ff5fb325
|
[
"Apache-2.0"
] | 2
|
2016-08-10T02:21:49.000Z
|
2020-07-24T01:57:21.000Z
|
# Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from jacket.api.compute.openstack import extensions
from jacket.api.compute.openstack import wsgi
authorize = extensions.soft_extension_authorizer('compute', 'image_size')
class ImageSizeController(wsgi.Controller):
def _extend_image(self, image, image_cache):
key = "%s:size" % Image_size.alias
image[key] = image_cache['size']
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ["compute.context"]
if authorize(context):
image_resp = resp_obj.obj['image']
# image guaranteed to be in the cache due to the core API adding
# it in its 'show' method
image_cached = req.get_db_item('images', image_resp['id'])
self._extend_image(image_resp, image_cached)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['compute.context']
if authorize(context):
images_resp = list(resp_obj.obj['images'])
# images guaranteed to be in the cache due to the core API adding
# it in its 'detail' method
for image in images_resp:
image_cached = req.get_db_item('images', image['id'])
self._extend_image(image, image_cached)
class Image_size(extensions.ExtensionDescriptor):
"""Adds image size to image listings."""
name = "ImageSize"
alias = "OS-EXT-IMG-SIZE"
namespace = ("http://docs.openstack.org/compute/ext/"
"image_size/api/v1.1")
updated = "2013-02-19T00:00:00Z"
def get_controller_extensions(self):
controller = ImageSizeController()
extension = extensions.ControllerExtension(self, 'images', controller)
return [extension]
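def _extended_image_sketch():
    # Minimal sketch of the transformation _extend_image performs; the image dict
    # and the cached size below are hypothetical values.
    image = {"id": "c2482b56", "name": "cirros"}
    image_cache = {"size": 25165824}
    image["%s:size" % Image_size.alias] = image_cache["size"]
    return image  # adds the key 'OS-EXT-IMG-SIZE:size'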
| 37.52381
| 78
| 0.668782
|
924b2947d314dc79feb69049ba885585f9ff6930
| 2,219
|
py
|
Python
|
installer-pkg/src/shminstaller/components/python.py
|
sha1n/macos-devenv-dump-poc
|
be439ad4a0c0ac265fe62d44bded73eab1a0c31d
|
[
"MIT"
] | null | null | null |
installer-pkg/src/shminstaller/components/python.py
|
sha1n/macos-devenv-dump-poc
|
be439ad4a0c0ac265fe62d44bded73eab1a0c31d
|
[
"MIT"
] | null | null | null |
installer-pkg/src/shminstaller/components/python.py
|
sha1n/macos-devenv-dump-poc
|
be439ad4a0c0ac265fe62d44bded73eab1a0c31d
|
[
"MIT"
] | null | null | null |
from shminspector.api.context import Context
from shminspector.api.reactor import Reactor, ReactorCommand
from shminspector.api.tags import macos, experimental, interactive, prerequisites
from shminspector.api.validator import ValidationResult, Status
from shminstaller.components.macosutil import download_and_install_commands_for
@macos
@experimental
@interactive
@prerequisites("disk-space", "network-connectivity")
class Python3InstallReactor(Reactor):
def react(self, data: ValidationResult, ctx: Context):
commands = []
if data.status == Status.NOT_FOUND:
commands = download_and_install_commands_for(ctx.config["installer"]["python"]["macos_package_url"])
commands.append(ReactorCommand(["/Applications/Python 3.6/Install Certificates.command"]))
commands.append(ReactorCommand(["/Applications/Python 3.6/Update Shell Profile.command"]))
else:
ctx.logger.info("Python 3 already installed! Detected version: {} - {}!"
.format(data.input_data.version, data.status.name))
return commands
@macos
@prerequisites("homebrew")
class PythonInstallReactor(Reactor):
def __init__(self, formula="python2"):
self.formula = formula
def react(self, data: ValidationResult, ctx: Context):
ctx.logger.info("Python already installed! Detected version: {} - {}!"
.format(data.input_data.version, data.status.name))
commands = []
if data.status == Status.NOT_FOUND:
commands.append(self._install_cmd())
elif data.status == data.status.UPGRADE_REQUIRED:
commands.append(self._upgrade_cmd())
elif data.status == data.status.DOWNGRADE_REQUIRED:
commands.append(self._uninstall_cmd())
commands.append(self._install_cmd())
return commands
def _install_cmd(self) -> ReactorCommand:
return ReactorCommand(["brew", "install", self.formula])
def _upgrade_cmd(self) -> ReactorCommand:
return ReactorCommand(["brew", "upgrade", self.formula])
def _uninstall_cmd(self) -> ReactorCommand:
return ReactorCommand(["brew", "uninstall", self.formula])
| 38.929825
| 112
| 0.6895
|
741df6adbcc2e2e21e0fb89c3be2e491d6f44534
| 2,225
|
py
|
Python
|
cities_light/tests/test_unicode.py
|
endur24/django-cities-light
|
c3c31c7e35d85952a77b9c4f90bdd30930d80e5e
|
[
"MIT"
] | 1
|
2020-03-28T05:41:13.000Z
|
2020-03-28T05:41:13.000Z
|
cities_light/tests/test_unicode.py
|
endur24/django-cities-light
|
c3c31c7e35d85952a77b9c4f90bdd30930d80e5e
|
[
"MIT"
] | null | null | null |
cities_light/tests/test_unicode.py
|
endur24/django-cities-light
|
c3c31c7e35d85952a77b9c4f90bdd30930d80e5e
|
[
"MIT"
] | 2
|
2018-10-02T11:33:47.000Z
|
2018-10-05T16:56:33.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import warnings
import six
import unidecode
from django.utils.encoding import force_text
from ..abstract_models import to_ascii
from .base import TestImportBase, FixtureDir
class TestUnicode(TestImportBase):
"""Test case for unicode errors."""
def test_exception_logging_unicode_error(self):
"""
Test logging of duplicate row and UnicodeDecodeError.
See issue https://github.com/yourlabs/django-cities-light/issues/61
"""
fixture_dir = FixtureDir('unicode')
self.import_data(
fixture_dir,
'kemerovo_country',
'kemerovo_region',
'kemerovo_city',
'kemerovo_translations'
)
def test_unidecode_warning(self):
"""
        Unidecode should get a unicode object and not a byte string.
        unidecode/__init__.py:46: RuntimeWarning: Argument <type 'str'> is not an unicode object.
        Passing an encoded string will likely have unexpected results.
        This means to_ascii should return a unicode string too.
"""
# Reset warning registry to trigger the test if the warning was already issued
# See http://bugs.python.org/issue21724
registry = getattr(unidecode, '__warningregistry__', None)
if registry:
registry.clear()
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
self.import_data(
FixtureDir('unicode'),
'kemerovo_country',
'kemerovo_region',
'kemerovo_city',
'kemerovo_translations'
)
for w in warns:
warn = force_text(w.message)
self.assertTrue("not an unicode object" not in warn, warn)
def test_to_ascii(self):
"""Test to_ascii behavior."""
self.assertEqual(to_ascii('République Françaisen'), 'Republique Francaisen')
self.assertEqual(to_ascii('Кемерово'), 'Kemerovo')
# Check that return value is unicode on python 2.7 or str on python 3
self.assertTrue(isinstance(to_ascii('Кемерово'), six.text_type))
| 32.246377
| 97
| 0.633708
|
ae43a76cc3a750e34ab7c85c37914ca45470dcc4
| 231
|
py
|
Python
|
tests/setup.py
|
sayantansatpati/disdat
|
392b04be0e852889fc3f85971b7f7cfe4884804c
|
[
"Apache-2.0"
] | 44
|
2018-02-21T20:38:20.000Z
|
2022-02-13T21:24:02.000Z
|
tests/setup.py
|
sayantansatpati/disdat
|
392b04be0e852889fc3f85971b7f7cfe4884804c
|
[
"Apache-2.0"
] | 77
|
2018-01-18T01:38:07.000Z
|
2022-02-04T02:27:32.000Z
|
tests/setup.py
|
sayantansatpati/disdat
|
392b04be0e852889fc3f85971b7f7cfe4884804c
|
[
"Apache-2.0"
] | 11
|
2018-09-17T20:37:32.000Z
|
2021-07-23T16:37:04.000Z
|
from setuptools import find_packages, setup
setup(
name='disdat-test-pipelines',
version=0.1,
packages=find_packages(exclude=['config']),
include_package_data=True,
install_requires=[
'pandas'
],
)
| 19.25
| 47
| 0.670996
|
96c634a979c9c03d82012b3d55c40068131e3a77
| 2,855
|
py
|
Python
|
script.module.urlresolver/lib/urlresolver/plugins/thevideo.py
|
parser4life/tantrumrepo
|
3b37145f4772409e538cbddb0b7aa23be525772a
|
[
"Beerware"
] | 1
|
2021-05-09T19:55:51.000Z
|
2021-05-09T19:55:51.000Z
|
script.module.urlresolver/lib/urlresolver/plugins/thevideo.py
|
parser4life/tantrumrepo
|
3b37145f4772409e538cbddb0b7aa23be525772a
|
[
"Beerware"
] | null | null | null |
script.module.urlresolver/lib/urlresolver/plugins/thevideo.py
|
parser4life/tantrumrepo
|
3b37145f4772409e538cbddb0b7aa23be525772a
|
[
"Beerware"
] | 2
|
2020-04-01T22:11:12.000Z
|
2020-05-07T23:54:52.000Z
|
'''
thevideo urlresolver plugin
Copyright (C) 2014 Eldorado
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib
import urllib2
import json
from lib import helpers
from urlresolver import common
from urlresolver.common import i18n
from urlresolver.resolver import UrlResolver, ResolverError
class TheVideoResolver(UrlResolver):
name = "thevideo"
domains = ["thevideo.me", "tvad.me", "thevideo.cc", "thevideo.us", "thevideo.io", "thevideo.website"]
pattern = '(?://|\.)((?:thevideo\.(?:me|cc|us|io|website))|tvad\.me)/(?:embed-|download/)?([0-9a-zA-Z]+)'
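    # URLs the pattern above accepts (ids are hypothetical), mapping to
    # (host, media_id):
    #   https://thevideo.me/abc123xyz        -> ('thevideo.me', 'abc123xyz')
    #   https://tvad.me/embed-abc123xyz.html -> ('tvad.me', 'abc123xyz')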
def __init__(self):
self.net = common.Net()
self.headers = {'User-Agent': common.SMU_USER_AGENT}
def get_media_url(self, host, media_id):
result = self.__auth_ip(media_id)
if 'vt' in result:
vt = result['vt']
del result['vt']
return helpers.pick_source(result.items()) + '?' + urllib.urlencode({'vt': vt}) + helpers.append_headers(self.headers)
else:
raise ResolverError('Video Token Missing')
def __auth_ip(self, media_id):
header = i18n('thevideo_auth_header')
line1 = i18n('auth_required')
line2 = i18n('visit_link')
line3 = i18n('click_pair') % ('https://tvad.me/pair')
with common.kodi.CountdownDialog(header, line1, line2, line3) as cd:
return cd.start(self.__check_auth, [media_id])
def __check_auth(self, media_id):
common.logger.log('Checking Auth: %s' % (media_id))
url = 'https://tvad.me/pair?file_code=%s&check' % (media_id)
try: js_result = json.loads(self.net.http_GET(url, headers=self.headers).content)
except ValueError:
raise ResolverError('Unusable Authorization Response')
except urllib2.HTTPError as e:
if e.code == 401:
js_result = json.loads(str(e.read()))
else:
raise
common.logger.log('Auth Result: %s' % (js_result))
if js_result.get('status'):
return js_result.get('response', {})
else:
return {}
def get_url(self, host, media_id):
return self._default_get_url(host, media_id, template='https://tvad.me/embed-{media_id}.html')
| 39.652778
| 130
| 0.65359
|
89626f3666eb896c04384f446cae5245f53448dd
| 13,784
|
py
|
Python
|
serve/triton/python_model_builder.py
|
jaelgu/towhee
|
34c79cf50831dc271ae0ab02f319f9e355c2d0bf
|
[
"Apache-2.0"
] | null | null | null |
serve/triton/python_model_builder.py
|
jaelgu/towhee
|
34c79cf50831dc271ae0ab02f319f9e355c2d0bf
|
[
"Apache-2.0"
] | null | null | null |
serve/triton/python_model_builder.py
|
jaelgu/towhee
|
34c79cf50831dc271ae0ab02f319f9e355c2d0bf
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from typing import List, Tuple, Any, Dict
import serve.triton.type_gen as tygen
import serve.triton.format_utils as fmt
class PyModelBuilder:
"""
    The base class of python model builders.
    Subclasses of PyModelBuilder should implement the following methods:
    `gen_imports`, `gen_initialize`, `gen_execute`, and `gen_finalize`.
"""
def gen_imports(self):
raise NotImplementedError('gen_imports not implemented')
def gen_initialize(self):
raise NotImplementedError('gen_initialize not implemented')
def gen_execute(self):
raise NotImplementedError('gen_execute not implemented')
def gen_finalize(self):
raise NotImplementedError('gen_execute not implemented')
def build(self, save_path: str):
with open(save_path, 'wt', encoding='utf-8') as f:
f.writelines(self.gen_imports())
f.writelines(self.gen_blank_lines(2))
f.writelines(self.gen_class_name())
f.writelines(self.gen_blank_lines(1))
f.writelines(fmt.intend(self.gen_initialize()))
f.writelines(self.gen_blank_lines(1))
f.writelines(fmt.intend(self.gen_execute()))
f.writelines(self.gen_blank_lines(1))
f.writelines(fmt.intend(self.gen_finalize()))
@staticmethod
def gen_class_name():
lines = ['class TritonPythonModel:']
return fmt.add_line_separator(lines)
@staticmethod
def gen_blank_lines(n=1):
lines = [''] * n
return fmt.add_line_separator(lines)
@staticmethod
def _from_tensor_to_obj(
type_info: tygen.TypeInfo,
obj_init_code: str,
obj_var: str,
tensor_vars: List[str]
):
"""
        Generate the code that converts Tensors into a Python object.
"""
trs_placeholders = [attr.tensor_placeholder for attr in type_info.attr_info]
# check existence of placeholders
for tr in trs_placeholders:
if obj_init_code.find(tr) == -1:
raise ValueError('Can not match placeholder %s with init function %s.' % (tr, obj_init_code))
if type_info.is_list:
init_args = ['arg' + str(i) for i in range(len(trs_placeholders))]
for tr, arg in zip(trs_placeholders, init_args):
obj_init_code = obj_init_code.replace(tr, arg)
line = obj_var + ' = [' + obj_init_code + ' for ' + ', '.join(init_args) + ' in zip(' + ', '.join(tensor_vars) + ')]'
else:
for tr, arg in zip(trs_placeholders, tensor_vars):
obj_init_code = obj_init_code.replace(tr, arg)
line = obj_var + ' = ' + obj_init_code
return [line]
@staticmethod
def _from_obj_to_tensor(
type_info: tygen.TypeInfo,
obj_var: str,
tensor_vars: List[str],
tensor_names: List[str],
obj_placeholder='$obj'
):
"""
        Generate the code that converts a Python object into Tensors.
"""
lines = []
for attr, tr_name, tr_var in zip(type_info.attr_info, tensor_names, tensor_vars):
data = attr.obj_placeholder.replace(obj_placeholder, obj_var)
dtype = attr.numpy_dtype
if tygen.is_scalar(attr):
ndarray = 'numpy.array([' + data + '], ' + dtype + ')'
else:
ndarray = 'numpy.array(' + data + ', ' + dtype + ')'
line = tr_var + ' = pb_utils.Tensor(\'' + tr_name + '\', ' + ndarray + ')'
lines.append(line)
return lines
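def _placeholder_rewrite_sketch():
    # Illustrative only: the non-list branch of _from_tensor_to_obj reduces to this
    # kind of placeholder substitution. The template string and the '$t0'
    # placeholder are hypothetical; real templates come from serve.triton.type_gen.
    template = "str($t0.as_numpy()[0], encoding='utf-8')"
    return 'arg0 = ' + template.replace('$t0', 'in0')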
class PickledCallablePyModelBuilder(PyModelBuilder):
"""
    Build a python model from a pickled callable.
"""
def __init__(
self,
module_name: str,
python_file_path: str,
pickle_file_name: str,
input_annotations: List[Tuple[Any, Tuple]],
output_annotations: List[Tuple[Any, Tuple]]
):
self.module_name = module_name
self.python_file_path = python_file_path
self.pickle_file_name = pickle_file_name
self.input_annotations = input_annotations
self.output_annotations = output_annotations
def gen_imports(self):
lines = []
lines.append('import towhee')
lines.append('import numpy')
lines.append('from pathlib import Path')
lines.append('import pickle')
lines.append('import importlib')
lines.append('import sys')
lines.append('import triton_python_backend_utils as pb_utils')
return fmt.add_line_separator(lines)
def gen_initialize(self):
lines = []
lines.append('def initialize(self, args):')
lines.append('')
lines.append('# load module')
lines.append(f'module_name = "{self.module_name}"')
lines.append(f'path = "{self.python_file_path}"')
lines.append('spec = importlib.util.spec_from_file_location(module_name, path)')
lines.append('module = importlib.util.module_from_spec(spec)')
lines.append('sys.modules[module_name] = module')
lines.append('spec.loader.exec_module(module)')
lines.append('')
lines.append('# create callable object')
lines.append(f'pickle_file_path = Path(__file__).parent / "{self.pickle_file_name}"')
lines.append('with open(pickle_file_path, \'rb\') as f:')
lines.append(fmt.intend('self.callable_obj = pickle.load(f)'))
lines = lines[:1] + fmt.intend(lines[1:])
return fmt.add_line_separator(lines)
def gen_execute(self):
lines = []
lines.append('def execute(self, requests):')
lines.append('')
lines.append('responses = []')
lines.append('')
taskloop = []
taskloop.append('for request in requests:')
taskloop.append('# get input tensors from request')
input_type_info = tygen.get_type_info(self.input_annotations)
input_arg_init_code = tygen.get_init_code(self.input_annotations)
tr_idx = 0
callable_input_args = []
for arg_idx, type_info in enumerate(input_type_info):
arg = 'arg' + str(arg_idx)
callable_input_args.append(arg)
num_attrs = len(type_info.attr_info)
tr_vars = ['in' + str(tr_idx + i) for i in range(num_attrs)]
for tr in tr_vars:
taskloop.append(tr + ' = pb_utils.get_input_tensor_by_name(request, \'INPUT' + str(tr_idx) + '\')')
tr_idx = tr_idx + 1
taskloop.append('')
taskloop.append('# create input args from tensors')
l = self._from_tensor_to_obj(type_info, input_arg_init_code[arg_idx], arg, tr_vars)
taskloop = taskloop + l
taskloop.append('')
taskloop.append('# call callable object')
callable_results = ['result' + str(i) for i in range(len(self.output_annotations))]
taskloop.append(', '.join(callable_results) + ' = self.callable_obj(' + ' ,'.join(callable_input_args) + ')')
taskloop.append('')
taskloop.append('# convert results to tensors')
output_type_info = tygen.get_type_info(self.output_annotations)
tr_idx = 0
tr_out = []
for result_idx, type_info in enumerate(output_type_info):
num_attrs = len(type_info.attr_info)
tr_vars = ['out' + str(tr_idx + i) for i in range(num_attrs)]
tr_out = tr_out + tr_vars
outputs = ['OUTPUT' + str(tr_idx + i) for i in range(num_attrs)]
l = self._from_obj_to_tensor(type_info, callable_results[result_idx], tr_vars, outputs)
taskloop = taskloop + l
taskloop.append('')
taskloop.append('# organize response')
taskloop.append('response = pb_utils.InferenceResponse(output_tensors=[' + ', '.join(tr_out) + '])')
taskloop.append('responses.append(response)')
taskloop = taskloop[:1] + fmt.intend(taskloop[1:])
lines = lines + taskloop
lines.append('')
lines.append('return responses')
lines = lines[:1] + fmt.intend(lines[1:])
return fmt.add_line_separator(lines)
def gen_finalize(self):
lines = []
lines.append('def finalize(self):')
lines.append(fmt.intend('pass'))
return fmt.add_line_separator(lines)
class OpPyModelBuilder(PyModelBuilder):
"""
    Build a python model from an operator.
"""
def __init__(
self,
task_name: str,
op_name: str,
op_init_args: Dict,
input_annotations: List[Tuple[Any, Tuple]],
output_annotations: List[Tuple[Any, Tuple]]
):
self.task_name = task_name
self.op_name = op_name
self.op_init_args = op_init_args
self.input_annotations = input_annotations
self.output_annotations = output_annotations
def gen_imports(self):
lines = []
lines.append('import towhee')
lines.append('import numpy')
lines.append('from towhee import ops')
lines.append('import triton_python_backend_utils as pb_utils')
return fmt.add_line_separator(lines)
def gen_initialize(self):
lines = []
lines.append('def initialize(self, args):')
lines.append('')
lines.append('# create op instance')
lines.append('task = getattr(ops, \'' + self.task_name + '\')')
lines.append('init_args = ' + json.dumps(self.op_init_args))
lines.append('self.op = getattr(task, \'' + self.op_name + '\')(' + '**init_args' + ')')
lines = lines[:1] + fmt.intend(lines[1:])
return fmt.add_line_separator(lines)
def gen_execute(self):
lines = []
lines.append('def execute(self, requests):')
lines.append('')
lines.append('responses = []')
lines.append('')
taskloop = []
taskloop.append('for request in requests:')
taskloop.append('# get input tensors from request')
input_type_info = tygen.get_type_info(self.input_annotations)
input_arg_init_code = tygen.get_init_code(self.input_annotations)
tr_idx = 0
op_input_args = []
for arg_idx, type_info in enumerate(input_type_info):
arg = 'arg' + str(arg_idx)
op_input_args.append(arg)
num_attrs = len(type_info.attr_info)
tr_vars = ['in' + str(tr_idx + i) for i in range(num_attrs)]
for tr in tr_vars:
taskloop.append(tr + ' = pb_utils.get_input_tensor_by_name(request, \'INPUT' + str(tr_idx) + '\')')
tr_idx = tr_idx + 1
taskloop.append('')
taskloop.append('# create input args from tensors')
l = self._from_tensor_to_obj(type_info, input_arg_init_code[arg_idx], arg, tr_vars)
taskloop = taskloop + l
taskloop.append('')
taskloop.append('# call callable object')
op_results = ['result' + str(i) for i in range(len(self.output_annotations))]
taskloop.append(', '.join(op_results) + ' = self.op(' + ' ,'.join(op_input_args) + ')')
taskloop.append('')
taskloop.append('# convert results to tensors')
output_type_info = tygen.get_type_info(self.output_annotations)
tr_idx = 0
tr_out = []
for result_idx, type_info in enumerate(output_type_info):
num_attrs = len(type_info.attr_info)
tr_vars = ['out' + str(tr_idx + i) for i in range(num_attrs)]
tr_out = tr_out + tr_vars
outputs = ['OUTPUT' + str(tr_idx + i) for i in range(num_attrs)]
l = self._from_obj_to_tensor(type_info, op_results[result_idx], tr_vars, outputs)
taskloop = taskloop + l
taskloop.append('')
taskloop.append('# organize response')
taskloop.append('response = pb_utils.InferenceResponse(output_tensors=[' + ', '.join(tr_out) + '])')
taskloop.append('responses.append(response)')
taskloop = taskloop[:1] + fmt.intend(taskloop[1:])
lines = lines + taskloop
lines.append('')
lines.append('return responses')
lines = lines[:1] + fmt.intend(lines[1:])
return fmt.add_line_separator(lines)
def gen_finalize(self):
lines = []
lines.append('def finalize(self):')
lines.append(fmt.intend('pass'))
return fmt.add_line_separator(lines)
def gen_model_from_pickled_callable(
save_path: str,
module_name: str,
python_file_path: str,
pickle_file_name: str,
input_annotations: List[Tuple[Any, Tuple]],
output_annotations: List[Tuple[Any, Tuple]]
):
builder = PickledCallablePyModelBuilder(
module_name,
python_file_path,
pickle_file_name,
input_annotations,
output_annotations
)
return builder.build(save_path)
def gen_model_from_op(
save_path: str,
task_name: str,
op_name: str,
op_init_args: Dict,
input_annotations: List[Tuple[Any, Tuple]],
output_annotations: List[Tuple[Any, Tuple]]
):
builder = OpPyModelBuilder(
task_name,
op_name,
op_init_args,
input_annotations,
output_annotations
)
return builder.build(save_path)
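def _gen_model_from_op_usage_sketch():
    # Usage sketch only: the task/operator names, init args and save path below are
    # hypothetical placeholders, and a real call would pass input/output
    # annotations in the (type, shape) form expected by serve.triton.type_gen.
    return gen_model_from_op(
        save_path='/tmp/triton_python_model.py',
        task_name='image_embedding',
        op_name='timm',
        op_init_args={'model_name': 'resnet50'},
        input_annotations=[],
        output_annotations=[],
    )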
| 34.203474
| 129
| 0.618906
|
b46bbe5377a6dd53bf802c8cb7029aeb1b8a2e16
| 69,602
|
py
|
Python
|
python_modules/dagster/dagster_tests/core_tests/storage_tests/utils/event_log_storage.py
|
ibelikov/dagster
|
6781eaadd33ecfb0b48d7c2c7d8e193efbda4209
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster_tests/core_tests/storage_tests/utils/event_log_storage.py
|
ibelikov/dagster
|
6781eaadd33ecfb0b48d7c2c7d8e193efbda4209
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster_tests/core_tests/storage_tests/utils/event_log_storage.py
|
ibelikov/dagster
|
6781eaadd33ecfb0b48d7c2c7d8e193efbda4209
|
[
"Apache-2.0"
] | 1
|
2021-11-25T11:06:39.000Z
|
2021-11-25T11:06:39.000Z
|
import re
import time
from collections import Counter
from contextlib import ExitStack
import mock
import pytest
from dagster import (
AssetKey,
AssetMaterialization,
AssetObservation,
DagsterInstance,
Field,
InputDefinition,
ModeDefinition,
Output,
OutputDefinition,
RetryRequested,
job,
op,
pipeline,
resource,
seven,
solid,
)
from dagster.core.definitions import ExpectationResult
from dagster.core.definitions.dependency import NodeHandle
from dagster.core.definitions.pipeline_base import InMemoryPipeline
from dagster.core.events import (
DagsterEvent,
DagsterEventType,
EngineEventData,
StepExpectationResultData,
StepMaterializationData,
)
from dagster.core.events.log import EventLogEntry, construct_event_logger
from dagster.core.execution.api import execute_run
from dagster.core.execution.plan.handle import StepHandle
from dagster.core.execution.plan.objects import StepFailureData, StepSuccessData
from dagster.core.execution.stats import StepEventStatus
from dagster.core.storage.event_log import InMemoryEventLogStorage, SqlEventLogStorage
from dagster.core.storage.event_log.base import (
EventLogRecord,
EventRecordsFilter,
RunShardedEventsCursor,
)
from dagster.core.storage.event_log.migration import (
EVENT_LOG_DATA_MIGRATIONS,
migrate_asset_key_data,
)
from dagster.core.storage.event_log.sqlite.sqlite_event_log import SqliteEventLogStorage
from dagster.core.test_utils import instance_for_test
from dagster.core.utils import make_new_run_id
from dagster.loggers import colored_console_logger
from dagster.serdes import deserialize_json_to_dagster_namedtuple
DEFAULT_RUN_ID = "foo"
TEST_TIMEOUT = 5
# py36 & 37 list.append not hashable
# pylint: disable=unnecessary-lambda
def create_test_event_log_record(message: str, run_id: str = DEFAULT_RUN_ID):
return EventLogEntry(
None,
message,
"debug",
"",
run_id,
time.time(),
dagster_event=DagsterEvent(
DagsterEventType.ENGINE_EVENT.value,
"nonce",
event_specific_data=EngineEventData.in_process(999),
),
)
def _stats_records(run_id):
now = time.time()
return [
_event_record(run_id, "A", now - 325, DagsterEventType.STEP_START),
_event_record(
run_id,
"A",
now - 225,
DagsterEventType.STEP_SUCCESS,
StepSuccessData(duration_ms=100000.0),
),
_event_record(run_id, "B", now - 225, DagsterEventType.STEP_START),
_event_record(
run_id,
"B",
now - 175,
DagsterEventType.STEP_FAILURE,
StepFailureData(error=None, user_failure_data=None),
),
_event_record(run_id, "C", now - 175, DagsterEventType.STEP_START),
_event_record(run_id, "C", now - 150, DagsterEventType.STEP_SKIPPED),
_event_record(run_id, "D", now - 150, DagsterEventType.STEP_START),
_event_record(
run_id,
"D",
now - 125,
DagsterEventType.ASSET_MATERIALIZATION,
StepMaterializationData(AssetMaterialization(asset_key="mat_1")),
),
_event_record(
run_id,
"D",
now - 100,
DagsterEventType.STEP_EXPECTATION_RESULT,
StepExpectationResultData(ExpectationResult(success=True, label="exp 1")),
),
_event_record(
run_id,
"D",
now - 75,
DagsterEventType.ASSET_MATERIALIZATION,
StepMaterializationData(AssetMaterialization(asset_key="mat_2")),
),
_event_record(
run_id,
"D",
now - 50,
DagsterEventType.STEP_EXPECTATION_RESULT,
StepExpectationResultData(ExpectationResult(success=False, label="exp 2")),
),
_event_record(
run_id,
"D",
now - 25,
DagsterEventType.ASSET_MATERIALIZATION,
StepMaterializationData(AssetMaterialization(asset_key="mat_3")),
),
_event_record(
run_id,
"D",
now,
DagsterEventType.STEP_SUCCESS,
StepSuccessData(duration_ms=150000.0),
),
]
def _event_record(run_id, solid_name, timestamp, event_type, event_specific_data=None):
pipeline_name = "pipeline_name"
solid_handle = NodeHandle(solid_name, None)
step_handle = StepHandle(solid_handle)
return EventLogEntry(
None,
"",
"debug",
"",
run_id,
timestamp,
step_key=step_handle.to_key(),
pipeline_name=pipeline_name,
dagster_event=DagsterEvent(
event_type.value,
pipeline_name,
solid_handle=solid_handle,
step_handle=step_handle,
event_specific_data=event_specific_data,
),
)
def _mode_def(event_callback):
@resource
def foo_resource():
time.sleep(0.1)
return "foo"
return ModeDefinition(
resource_defs={"foo": foo_resource},
logger_defs={
"callback": construct_event_logger(event_callback),
"console": colored_console_logger,
},
)
# This exists to create synthetic events to test the store
def _synthesize_events(solids_fn, run_id=None, check_success=True, instance=None, run_config=None):
events = []
def _append_event(event):
events.append(event)
@pipeline(mode_defs=[_mode_def(_append_event)])
def a_pipe():
solids_fn()
with ExitStack() as stack:
if not instance:
instance = stack.enter_context(DagsterInstance.ephemeral())
run_config = {
**{"loggers": {"callback": {}, "console": {}}},
**(run_config if run_config else {}),
}
pipeline_run = instance.create_run_for_pipeline(
a_pipe, run_id=run_id, run_config=run_config
)
result = execute_run(InMemoryPipeline(a_pipe), pipeline_run, instance)
if check_success:
assert result.success
return events, result
def _fetch_all_events(configured_storage, run_id=None):
with configured_storage.run_connection(run_id=run_id) as conn:
res = conn.execute("SELECT event from event_logs")
return res.fetchall()
def _event_types(out_events):
return list(map(lambda e: e.dagster_event.event_type if e.dagster_event else None, out_events))
@solid
def should_succeed(context):
context.log.info("succeed")
return "yay"
@solid
def solid_one(_):
yield AssetMaterialization(asset_key=AssetKey("asset_1"))
yield Output(1)
@solid
def solid_two(_):
yield AssetMaterialization(asset_key=AssetKey("asset_2"))
yield AssetMaterialization(asset_key=AssetKey(["path", "to", "asset_3"]))
yield Output(1)
def one_solid():
solid_one()
def two_solids():
solid_one()
solid_two()
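def _synthesize_events_usage_sketch():
    # Usage sketch mirroring the tests below: run a throwaway single-solid pipeline
    # against an ephemeral instance and inspect the event types it produced.
    events, result = _synthesize_events(one_solid)
    return _event_types(events), result.run_id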
class TestEventLogStorage:
"""
    You can extend this class to easily run this set of tests on any event log storage. When extending,
you simply need to override the `event_log_storage` fixture and return your implementation of
`EventLogStorage`.
For example:
```
class TestMyStorageImplementation(TestEventLogStorage):
__test__ = True
@pytest.fixture(scope='function', name='storage')
def event_log_storage(self): # pylint: disable=arguments-differ
return MyStorageImplementation()
```
"""
__test__ = False
@pytest.fixture(name="storage", params=[])
def event_log_storage(self, request):
with request.param() as s:
try:
yield s
finally:
s.dispose()
def test_init_log_storage(self, storage):
if isinstance(storage, InMemoryEventLogStorage):
assert not storage.is_persistent
else:
assert storage.is_persistent
def test_log_storage_run_not_found(self, storage):
assert storage.get_logs_for_run("bar") == []
def can_wipe(self):
# Whether the storage is allowed to wipe the event log
return True
def can_watch(self):
# Whether the storage is allowed to watch the event log
return True
def test_event_log_storage_store_events_and_wipe(self, storage):
assert len(storage.get_logs_for_run(DEFAULT_RUN_ID)) == 0
storage.store_event(
EventLogEntry(
None,
"Message2",
"debug",
"",
DEFAULT_RUN_ID,
time.time(),
dagster_event=DagsterEvent(
DagsterEventType.ENGINE_EVENT.value,
"nonce",
event_specific_data=EngineEventData.in_process(999),
),
)
)
assert len(storage.get_logs_for_run(DEFAULT_RUN_ID)) == 1
assert storage.get_stats_for_run(DEFAULT_RUN_ID)
if self.can_wipe():
storage.wipe()
assert len(storage.get_logs_for_run(DEFAULT_RUN_ID)) == 0
def test_event_log_storage_store_with_multiple_runs(self, storage):
runs = ["foo", "bar", "baz"]
for run_id in runs:
assert len(storage.get_logs_for_run(run_id)) == 0
storage.store_event(
EventLogEntry(
None,
"Message2",
"debug",
"",
run_id,
time.time(),
dagster_event=DagsterEvent(
DagsterEventType.STEP_SUCCESS.value,
"nonce",
event_specific_data=StepSuccessData(duration_ms=100.0),
),
)
)
for run_id in runs:
assert len(storage.get_logs_for_run(run_id)) == 1
assert storage.get_stats_for_run(run_id).steps_succeeded == 1
if self.can_wipe():
storage.wipe()
for run_id in runs:
assert len(storage.get_logs_for_run(run_id)) == 0
def test_event_log_storage_watch(self, storage):
if not self.can_watch():
pytest.skip("storage cannot watch runs")
watched = []
watcher = lambda x: watched.append(x) # pylint: disable=unnecessary-lambda
assert len(storage.get_logs_for_run(DEFAULT_RUN_ID)) == 0
storage.store_event(create_test_event_log_record(str(1)))
assert len(storage.get_logs_for_run(DEFAULT_RUN_ID)) == 1
assert len(watched) == 0
storage.watch(DEFAULT_RUN_ID, 0, watcher)
storage.store_event(create_test_event_log_record(str(2)))
storage.store_event(create_test_event_log_record(str(3)))
storage.store_event(create_test_event_log_record(str(4)))
attempts = 10
while len(watched) < 3 and attempts > 0:
time.sleep(0.5)
attempts -= 1
assert len(watched) == 3
assert len(storage.get_logs_for_run(DEFAULT_RUN_ID)) == 4
storage.end_watch(DEFAULT_RUN_ID, watcher)
time.sleep(0.3) # this value scientifically selected from a range of attractive values
storage.store_event(create_test_event_log_record(str(5)))
assert len(storage.get_logs_for_run(DEFAULT_RUN_ID)) == 5
assert len(watched) == 3
storage.delete_events(DEFAULT_RUN_ID)
assert len(storage.get_logs_for_run(DEFAULT_RUN_ID)) == 0
assert len(watched) == 3
assert [int(evt.message) for evt in watched] == [2, 3, 4]
def test_event_log_storage_pagination(self, storage):
# interleave two runs events to ensure pagination is not affected by other runs
storage.store_event(create_test_event_log_record("A"))
storage.store_event(create_test_event_log_record(str(0), run_id="other_run"))
storage.store_event(create_test_event_log_record("B"))
storage.store_event(create_test_event_log_record(str(1), run_id="other_run"))
storage.store_event(create_test_event_log_record("C"))
storage.store_event(create_test_event_log_record(str(2), run_id="other_run"))
storage.store_event(create_test_event_log_record("D"))
assert len(storage.get_logs_for_run(DEFAULT_RUN_ID)) == 4
assert len(storage.get_logs_for_run(DEFAULT_RUN_ID, -1)) == 4
assert len(storage.get_logs_for_run(DEFAULT_RUN_ID, 0)) == 3
assert len(storage.get_logs_for_run(DEFAULT_RUN_ID, 1)) == 2
assert len(storage.get_logs_for_run(DEFAULT_RUN_ID, 2)) == 1
assert len(storage.get_logs_for_run(DEFAULT_RUN_ID, 3)) == 0
def test_event_log_delete(self, storage):
assert len(storage.get_logs_for_run(DEFAULT_RUN_ID)) == 0
storage.store_event(create_test_event_log_record(str(0)))
assert len(storage.get_logs_for_run(DEFAULT_RUN_ID)) == 1
assert storage.get_stats_for_run(DEFAULT_RUN_ID)
storage.delete_events(DEFAULT_RUN_ID)
assert len(storage.get_logs_for_run(DEFAULT_RUN_ID)) == 0
def test_event_log_get_stats_without_start_and_success(self, storage):
# When an event log doesn't have a PIPELINE_START or PIPELINE_SUCCESS | PIPELINE_FAILURE event,
# we want to ensure storage.get_stats_for_run(...) doesn't throw an error.
assert len(storage.get_logs_for_run(DEFAULT_RUN_ID)) == 0
assert storage.get_stats_for_run(DEFAULT_RUN_ID)
def test_event_log_get_stats_for_run(self, storage):
import math
enqueued_time = time.time()
launched_time = enqueued_time + 20
start_time = launched_time + 50
storage.store_event(
EventLogEntry(
None,
"message",
"debug",
"",
DEFAULT_RUN_ID,
enqueued_time,
dagster_event=DagsterEvent(
DagsterEventType.PIPELINE_ENQUEUED.value,
"nonce",
),
)
)
storage.store_event(
EventLogEntry(
None,
"message",
"debug",
"",
DEFAULT_RUN_ID,
launched_time,
dagster_event=DagsterEvent(
DagsterEventType.PIPELINE_STARTING.value,
"nonce",
),
)
)
storage.store_event(
EventLogEntry(
None,
"message",
"debug",
"",
DEFAULT_RUN_ID,
start_time,
dagster_event=DagsterEvent(
DagsterEventType.PIPELINE_START.value,
"nonce",
),
)
)
assert math.isclose(storage.get_stats_for_run(DEFAULT_RUN_ID).enqueued_time, enqueued_time)
assert math.isclose(storage.get_stats_for_run(DEFAULT_RUN_ID).launch_time, launched_time)
assert math.isclose(storage.get_stats_for_run(DEFAULT_RUN_ID).start_time, start_time)
def test_event_log_step_stats(self, storage):
        # When an event log doesn't have a PIPELINE_START or PIPELINE_SUCCESS | PIPELINE_FAILURE event,
        # we want to ensure storage.get_step_stats_for_run(...) doesn't throw an error.
for record in _stats_records(run_id=DEFAULT_RUN_ID):
storage.store_event(record)
step_stats = storage.get_step_stats_for_run(DEFAULT_RUN_ID)
assert len(step_stats) == 4
a_stats = [stats for stats in step_stats if stats.step_key == "A"][0]
assert a_stats.step_key == "A"
assert a_stats.status.value == "SUCCESS"
assert a_stats.end_time - a_stats.start_time == 100
assert len(a_stats.attempts_list) == 1
b_stats = [stats for stats in step_stats if stats.step_key == "B"][0]
assert b_stats.step_key == "B"
assert b_stats.status.value == "FAILURE"
assert b_stats.end_time - b_stats.start_time == 50
assert len(b_stats.attempts_list) == 1
c_stats = [stats for stats in step_stats if stats.step_key == "C"][0]
assert c_stats.step_key == "C"
assert c_stats.status.value == "SKIPPED"
assert c_stats.end_time - c_stats.start_time == 25
assert len(c_stats.attempts_list) == 1
d_stats = [stats for stats in step_stats if stats.step_key == "D"][0]
assert d_stats.step_key == "D"
assert d_stats.status.value == "SUCCESS"
assert d_stats.end_time - d_stats.start_time == 150
assert len(d_stats.materializations) == 3
assert len(d_stats.expectation_results) == 2
assert len(c_stats.attempts_list) == 1
def test_secondary_index(self, storage):
if not isinstance(storage, SqlEventLogStorage):
pytest.skip("This test is for SQL-backed Event Log behavior")
# test that newly initialized DBs will have the secondary indexes built
for name in EVENT_LOG_DATA_MIGRATIONS.keys():
assert storage.has_secondary_index(name)
# test the generic API with garbage migration names
assert not storage.has_secondary_index("_A")
assert not storage.has_secondary_index("_B")
storage.enable_secondary_index("_A")
assert storage.has_secondary_index("_A")
assert not storage.has_secondary_index("_B")
storage.enable_secondary_index("_B")
assert storage.has_secondary_index("_A")
assert storage.has_secondary_index("_B")
def test_basic_event_store(self, storage):
if not isinstance(storage, SqlEventLogStorage):
pytest.skip("This test is for SQL-backed Event Log behavior")
@solid
def return_one(_):
return 1
def _solids():
return_one()
events, _result = _synthesize_events(_solids, run_id=DEFAULT_RUN_ID)
for event in events:
storage.store_event(event)
rows = _fetch_all_events(storage, run_id=DEFAULT_RUN_ID)
out_events = list(map(lambda r: deserialize_json_to_dagster_namedtuple(r[0]), rows))
# messages can come out of order
event_type_counts = Counter(_event_types(out_events))
assert event_type_counts
assert Counter(_event_types(out_events)) == Counter(_event_types(events))
def test_basic_get_logs_for_run(self, storage):
@solid
def return_one(_):
return 1
def _solids():
return_one()
events, result = _synthesize_events(_solids)
for event in events:
storage.store_event(event)
out_events = storage.get_logs_for_run(result.run_id)
assert _event_types(out_events) == _event_types(events)
def test_get_logs_for_run_cursor_limit(self, storage):
@solid
def return_one(_):
return 1
def _solids():
return_one()
events, result = _synthesize_events(_solids)
for event in events:
storage.store_event(event)
out_events = []
cursor = -1
fuse = 0
chunk_size = 2
while fuse < 50:
fuse += 1
# fetch in batches w/ limit & cursor
chunk = storage.get_logs_for_run(result.run_id, cursor=cursor, limit=chunk_size)
if not chunk:
break
assert len(chunk) <= chunk_size
out_events += chunk
cursor += len(chunk)
assert _event_types(out_events) == _event_types(events)
def test_wipe_sql_backed_event_log(self, storage):
@solid
def return_one(_):
return 1
def _solids():
return_one()
events, result = _synthesize_events(_solids)
for event in events:
storage.store_event(event)
out_events = storage.get_logs_for_run(result.run_id)
assert _event_types(out_events) == _event_types(events)
if self.can_wipe():
storage.wipe()
assert storage.get_logs_for_run(result.run_id) == []
def test_delete_sql_backed_event_log(self, storage):
@solid
def return_one(_):
return 1
def _solids():
return_one()
events, result = _synthesize_events(_solids)
for event in events:
storage.store_event(event)
out_events = storage.get_logs_for_run(result.run_id)
assert _event_types(out_events) == _event_types(events)
storage.delete_events(result.run_id)
assert storage.get_logs_for_run(result.run_id) == []
def test_get_logs_for_run_of_type(self, storage):
@solid
def return_one(_):
return 1
def _solids():
return_one()
events, result = _synthesize_events(_solids)
for event in events:
storage.store_event(event)
assert _event_types(
storage.get_logs_for_run(result.run_id, of_type=DagsterEventType.PIPELINE_SUCCESS)
) == [DagsterEventType.PIPELINE_SUCCESS]
assert _event_types(
storage.get_logs_for_run(result.run_id, of_type=DagsterEventType.STEP_SUCCESS)
) == [DagsterEventType.STEP_SUCCESS]
def test_basic_get_logs_for_run_cursor(self, storage):
@solid
def return_one(_):
return 1
def _solids():
return_one()
events, result = _synthesize_events(_solids)
for event in events:
storage.store_event(event)
assert _event_types(storage.get_logs_for_run(result.run_id, cursor=-1)) == _event_types(
events
)
def test_basic_get_logs_for_run_multiple_runs(self, storage):
@solid
def return_one(_):
return 1
def _solids():
return_one()
events_one, result_one = _synthesize_events(_solids)
for event in events_one:
storage.store_event(event)
events_two, result_two = _synthesize_events(_solids)
for event in events_two:
storage.store_event(event)
out_events_one = storage.get_logs_for_run(result_one.run_id)
assert len(out_events_one) == len(events_one)
assert set(_event_types(out_events_one)) == set(_event_types(events_one))
assert set(map(lambda e: e.run_id, out_events_one)) == {result_one.run_id}
stats_one = storage.get_stats_for_run(result_one.run_id)
assert stats_one.steps_succeeded == 1
out_events_two = storage.get_logs_for_run(result_two.run_id)
assert len(out_events_two) == len(events_two)
assert set(_event_types(out_events_two)) == set(_event_types(events_two))
assert set(map(lambda e: e.run_id, out_events_two)) == {result_two.run_id}
stats_two = storage.get_stats_for_run(result_two.run_id)
assert stats_two.steps_succeeded == 1
def test_basic_get_logs_for_run_multiple_runs_cursors(self, storage):
@solid
def return_one(_):
return 1
def _solids():
return_one()
events_one, result_one = _synthesize_events(_solids)
for event in events_one:
storage.store_event(event)
events_two, result_two = _synthesize_events(_solids)
for event in events_two:
storage.store_event(event)
out_events_one = storage.get_logs_for_run(result_one.run_id, cursor=-1)
assert len(out_events_one) == len(events_one)
assert set(_event_types(out_events_one)) == set(_event_types(events_one))
assert set(map(lambda e: e.run_id, out_events_one)) == {result_one.run_id}
out_events_two = storage.get_logs_for_run(result_two.run_id, cursor=-1)
assert len(out_events_two) == len(events_two)
assert set(_event_types(out_events_two)) == set(_event_types(events_two))
assert set(map(lambda e: e.run_id, out_events_two)) == {result_two.run_id}
def test_event_watcher_single_run_event(self, storage):
if not self.can_watch():
pytest.skip("storage cannot watch runs")
@solid
def return_one(_):
return 1
def _solids():
return_one()
event_list = []
run_id = make_new_run_id()
storage.watch(run_id, -1, lambda x: event_list.append(x))
events, _ = _synthesize_events(_solids, run_id=run_id)
for event in events:
storage.store_event(event)
start = time.time()
while len(event_list) < len(events) and time.time() - start < TEST_TIMEOUT:
time.sleep(0.01)
assert len(event_list) == len(events)
assert all([isinstance(event, EventLogEntry) for event in event_list])
def test_event_watcher_filter_run_event(self, storage):
if not self.can_watch():
pytest.skip("storage cannot watch runs")
@solid
def return_one(_):
return 1
def _solids():
return_one()
run_id_one = make_new_run_id()
run_id_two = make_new_run_id()
# only watch one of the runs
event_list = []
storage.watch(run_id_two, -1, lambda x: event_list.append(x))
events_one, _result_one = _synthesize_events(_solids, run_id=run_id_one)
for event in events_one:
storage.store_event(event)
events_two, _result_two = _synthesize_events(_solids, run_id=run_id_two)
for event in events_two:
storage.store_event(event)
start = time.time()
while len(event_list) < len(events_two) and time.time() - start < TEST_TIMEOUT:
time.sleep(0.01)
assert len(event_list) == len(events_two)
assert all([isinstance(event, EventLogEntry) for event in event_list])
def test_event_watcher_filter_two_runs_event(self, storage):
if not self.can_watch():
pytest.skip("storage cannot watch runs")
@solid
def return_one(_):
return 1
def _solids():
return_one()
event_list_one = []
event_list_two = []
run_id_one = make_new_run_id()
run_id_two = make_new_run_id()
storage.watch(run_id_one, -1, lambda x: event_list_one.append(x))
storage.watch(run_id_two, -1, lambda x: event_list_two.append(x))
events_one, _result_one = _synthesize_events(_solids, run_id=run_id_one)
for event in events_one:
storage.store_event(event)
events_two, _result_two = _synthesize_events(_solids, run_id=run_id_two)
for event in events_two:
storage.store_event(event)
start = time.time()
while (
len(event_list_one) < len(events_one) or len(event_list_two) < len(events_two)
) and time.time() - start < TEST_TIMEOUT:
pass
assert len(event_list_one) == len(events_one)
assert len(event_list_two) == len(events_two)
assert all([isinstance(event, EventLogEntry) for event in event_list_one])
assert all([isinstance(event, EventLogEntry) for event in event_list_two])
def test_correct_timezone(self, storage):
curr_time = time.time()
event = EventLogEntry(
None,
"Message2",
"debug",
"",
"foo",
curr_time,
dagster_event=DagsterEvent(
DagsterEventType.PIPELINE_START.value,
"nonce",
event_specific_data=EngineEventData.in_process(999),
),
)
storage.store_event(event)
logs = storage.get_logs_for_run("foo")
assert len(logs) == 1
log = logs[0]
stats = storage.get_stats_for_run("foo")
assert int(log.timestamp) == int(stats.start_time)
assert int(log.timestamp) == int(curr_time)
def test_asset_materialization(self, storage):
asset_key = AssetKey(["path", "to", "asset_one"])
@solid
def materialize_one(_):
yield AssetMaterialization(
asset_key=asset_key,
metadata={
"text": "hello",
"json": {"hello": "world"},
"one_float": 1.0,
"one_int": 1,
},
)
yield Output(1)
def _solids():
materialize_one()
with instance_for_test() as instance:
if not storage._instance: # pylint: disable=protected-access
storage.register_instance(instance)
events_one, _ = _synthesize_events(_solids, instance=instance)
for event in events_one:
storage.store_event(event)
assert asset_key in set(storage.all_asset_keys())
events = storage.get_asset_events(asset_key)
assert len(events) == 1
event = events[0]
assert isinstance(event, EventLogEntry)
assert (
event.dagster_event.event_type_value == DagsterEventType.ASSET_MATERIALIZATION.value
)
records = storage.get_event_records(EventRecordsFilter(asset_key=asset_key))
assert len(records) == 1
record = records[0]
assert isinstance(record, EventLogRecord)
assert record.event_log_entry == event
def test_asset_events_error_parsing(self, storage):
if not isinstance(storage, SqlEventLogStorage):
pytest.skip("This test is for SQL-backed Event Log behavior")
_logs = []
def mock_log(msg):
_logs.append(msg)
asset_key = AssetKey("asset_one")
@solid
def materialize_one(_):
yield AssetMaterialization(asset_key=asset_key)
yield Output(1)
def _solids():
materialize_one()
with instance_for_test() as instance:
if not storage._instance: # pylint: disable=protected-access
storage.register_instance(instance)
events_one, _ = _synthesize_events(_solids, instance=instance)
for event in events_one:
storage.store_event(event)
with ExitStack() as stack:
stack.enter_context(
mock.patch(
"dagster.core.storage.event_log.sql_event_log.logging.warning",
side_effect=mock_log,
)
)
# for generic sql-based event log storage
stack.enter_context(
mock.patch(
"dagster.core.storage.event_log.sql_event_log.deserialize_json_to_dagster_namedtuple",
return_value="not_an_event_record",
)
)
# for sqlite event log storage, which overrides the record fetching implementation
stack.enter_context(
mock.patch(
"dagster.core.storage.event_log.sqlite.sqlite_event_log.deserialize_json_to_dagster_namedtuple",
return_value="not_an_event_record",
)
)
assert asset_key in set(storage.all_asset_keys())
events = storage.get_asset_events(asset_key)
assert len(events) == 0
assert len(_logs) == 1
assert re.match("Could not resolve event record as EventLogEntry", _logs[0])
with ExitStack() as stack:
_logs = [] # reset logs
stack.enter_context(
mock.patch(
"dagster.core.storage.event_log.sql_event_log.logging.warning",
side_effect=mock_log,
)
)
# for generic sql-based event log storage
stack.enter_context(
mock.patch(
"dagster.core.storage.event_log.sql_event_log.deserialize_json_to_dagster_namedtuple",
side_effect=seven.JSONDecodeError("error", "", 0),
)
)
# for sqlite event log storage, which overrides the record fetching implementation
stack.enter_context(
mock.patch(
"dagster.core.storage.event_log.sqlite.sqlite_event_log.deserialize_json_to_dagster_namedtuple",
side_effect=seven.JSONDecodeError("error", "", 0),
)
)
assert asset_key in set(storage.all_asset_keys())
events = storage.get_asset_events(asset_key)
assert len(events) == 0
assert len(_logs) == 1
assert re.match("Could not parse event record id", _logs[0])
def test_secondary_index_asset_keys(self, storage):
asset_key_one = AssetKey(["one"])
asset_key_two = AssetKey(["two"])
@solid
def materialize_one(_):
yield AssetMaterialization(asset_key=asset_key_one)
yield Output(1)
@solid
def materialize_two(_):
yield AssetMaterialization(asset_key=asset_key_two)
yield Output(1)
def _one():
materialize_one()
def _two():
materialize_two()
events_one, _ = _synthesize_events(_one)
for event in events_one:
storage.store_event(event)
asset_keys = storage.all_asset_keys()
assert len(asset_keys) == 1
assert asset_key_one in set(asset_keys)
migrate_asset_key_data(storage)
asset_keys = storage.all_asset_keys()
assert len(asset_keys) == 1
assert asset_key_one in set(asset_keys)
events_two, _ = _synthesize_events(_two)
for event in events_two:
storage.store_event(event)
asset_keys = storage.all_asset_keys()
assert len(asset_keys) == 2
assert asset_key_one in set(asset_keys)
assert asset_key_two in set(asset_keys)
def test_run_step_stats(self, storage):
@solid(input_defs=[InputDefinition("_input", str)], output_defs=[OutputDefinition(str)])
def should_fail(context, _input):
context.log.info("fail")
raise Exception("booo")
def _one():
should_fail(should_succeed())
events, result = _synthesize_events(_one, check_success=False)
for event in events:
storage.store_event(event)
step_stats = sorted(storage.get_step_stats_for_run(result.run_id), key=lambda x: x.end_time)
assert len(step_stats) == 2
assert step_stats[0].step_key == "should_succeed"
assert step_stats[0].status == StepEventStatus.SUCCESS
assert step_stats[0].end_time > step_stats[0].start_time
assert step_stats[0].attempts == 1
assert len(step_stats[0].attempts_list) == 1
assert step_stats[1].step_key == "should_fail"
assert step_stats[1].status == StepEventStatus.FAILURE
assert step_stats[1].end_time > step_stats[0].start_time
assert step_stats[1].attempts == 1
assert len(step_stats[1].attempts_list) == 1
def test_run_step_stats_with_retries(self, storage):
@solid(input_defs=[InputDefinition("_input", str)], output_defs=[OutputDefinition(str)])
def should_retry(context, _input):
raise RetryRequested(max_retries=3)
def _one():
should_retry(should_succeed())
events, result = _synthesize_events(_one, check_success=False)
for event in events:
storage.store_event(event)
step_stats = storage.get_step_stats_for_run(result.run_id, step_keys=["should_retry"])
assert len(step_stats) == 1
assert step_stats[0].step_key == "should_retry"
assert step_stats[0].status == StepEventStatus.FAILURE
assert step_stats[0].end_time > step_stats[0].start_time
assert step_stats[0].attempts == 4
assert len(step_stats[0].attempts_list) == 4
# After adding the IN_PROGRESS field to the StepEventStatus enum, tests in the internal
# codebase fail, so this test is temporarily skipped.
@pytest.mark.skip
def test_run_step_stats_with_in_progress(self, storage):
def _in_progress_run_records(run_id):
now = time.time()
return [
_event_record(run_id, "A", now - 325, DagsterEventType.STEP_START),
_event_record(run_id, "C", now - 175, DagsterEventType.STEP_START),
_event_record(run_id, "C", now - 150, DagsterEventType.STEP_SKIPPED),
_event_record(run_id, "D", now - 150, DagsterEventType.STEP_START),
_event_record(run_id, "D", now - 150, DagsterEventType.STEP_UP_FOR_RETRY),
_event_record(run_id, "E", now - 150, DagsterEventType.STEP_START),
_event_record(run_id, "E", now - 150, DagsterEventType.STEP_UP_FOR_RETRY),
_event_record(run_id, "E", now - 125, DagsterEventType.STEP_RESTARTED),
]
for record in _in_progress_run_records(run_id=DEFAULT_RUN_ID):
storage.store_event(record)
step_stats = storage.get_step_stats_for_run(DEFAULT_RUN_ID)
assert len(step_stats) == 4
assert step_stats[0].step_key == "A"
assert step_stats[0].status == StepEventStatus.IN_PROGRESS
assert not step_stats[0].end_time
assert step_stats[0].attempts == 1
assert len(step_stats[0].attempts_list) == 1
assert step_stats[1].step_key == "C"
assert step_stats[1].status == StepEventStatus.SKIPPED
assert step_stats[1].end_time > step_stats[1].start_time
assert step_stats[1].attempts == 1
assert len(step_stats[1].attempts_list) == 1
assert step_stats[2].step_key == "D"
assert step_stats[2].status == StepEventStatus.IN_PROGRESS
assert not step_stats[2].end_time
assert step_stats[2].attempts == 1
assert len(step_stats[2].attempts_list) == 1
assert step_stats[3].step_key == "E"
assert step_stats[3].status == StepEventStatus.IN_PROGRESS
assert not step_stats[3].end_time
assert step_stats[3].attempts == 2
assert len(step_stats[3].attempts_list) == 2
def test_run_step_stats_with_resource_markers(self, storage):
@solid(required_resource_keys={"foo"})
def foo_solid():
pass
def _pipeline():
foo_solid()
events, result = _synthesize_events(_pipeline, check_success=False)
for event in events:
storage.store_event(event)
step_stats = storage.get_step_stats_for_run(result.run_id)
assert len(step_stats) == 1
assert step_stats[0].step_key == "foo_solid"
assert step_stats[0].status == StepEventStatus.SUCCESS
assert step_stats[0].end_time > step_stats[0].start_time
assert len(step_stats[0].markers) == 1
assert step_stats[0].markers[0].end_time >= step_stats[0].markers[0].start_time + 0.1
def test_get_event_records(self, storage):
if isinstance(storage, SqliteEventLogStorage):
# test sqlite in test_get_event_records_sqlite
pytest.skip()
asset_key = AssetKey(["path", "to", "asset_one"])
@solid
def materialize_one(_):
yield AssetMaterialization(
asset_key=asset_key,
metadata={
"text": "hello",
"json": {"hello": "world"},
"one_float": 1.0,
"one_int": 1,
},
)
yield Output(1)
def _solids():
materialize_one()
events, _ = _synthesize_events(_solids)
for event in events:
storage.store_event(event)
all_records = storage.get_event_records()
# all logs returned in descending order
assert all_records
min_record_num = all_records[-1].storage_id
max_record_num = min_record_num + len(all_records) - 1
assert [r[0] for r in all_records] == list(range(max_record_num, min_record_num - 1, -1))
assert _event_types([all_records[0].event_log_entry]) == [DagsterEventType.PIPELINE_SUCCESS]
assert _event_types([all_records[-1].event_log_entry]) == [DagsterEventType.PIPELINE_START]
# after cursor
assert not list(
filter(
lambda r: r.storage_id <= 2,
storage.get_event_records(EventRecordsFilter(after_cursor=2)),
)
)
assert [
i.storage_id
for i in storage.get_event_records(
EventRecordsFilter(after_cursor=min_record_num + 2), ascending=True, limit=2
)
] == [min_record_num + 3, min_record_num + 4]
assert [
i.storage_id
for i in storage.get_event_records(
EventRecordsFilter(after_cursor=min_record_num + 2), ascending=False, limit=2
)
] == [max_record_num, max_record_num - 1]
filtered_records = storage.get_event_records(
EventRecordsFilter(event_type=DagsterEventType.PIPELINE_SUCCESS)
)
assert _event_types([r.event_log_entry for r in filtered_records]) == [
DagsterEventType.PIPELINE_SUCCESS
]
def test_get_event_records_sqlite(self, storage):
# test for sqlite only because sqlite requires special logic to handle cross-run queries
if not isinstance(storage, SqliteEventLogStorage):
pytest.skip()
asset_key = AssetKey(["path", "to", "asset_one"])
events = []
def _append_event(event):
events.append(event)
@solid
def materialize_one(_):
yield AssetMaterialization(
asset_key=asset_key,
metadata={
"text": "hello",
"json": {"hello": "world"},
"one_float": 1.0,
"one_int": 1,
},
)
yield Output(1)
@pipeline(mode_defs=[_mode_def(_append_event)])
def a_pipe():
materialize_one()
with instance_for_test() as instance:
if not storage._instance: # pylint: disable=protected-access
storage.register_instance(instance)
# first run
execute_run(
InMemoryPipeline(a_pipe),
instance.create_run_for_pipeline(
a_pipe, run_id="1", run_config={"loggers": {"callback": {}, "console": {}}}
),
instance,
)
for event in events:
storage.store_event(event)
run_records = instance.get_run_records()
assert len(run_records) == 1
# all logs returned in descending order
all_event_records = storage.get_event_records()
assert _event_types([all_event_records[0].event_log_entry]) == [
DagsterEventType.PIPELINE_SUCCESS
]
assert _event_types([all_event_records[-1].event_log_entry]) == [
DagsterEventType.PIPELINE_START
]
# second run
events = []
execute_run(
InMemoryPipeline(a_pipe),
instance.create_run_for_pipeline(
a_pipe, run_id="2", run_config={"loggers": {"callback": {}, "console": {}}}
),
instance,
)
run_records = instance.get_run_records()
assert len(run_records) == 2
for event in events:
storage.store_event(event)
# third run
events = []
execute_run(
InMemoryPipeline(a_pipe),
instance.create_run_for_pipeline(
a_pipe, run_id="3", run_config={"loggers": {"callback": {}, "console": {}}}
),
instance,
)
run_records = instance.get_run_records()
assert len(run_records) == 3
for event in events:
storage.store_event(event)
# of_type
filtered_records = storage.get_event_records(
EventRecordsFilter(
event_type=DagsterEventType.PIPELINE_SUCCESS,
after_cursor=RunShardedEventsCursor(
id=0, run_updated_after=run_records[-1].update_timestamp
), # events after first run
),
ascending=True,
)
assert len(filtered_records) == 2
assert _event_types([r.event_log_entry for r in filtered_records]) == [
DagsterEventType.PIPELINE_SUCCESS,
DagsterEventType.PIPELINE_SUCCESS,
]
assert [r.event_log_entry.run_id for r in filtered_records] == ["2", "3"]
def test_watch_exc_recovery(self, storage):
if not self.can_watch():
pytest.skip("storage cannot watch runs")
# test that an exception in one watch doesn't fail out others
@solid
def return_one(_):
return 1
def _solids():
return_one()
err_run_id = make_new_run_id()
safe_run_id = make_new_run_id()
class CBException(Exception):
pass
def _throw(_):
raise CBException("problem in watch callback")
err_events, _ = _synthesize_events(_solids, run_id=err_run_id)
safe_events, _ = _synthesize_events(_solids, run_id=safe_run_id)
event_list = []
storage.watch(err_run_id, -1, _throw)
storage.watch(safe_run_id, -1, lambda x: event_list.append(x))
for event in err_events:
storage.store_event(event)
storage.end_watch(err_run_id, _throw)
for event in safe_events:
storage.store_event(event)
start = time.time()
while len(event_list) < len(safe_events) and time.time() - start < TEST_TIMEOUT:
time.sleep(0.01)
assert len(event_list) == len(safe_events)
assert all([isinstance(event, EventLogEntry) for event in event_list])
# https://github.com/dagster-io/dagster/issues/5127
@pytest.mark.skip
def test_watch_unwatch(self, storage):
if not self.can_watch():
pytest.skip("storage cannot watch runs")
# test for deadlock bug
@solid
def return_one(_):
return 1
def _solids():
return_one()
err_run_id = make_new_run_id()
safe_run_id = make_new_run_id()
def _unsub(_):
storage.end_watch(err_run_id, _unsub)
err_events, _ = _synthesize_events(_solids, run_id=err_run_id)
safe_events, _ = _synthesize_events(_solids, run_id=safe_run_id)
event_list = []
# Calling end_watch directly emulates the cleanup that happens when an exception is
# raised downstream of the subscription in the dagit webserver.
storage.watch(err_run_id, -1, _unsub)
# Other active watches should proceed correctly.
storage.watch(safe_run_id, -1, lambda x: event_list.append(x))
for event in err_events:
storage.store_event(event)
for event in safe_events:
storage.store_event(event)
start = time.time()
while len(event_list) < len(safe_events) and time.time() - start < TEST_TIMEOUT:
time.sleep(0.01)
assert len(event_list) == len(safe_events)
assert all([isinstance(event, EventLogEntry) for event in event_list])
def test_engine_event_markers(self, storage):
@solid
def return_one(_):
return 1
@pipeline
def a_pipe():
return_one()
with instance_for_test() as instance:
if not storage._instance: # pylint: disable=protected-access
storage.register_instance(instance)
run_id = make_new_run_id()
run = instance.create_run_for_pipeline(a_pipe, run_id=run_id)
instance.report_engine_event(
"blah blah", run, EngineEventData(marker_start="FOO"), step_key="return_one"
)
instance.report_engine_event(
"blah blah", run, EngineEventData(marker_end="FOO"), step_key="return_one"
)
logs = storage.get_logs_for_run(run_id)
for entry in logs:
assert entry.step_key == "return_one"
def test_latest_materializations(self, storage):
@solid
def one(_):
yield AssetMaterialization(AssetKey("a"), tags={"num": str(1)})
yield AssetMaterialization(AssetKey("b"), tags={"num": str(1)})
yield AssetMaterialization(AssetKey("c"), tags={"num": str(1)})
yield AssetMaterialization(AssetKey("d"), tags={"num": str(1)})
yield AssetObservation(AssetKey("a"), metadata={"foo": "bar"})
yield Output(1)
@solid
def two(_):
yield AssetMaterialization(AssetKey("b"), tags={"num": str(2)})
yield AssetMaterialization(AssetKey("c"), tags={"num": str(2)})
yield Output(2)
def _event_tags(event):
assert event.dagster_event_type == DagsterEventType.ASSET_MATERIALIZATION
return event.dagster_event.step_materialization_data.materialization.tags
def _fetch_events(storage):
return storage.get_latest_materialization_events(
[
AssetKey("a"),
AssetKey("b"),
AssetKey("c"),
AssetKey("d"),
]
)
events, _ = _synthesize_events(lambda: one())
for event in events:
storage.store_event(event)
events_by_key = _fetch_events(storage)
assert len(events_by_key) == 4
assert _event_tags(events_by_key[AssetKey("a")])["num"] == "1"
assert _event_tags(events_by_key[AssetKey("b")])["num"] == "1"
assert _event_tags(events_by_key[AssetKey("c")])["num"] == "1"
assert _event_tags(events_by_key[AssetKey("d")])["num"] == "1"
# wipe 2 of the assets, make sure we respect that
if self.can_wipe():
storage.wipe_asset(AssetKey("a"))
storage.wipe_asset(AssetKey("b"))
events_by_key = _fetch_events(storage)
assert events_by_key.get(AssetKey("a")) is None
assert events_by_key.get(AssetKey("b")) is None
assert _event_tags(events_by_key[AssetKey("c")])["num"] == "1"
assert _event_tags(events_by_key[AssetKey("d")])["num"] == "1"
# rematerialize one of the wiped assets, one of the existing assets
events, _ = _synthesize_events(lambda: two())
for event in events:
storage.store_event(event)
events_by_key = _fetch_events(storage)
assert events_by_key.get(AssetKey("a")) is None
assert _event_tags(events_by_key[AssetKey("b")])["num"] == "2"
assert _event_tags(events_by_key[AssetKey("c")])["num"] == "2"
assert _event_tags(events_by_key[AssetKey("d")])["num"] == "1"
else:
events, _ = _synthesize_events(lambda: two())
for event in events:
storage.store_event(event)
events_by_key = _fetch_events(storage)
assert _event_tags(events_by_key[AssetKey("a")])["num"] == "1"
assert _event_tags(events_by_key[AssetKey("b")])["num"] == "2"
assert _event_tags(events_by_key[AssetKey("c")])["num"] == "2"
assert _event_tags(events_by_key[AssetKey("d")])["num"] == "1"
def test_asset_keys(self, storage):
with instance_for_test() as instance:
if not storage._instance: # pylint: disable=protected-access
storage.register_instance(instance)
events_one, _ = _synthesize_events(lambda: one_solid(), instance=instance)
events_two, _ = _synthesize_events(lambda: two_solids(), instance=instance)
for event in events_one + events_two:
storage.store_event(event)
asset_keys = storage.all_asset_keys()
assert len(asset_keys) == 3
assert set([asset_key.to_string() for asset_key in asset_keys]) == set(
['["asset_1"]', '["asset_2"]', '["path", "to", "asset_3"]']
)
def test_has_asset_key(self, storage):
with instance_for_test() as instance:
if not storage._instance: # pylint: disable=protected-access
storage.register_instance(instance)
events_one, _ = _synthesize_events(lambda: one_solid(), instance=instance)
events_two, _ = _synthesize_events(lambda: two_solids(), instance=instance)
for event in events_one + events_two:
storage.store_event(event)
assert storage.has_asset_key(AssetKey(["path", "to", "asset_3"]))
assert not storage.has_asset_key(AssetKey(["path", "to", "bogus", "asset"]))
def test_asset_events(self, storage):
with instance_for_test() as instance:
if not storage._instance: # pylint: disable=protected-access
storage.register_instance(instance)
events_one, _ = _synthesize_events(lambda: one_solid(), instance=instance)
events_two, _ = _synthesize_events(lambda: two_solids(), instance=instance)
for event in events_one + events_two:
storage.store_event(event)
asset_events = storage.get_asset_events(AssetKey("asset_1"))
assert len(asset_events) == 2
for event in asset_events:
assert isinstance(event, EventLogEntry)
assert event.is_dagster_event
assert event.dagster_event.event_type == DagsterEventType.ASSET_MATERIALIZATION
assert event.dagster_event.asset_key
asset_events = storage.get_asset_events(AssetKey(["path", "to", "asset_3"]))
assert len(asset_events) == 1
def test_asset_events_range(self, storage):
with instance_for_test() as instance:
if not storage._instance: # pylint: disable=protected-access
storage.register_instance(instance)
events_one, _ = _synthesize_events(lambda: one_solid(), instance=instance)
two_solids_first, _ = _synthesize_events(lambda: two_solids(), instance=instance)
two_solids_second, _ = _synthesize_events(lambda: two_solids(), instance=instance)
for event in events_one + two_solids_first + two_solids_second:
storage.store_event(event)
# descending
asset_events = storage.get_asset_events(AssetKey("asset_1"), include_cursor=True)
assert len(asset_events) == 3
[id_three, id_two, id_one] = [id for id, event in asset_events]
after_events = storage.get_asset_events(
AssetKey("asset_1"), include_cursor=True, after_cursor=id_one
)
assert len(after_events) == 2
assert [id for id, event in after_events] == [id_three, id_two]
before_events = storage.get_asset_events(
AssetKey("asset_1"), include_cursor=True, before_cursor=id_three
)
assert len(before_events) == 2
assert [id for id, event in before_events] == [id_two, id_one]
between_events = storage.get_asset_events(
AssetKey("asset_1"),
include_cursor=True,
before_cursor=id_three,
after_cursor=id_one,
)
assert len(between_events) == 1
assert [id for id, event in between_events] == [id_two]
# ascending
asset_events = storage.get_asset_events(
AssetKey("asset_1"), include_cursor=True, ascending=True
)
assert len(asset_events) == 3
[id_one, id_two, id_three] = [id for id, event in asset_events]
after_events = storage.get_asset_events(
AssetKey("asset_1"), include_cursor=True, after_cursor=id_one, ascending=True
)
assert len(after_events) == 2
assert [id for id, event in after_events] == [id_two, id_three]
before_events = storage.get_asset_events(
AssetKey("asset_1"), include_cursor=True, before_cursor=id_three, ascending=True
)
assert len(before_events) == 2
assert [id for id, event in before_events] == [id_one, id_two]
between_events = storage.get_asset_events(
AssetKey("asset_1"),
include_cursor=True,
before_cursor=id_three,
after_cursor=id_one,
ascending=True,
)
assert len(between_events) == 1
assert [id for id, event in between_events] == [id_two]
def test_asset_run_ids(self, storage):
with instance_for_test() as instance:
if not storage._instance: # pylint: disable=protected-access
storage.register_instance(instance)
one_run_id = "one"
two_run_id = "two"
one_events, _ = _synthesize_events(
lambda: one_solid(), run_id=one_run_id, instance=instance
)
two_events, _ = _synthesize_events(
lambda: two_solids(), run_id=two_run_id, instance=instance
)
for event in one_events + two_events:
storage.store_event(event)
run_ids = storage.get_asset_run_ids(AssetKey("asset_1"))
assert set(run_ids) == set([one_run_id, two_run_id])
def test_asset_normalization(self, storage):
with instance_for_test() as instance:
if not storage._instance: # pylint: disable=protected-access
storage.register_instance(instance)
@solid
def solid_normalization(_):
yield AssetMaterialization(asset_key="path/to-asset_4")
yield Output(1)
events, _ = _synthesize_events(lambda: solid_normalization(), instance=instance)
for event in events:
storage.store_event(event)
asset_keys = storage.all_asset_keys()
assert len(asset_keys) == 1
asset_key = asset_keys[0]
assert asset_key.to_string() == '["path", "to", "asset_4"]'
assert asset_key.path == ["path", "to", "asset_4"]
def test_asset_wipe(self, storage):
with instance_for_test() as instance:
if not storage._instance: # pylint: disable=protected-access
storage.register_instance(instance)
one_run_id = "one_run_id"
two_run_id = "two_run_id"
events_one, _ = _synthesize_events(
lambda: one_solid(), run_id=one_run_id, instance=instance
)
events_two, _ = _synthesize_events(
lambda: two_solids(), run_id=two_run_id, instance=instance
)
for event in events_one + events_two:
storage.store_event(event)
asset_keys = storage.all_asset_keys()
assert len(asset_keys) == 3
assert storage.has_asset_key(AssetKey("asset_1"))
asset_events = storage.get_asset_events(AssetKey("asset_1"))
assert len(asset_events) == 2
asset_run_ids = storage.get_asset_run_ids(AssetKey("asset_1"))
assert set(asset_run_ids) == set([one_run_id, two_run_id])
log_count = len(storage.get_logs_for_run(one_run_id))
if self.can_wipe():
for asset_key in asset_keys:
storage.wipe_asset(asset_key)
asset_keys = storage.all_asset_keys()
assert len(asset_keys) == 0
assert not storage.has_asset_key(AssetKey("asset_1"))
asset_events = storage.get_asset_events(AssetKey("asset_1"))
assert len(asset_events) == 0
asset_run_ids = storage.get_asset_run_ids(AssetKey("asset_1"))
assert set(asset_run_ids) == set()
assert log_count == len(storage.get_logs_for_run(one_run_id))
one_run_id = "one_run_id_2"
events_one, _ = _synthesize_events(
lambda: one_solid(), run_id=one_run_id, instance=instance
)
for event in events_one:
storage.store_event(event)
asset_keys = storage.all_asset_keys()
assert len(asset_keys) == 1
assert storage.has_asset_key(AssetKey("asset_1"))
asset_events = storage.get_asset_events(AssetKey("asset_1"))
assert len(asset_events) == 1
asset_run_ids = storage.get_asset_run_ids(AssetKey("asset_1"))
assert set(asset_run_ids) == set([one_run_id])
def test_asset_secondary_index(self, storage):
with instance_for_test() as instance:
if not storage._instance: # pylint: disable=protected-access
storage.register_instance(instance)
events_one, _ = _synthesize_events(lambda: one_solid(), instance=instance)
for event in events_one:
storage.store_event(event)
asset_keys = storage.all_asset_keys()
assert len(asset_keys) == 1
migrate_asset_key_data(storage)
two_first_run_id = "first"
two_second_run_id = "second"
events_two, _ = _synthesize_events(
lambda: two_solids(), run_id=two_first_run_id, instance=instance
)
events_two_two, _ = _synthesize_events(
lambda: two_solids(), run_id=two_second_run_id, instance=instance
)
for event in events_two + events_two_two:
storage.store_event(event)
asset_keys = storage.all_asset_keys()
assert len(asset_keys) == 3
storage.delete_events(two_first_run_id)
asset_keys = storage.all_asset_keys()
assert len(asset_keys) == 3
storage.delete_events(two_second_run_id)
asset_keys = storage.all_asset_keys()
assert len(asset_keys) == 1
def test_asset_partition_query(self, storage):
@solid(config_schema={"partition": Field(str, is_required=False)})
def solid_partitioned(context):
yield AssetMaterialization(
asset_key=AssetKey("asset_key"), partition=context.solid_config.get("partition")
)
yield Output(1)
with instance_for_test() as instance:
if not storage._instance: # pylint: disable=protected-access
storage.register_instance(instance)
get_partitioned_config = lambda partition: {
"solids": {"solid_partitioned": {"config": {"partition": partition}}}
}
for partition in [f"partition_{x}" for x in ["a", "a", "b", "c"]]:
run_events, _ = _synthesize_events(
lambda: solid_partitioned(),
instance=instance,
run_config=get_partitioned_config(partition),
)
for event in run_events:
storage.store_event(event)
events = storage.get_asset_events(AssetKey("asset_key"))
assert len(events) == 4
events = storage.get_asset_events(
AssetKey("asset_key"), partitions=["partition_a", "partition_b"]
)
assert len(events) == 3
def test_get_asset_keys(self, storage):
@op
def gen_op():
yield AssetMaterialization(asset_key=AssetKey(["a"]))
yield AssetMaterialization(asset_key=AssetKey(["c"]))
yield AssetMaterialization(asset_key=AssetKey(["banana"]))
yield AssetMaterialization(asset_key=AssetKey(["b", "x"]))
yield AssetMaterialization(asset_key=AssetKey(["b", "y"]))
yield AssetMaterialization(asset_key=AssetKey(["b", "z"]))
yield Output(1)
with instance_for_test() as instance:
if not storage._instance: # pylint: disable=protected-access
storage.register_instance(instance)
events, _ = _synthesize_events(lambda: gen_op(), instance=instance)
for event in events:
storage.store_event(event)
asset_keys = storage.get_asset_keys()
assert len(asset_keys) == 6
# should come out sorted
assert [asset_key.to_string() for asset_key in asset_keys] == [
'["a"]',
'["b", "x"]',
'["b", "y"]',
'["b", "z"]',
'["banana"]',
'["c"]',
]
# pagination fields
asset_keys = storage.get_asset_keys(cursor='["b", "y"]', limit=1)
assert len(asset_keys) == 1
assert asset_keys[0].to_string() == '["b", "z"]'
# prefix filter
asset_keys = storage.get_asset_keys(prefix=["b"])
assert len(asset_keys) == 3
assert [asset_key.to_string() for asset_key in asset_keys] == [
'["b", "x"]',
'["b", "y"]',
'["b", "z"]',
]
def test_get_materialization_count_by_partition(self, storage):
a = AssetKey("no_materializations_asset")
b = AssetKey("no_partitions_asset")
c = AssetKey("two_partitions_asset")
d = AssetKey("one_partition_asset")
@op
def materialize():
yield AssetMaterialization(b)
yield AssetMaterialization(c, partition="a")
yield Output(None)
@op
def materialize_two():
yield AssetMaterialization(d, partition="x")
yield AssetMaterialization(c, partition="a")
yield AssetMaterialization(c, partition="b")
yield Output(None)
def _fetch_counts(storage):
return storage.get_materialization_count_by_partition([c, d])
with instance_for_test() as instance:
if not storage._instance: # pylint: disable=protected-access
storage.register_instance(instance)
events_one, _ = _synthesize_events(lambda: materialize(), instance=instance)
for event in events_one:
storage.store_event(event)
materialization_count_by_key = storage.get_materialization_count_by_partition([a, b, c])
assert materialization_count_by_key.get(a) == {}
assert materialization_count_by_key.get(b) == {}
assert materialization_count_by_key.get(c)["a"] == 1
assert len(materialization_count_by_key.get(c)) == 1
events_two, _ = _synthesize_events(lambda: materialize_two(), instance=instance)
for event in events_two:
storage.store_event(event)
materialization_count_by_key = storage.get_materialization_count_by_partition([a, b, c])
assert materialization_count_by_key.get(c)["a"] == 2
assert materialization_count_by_key.get(c)["b"] == 1
# wipe asset, make sure we respect that
if self.can_wipe():
storage.wipe_asset(c)
materialization_count_by_partition = _fetch_counts(storage)
assert materialization_count_by_partition.get(c) == {}
# rematerialize wiped asset
events, _ = _synthesize_events(lambda: materialize_two(), instance=instance)
for event in events:
storage.store_event(event)
materialization_count_by_partition = _fetch_counts(storage)
assert materialization_count_by_partition.get(c)["a"] == 1
assert materialization_count_by_partition.get(d)["x"] == 2
def test_get_observation(self, storage):
a = AssetKey(["key_a"])
@op
def gen_op():
yield AssetObservation(asset_key=a, metadata={"foo": "bar"})
yield Output(1)
with instance_for_test() as instance:
if not storage._instance: # pylint: disable=protected-access
storage.register_instance(instance)
events_one, _ = _synthesize_events(lambda: gen_op(), instance=instance)
for event in events_one:
storage.store_event(event)
records = storage.get_event_records(
EventRecordsFilter(
event_type=DagsterEventType.ASSET_OBSERVATION,
asset_key=a,
)
)
assert len(records) == 1
| 36.213319 | 120 | 0.601894 |
8dbdd9fd2d5c9074921def17c77236652f0fec7e | 8,614 | py | Python |
nurse_scheduling.py | kartikd2001/nurse-scheduling | 32c61e02b87cbd16a3a0ffe5cc90068b23a2b73a | ["Apache-2.0"] | null | null | null |
nurse_scheduling.py | kartikd2001/nurse-scheduling | 32c61e02b87cbd16a3a0ffe5cc90068b23a2b73a | ["Apache-2.0"] | null | null | null |
nurse_scheduling.py | kartikd2001/nurse-scheduling | 32c61e02b87cbd16a3a0ffe5cc90068b23a2b73a | ["Apache-2.0"] | null | null | null |
# Copyright 2020 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code includes an implementation of the algorithm described in Ikeda,
# K., Nakamura, Y. & Humble, T.S. Application of Quantum Annealing to Nurse
# Scheduling Problem. Sci Rep 9, 12837 (2019).
# https://doi.org/10.1038/s41598-019-49172-3, © The Author(s) 2019, use of
# which is licensed under a Creative Commons Attribution 4.0 International
# License (To view a copy of this license, visit
# http://creativecommons.org/licenses/by/4.0/).
from dwave.system import LeapHybridSampler
from dimod import BinaryQuadraticModel
from collections import defaultdict
from copy import deepcopy
# Overall model variables: problem size
# binary variable q_nd is the assignment of nurse n to day d
n_nurses = 3 # count nurses n = 1 ... n_nurses
n_days = 11 # count scheduling days as d = 1 ... n_days
size = n_days * n_nurses
# Parameters for hard nurse constraint
# a is a positive correlation coefficient for implementing the hard nurse
# constraint - value provided by Ikeda, Nakamura, Humble
a = 3.5
# Parameters for hard shift constraint
# Hard shift constraint: at least one nurse working every day
# Lagrange parameter, for hard shift constraint, on workforce and effort
lagrange_hard_shift = 1.3
workforce = 1 # Workforce function W(d) - set to a constant for now
effort = 1 # Effort function E(n) - set to a constant for now
# Parameters for soft nurse constraint
# Soft nurse constraint: all nurses should have approximately even work
# schedules
# Lagrange parameter, for shift constraints, on work days is called gamma
# in the paper
# Minimum duty days 'min_duty_days' - the number of work days that each nurse
# wants to be scheduled. At present, each will do the minimum on average.
# The parameter gamma's value suggested by Ikeda, Nakamura, Humble
lagrange_soft_nurse = 0.3 # Lagrange parameter for soft nurse, gamma
preference = 1 # preference function - constant for now
min_duty_days = int(n_days/n_nurses)
# Find composite index into 1D list for (nurse_index, day_index)
def get_index(nurse_index, day_index):
return nurse_index * n_days + day_index
# Inverse of get_index - given a composite index in a 1D list, return the
# nurse_index and day_index
def get_nurse_and_day(index):
nurse_index, day_index = divmod(index, n_days)
return nurse_index, day_index
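# Quick sanity check of the two index helpers (illustrative only): with
# n_days = 11, get_index(1, 3) == 1 * 11 + 3 == 14 and
# get_nurse_and_day(14) == divmod(14, 11) == (1, 3), so they are inverses.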
# Hard nurse constraint: no nurse works two consecutive days
# It does not use a Lagrange parameter; instead it is encoded directly in the
# symmetric, real-valued interaction matrix J, whose entries are either a or zero.
# Using the composite indices i(n, d) and j(n, d) as functions of n and d:
# J_i(n,d),j(n,d+1) = a, and 0 otherwise.
J = defaultdict(int)
for nurse in range(n_nurses):
for day in range(n_days - 1):
nurse_day_1 = get_index(nurse, day)
nurse_day_2 = get_index(nurse, day+1)
J[nurse_day_1, nurse_day_2] = a
# The Q matrix holds the QUBO cost terms; start from a copy of the J matrix
Q = deepcopy(J)
# Hard shift constraint: at least one nurse working every day
# The sum is over each day.
# This constraint tries to make (effort * sum(q_i)) equal to workforce,
# which is set to a constant in this implementation, so that one nurse
# is working each day.
# Overall hard shift constraint:
# lagrange_hard_shift * sum_d ((sum_n(effort * q_i(n,d)) - workforce) ** 2)
#
# with constant effort and constant workforce:
# = lagrange_hard_shift * sum_d ( effort * sum_n q_i(n,d) - workforce ) ** 2
# = lagrange_hard_shift * sum_d [ effort ** 2 * (sum_n q_i(n,d) ** 2)
# - 2 effort * workforce * sum_n q_i(n,d)
# + workforce ** 2 ]
# The constant term is moved to the offset, below, right before we solve
# the QUBO
#
# Expanding and merging the terms ( m is another sum over n ):
# lagrange_hard_shift * (effort ** 2 - 2 effort * workforce) *
# sum_d sum_n q_i(n,d)
# + lagrange_hard_shift * effort ** 2 * sum_d sum_m sum_n q_i(n,d) q_j(m, d)
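# Worked numbers for the expansion above (illustrative only), using the
# constants defined in this file (effort = 1, workforce = 1):
#   diagonal coefficient = lagrange_hard_shift * (1 - 2) = -lagrange_hard_shift
#   off-diagonal coefficient (same day, nurse pair) = 2 * lagrange_hard_shift
# These are exactly the increments applied in the two loops below.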
# Diagonal terms in hard shift constraint, without the workforce**2 term
for nurse in range(n_nurses):
for day in range(n_days):
ind = get_index(nurse, day)
Q[ind, ind] += lagrange_hard_shift * (effort ** 2 - (2 * workforce * effort))
# Off-diagonal terms in hard shift constraint
# Include only the same day, across nurses
for day in range(n_days):
for nurse1 in range(n_nurses):
for nurse2 in range(nurse1 + 1, n_nurses):
ind1 = get_index(nurse1, day)
ind2 = get_index(nurse2, day)
Q[ind1, ind2] += 2 * lagrange_hard_shift * effort ** 2
# Soft nurse constraint: all nurses should have approximately even work
# schedules
# This constraint tries to make preference * sum(q_i) equal to min_duty_days,
# so that the nurses have the same number of days. The sum of the q_i,
# over the number of days, is each nurse's number of days worked in the
# schedule.
# Overall soft nurse constraint:
# lagrange_soft_nurse * sum_n ((sum_d(preference * q_i(n,d)) - min_duty_days) ** 2)
# with constant preference and constant min_duty_days:
# = lagrange_soft_nurse * sum_n ( preference * sum_d q_i(n,d) - min_duty_days ) ** 2
# = lagrange_soft_nurse * sum_n [ preference ** 2 * (sum_d q_i(n,d) ** 2)
# - 2 preference * min_duty_days * sum_d q_i(n,d)
# + min_duty_days ** 2 ]
# The constant term is moved to the offset, below, right before we solve
# the QUBO
#
# Expanding the square of the sum_d term and merging the terms
# (d1 and d2 are sums over d):
# = lagrange_soft_nurse * (preference ** 2 - 2 preference * min_duty_days) * sum_n sum_d q_i(n,d)
# + lagrange_soft_nurse * preference ** 2 * sum_n sum_d1 sum_d2 q_i(n,d1)
# * q_j(n, d2)
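# Worked numbers for the soft nurse expansion (illustrative only), using the
# constants defined in this file (preference = 1, min_duty_days = 11 // 3 = 3):
#   diagonal coefficient = lagrange_soft_nurse * (1 - 6) = -5 * lagrange_soft_nurse
#   off-diagonal coefficient (same nurse, day pair) = 2 * lagrange_soft_nurse
# These are exactly the increments applied in the two loops below.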
# Diagonal terms in soft nurse constraint, without the min_duty_days**2 term
for nurse in range(n_nurses):
for day in range(n_days):
ind = get_index(nurse, day)
Q[ind, ind] += lagrange_soft_nurse * (preference ** 2 - (2 * min_duty_days * preference))
# Off-diagonal terms in soft nurse constraint
# Include only the same nurse, across days
for nurse in range(n_nurses):
for day1 in range(n_days):
for day2 in range(day1 + 1, n_days):
ind1 = get_index(nurse, day1)
ind2 = get_index(nurse, day2)
Q[ind1, ind2] += 2 * lagrange_soft_nurse * preference ** 2
# Solve the problem, and use the offset to scale the energy
e_offset = (lagrange_hard_shift * n_days * workforce ** 2) + (lagrange_soft_nurse * n_nurses * min_duty_days ** 2)
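# Sanity check of the constant offset with the values above (illustrative only):
# e_offset = 1.3 * 11 * 1**2 + 0.3 * 3 * 3**2 = 14.3 + 8.1 = 22.4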
bqm = BinaryQuadraticModel.from_qubo(Q, offset=e_offset)
sampler = LeapHybridSampler()
results = sampler.sample(bqm, label='Example - Nurse Scheduling')
# Get the results
smpl = results.first.sample
energy = results.first.energy
print("Size ", size)
print("Energy ", energy)
# Check the results by doing the sums directly
# J sum
sum_j = 0
for i in range(size):
for j in range(size):
sum_j += J[i, j] * smpl[i] * smpl[j]
print("Checking Hard nurse constraint ", sum_j)
# workforce sum
sum_w = 0
for d in range(n_days):
sum_n = 0
for n in range(n_nurses):
sum_n += effort * smpl[get_index(n, d)]
sum_w += lagrange_hard_shift * (sum_n - workforce) * (sum_n - workforce)
print("Checking Hard shift constraint ", sum_w)
# min_duty_days sum
sum_f = 0
for n in range(n_nurses):
sum_d = 0
for d in range(n_days):
sum_d += preference * smpl[get_index(n, d)]
sum_f += lagrange_soft_nurse * (sum_d - min_duty_days) * (sum_d - min_duty_days)
print("Checking Soft nurse constraint ", sum_f)
# Graphics
sched = [get_nurse_and_day(j) for j in range(size) if smpl[j] == 1]
str_header_for_output = " " * 11
str_header_for_output += " ".join(map(str, range(n_days)))
print(str_header_for_output)
for n in range(n_nurses):
str_row = ""
for d in range(n_days):
outcome = "X" if (n, d) in sched else " "
if d > 9:
outcome += " "
str_row += " " + outcome
print("Nurse ", n, str_row)
| 40.065116 | 114 | 0.69062 |
c1975209921000e5af9a8047c30c4d5c42546eac | 4,712 | py | Python |
sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_04_01/_container_service_client.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z |
sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_04_01/_container_service_client.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | null | null | null |
sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_04_01/_container_service_client.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from msrest import Deserializer, Serializer
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from . import models
from ._configuration import ContainerServiceClientConfiguration
from .operations import AgentPoolsOperations, ManagedClustersOperations, Operations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class ContainerServiceClient:
"""The Container Service Client.
:ivar operations: Operations operations
:vartype operations: azure.mgmt.containerservice.v2020_04_01.operations.Operations
:ivar managed_clusters: ManagedClustersOperations operations
:vartype managed_clusters:
azure.mgmt.containerservice.v2020_04_01.operations.ManagedClustersOperations
:ivar agent_pools: AgentPoolsOperations operations
:vartype agent_pools: azure.mgmt.containerservice.v2020_04_01.operations.AgentPoolsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: Subscription credentials which uniquely identify Microsoft Azure
subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2020-04-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = ContainerServiceClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.managed_clusters = ManagedClustersOperations(self._client, self._config, self._serialize, self._deserialize)
self.agent_pools = AgentPoolsOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request: HttpRequest,
**kwargs: Any
) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> ContainerServiceClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
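# ----------------------------------------------------------------------------
# Usage sketch (illustrative only; not part of the generated client). Names
# outside this module are assumptions: DefaultAzureCredential requires the
# azure-identity package, and managed_clusters.list() is assumed to be exposed
# by ManagedClustersOperations.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.containerservice.v2020_04_01 import ContainerServiceClient
#
#   with ContainerServiceClient(
#       credential=DefaultAzureCredential(),
#       subscription_id="<subscription-id>",
#   ) as client:
#       for cluster in client.managed_clusters.list():
#           print(cluster.name)
# ----------------------------------------------------------------------------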
| 44.45283 | 124 | 0.703098 |
77448f502451e14bb6cb5967cc448ad18ddbfa7b | 9,428 | py | Python |
fabric/io.py | robinhoodmarkets/fabric | 716fed77464b1fd7100815c774e7265d8fa285d1 | ["BSD-2-Clause"] | 1 | 2021-06-16T12:23:00.000Z | 2021-06-16T12:23:00.000Z |
fabric/io.py | robinhoodmarkets/fabric | 716fed77464b1fd7100815c774e7265d8fa285d1 | ["BSD-2-Clause"] | null | null | null |
fabric/io.py | robinhoodmarkets/fabric | 716fed77464b1fd7100815c774e7265d8fa285d1 | ["BSD-2-Clause"] | 1 | 2018-06-12T20:08:56.000Z | 2018-06-12T20:08:56.000Z |
from __future__ import with_statement
import sys
import time
import re
import socket
from select import select
from fabric.state import env, output, win32
from fabric.auth import get_password, set_password
import fabric.network
from fabric.network import ssh, normalize
from fabric.utils import RingBuffer
from fabric.exceptions import CommandTimeout
if win32:
import msvcrt
def _endswith(char_list, substring):
tail = char_list[-1 * len(substring):]
substring = list(substring)
return tail == substring
def _has_newline(bytelist):
return '\r' in bytelist or '\n' in bytelist
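# Illustrative behavior of the helpers above (not part of the original module):
#   _endswith(list("sudo password: "), "password: ") -> True
#   _has_newline("partial output") -> False
#   _has_newline("line one\n") -> True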
def output_loop(*args, **kwargs):
OutputLooper(*args, **kwargs).loop()
class OutputLooper(object):
def __init__(self, chan, attr, stream, capture, timeout):
self.chan = chan
self.stream = stream
self.capture = capture
self.timeout = timeout
self.read_func = getattr(chan, attr)
self.prefix = "[%s] %s: " % (
env.host_string,
"out" if attr == 'recv' else "err"
)
self.printing = getattr(output, 'stdout' if (attr == 'recv') else 'stderr')
self.linewise = (env.linewise or env.parallel)
self.reprompt = False
self.read_size = 4096
self.write_buffer = RingBuffer([], maxlen=len(self.prefix))
def _flush(self, text):
self.stream.write(text)
# Actually only flush if not in linewise mode.
# When linewise is set (e.g. in parallel mode) flushing makes
# doubling-up of line prefixes, and other mixed output, more likely.
if not env.linewise:
self.stream.flush()
self.write_buffer.extend(text)
def loop(self):
"""
Loop, reading from <chan>.<attr>(), writing to <stream> and buffering to <capture>.
Will raise `~fabric.exceptions.CommandTimeout` if network timeouts
continue to be seen past the defined ``self.timeout`` threshold.
(Timeouts before then are considered part of normal short-timeout fast
network reading; see Fabric issue #733 for background.)
"""
# Initialize loop variables
initial_prefix_printed = False
seen_cr = False
line = []
# Allow prefix to be turned off.
if not env.output_prefix:
self.prefix = ""
start = time.time()
while True:
# Handle actual read
try:
bytelist = self.read_func(self.read_size)
except socket.timeout:
elapsed = time.time() - start
if self.timeout is not None and elapsed > self.timeout:
raise CommandTimeout(timeout=self.timeout)
continue
# Empty byte == EOS
if bytelist == '':
# If linewise, ensure we flush any leftovers in the buffer.
if self.linewise and line:
self._flush(self.prefix)
self._flush("".join(line))
break
# A None capture variable implies that we're in open_shell()
if self.capture is None:
# Just print directly -- no prefixes, no capturing, nada
# And since we know we're using a pty in this mode, just go
# straight to stdout.
self._flush(bytelist)
# Otherwise, we're in run/sudo and need to handle capturing and
# prompts.
else:
# Print to user
if self.printing:
printable_bytes = bytelist
# Small state machine to eat \n after \r
if printable_bytes[-1] == "\r":
seen_cr = True
if printable_bytes[0] == "\n" and seen_cr:
printable_bytes = printable_bytes[1:]
seen_cr = False
while _has_newline(printable_bytes) and printable_bytes != "":
# at most 1 split !
cr = re.search("(\r\n|\r|\n)", printable_bytes)
if cr is None:
break
end_of_line = printable_bytes[:cr.start(0)]
printable_bytes = printable_bytes[cr.end(0):]
if not initial_prefix_printed:
self._flush(self.prefix)
if _has_newline(end_of_line):
end_of_line = ''
if self.linewise:
self._flush("".join(line) + end_of_line + "\n")
line = []
else:
self._flush(end_of_line + "\n")
initial_prefix_printed = False
if self.linewise:
line += [printable_bytes]
else:
if not initial_prefix_printed:
self._flush(self.prefix)
initial_prefix_printed = True
self._flush(printable_bytes)
# Now we have handled printing, handle interactivity
read_lines = re.split(r"(\r|\n|\r\n)", bytelist)
for fragment in read_lines:
# Store in capture buffer
self.capture += fragment
# Handle prompts
expected, response = self._get_prompt_response()
if expected:
del self.capture[-1 * len(expected):]
self.chan.sendall(str(response) + '\n')
else:
prompt = _endswith(self.capture, env.sudo_prompt)
try_again = (_endswith(self.capture, env.again_prompt + '\n')
or _endswith(self.capture, env.again_prompt + '\r\n'))
if prompt:
self.prompt()
elif try_again:
self.try_again()
# Print trailing new line if the last thing we printed was our line
# prefix.
if self.prefix and "".join(self.write_buffer) == self.prefix:
self._flush('\n')
def prompt(self):
# Obtain cached password, if any
password = get_password(*normalize(env.host_string))
# Remove the prompt itself from the capture buffer. This is
# backwards compatible with Fabric 0.9.x behavior; the user
# will still see the prompt on their screen (no way to avoid
# this) but at least it won't clutter up the captured text.
del self.capture[-1 * len(env.sudo_prompt):]
# If the password we just tried was bad, prompt the user again.
if (not password) or self.reprompt:
# Print the prompt and/or the "try again" notice if
# output is being hidden. In other words, since we need
# the user's input, they need to see why we're
# prompting them.
if not self.printing:
self._flush(self.prefix)
if self.reprompt:
self._flush(env.again_prompt + '\n' + self.prefix)
self._flush(env.sudo_prompt)
# Prompt for, and store, password. Give empty prompt so the
# initial display "hides" just after the actually-displayed
# prompt from the remote end.
self.chan.input_enabled = False
password = fabric.network.prompt_for_password(
prompt=" ", no_colon=True, stream=self.stream
)
self.chan.input_enabled = True
# Update env.password, env.passwords if necessary
user, host, port = normalize(env.host_string)
set_password(user, host, port, password)
# Reset reprompt flag
self.reprompt = False
# Send current password down the pipe
self.chan.sendall(password + '\n')
def try_again(self):
# Remove text from capture buffer
self.capture = self.capture[:len(env.again_prompt)]
# Set state so we re-prompt the user at the next prompt.
self.reprompt = True
def _get_prompt_response(self):
"""
Iterate through the request prompts dict and return the response and
original request if we find a match
"""
for tup in env.prompts.iteritems():
if _endswith(self.capture, tup[0]):
return tup
return None, None
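# Illustrative sketch of the prompt auto-matching implemented above: the
# capture buffer is a plain list of characters, and env.prompts maps an
# expected prompt string to the reply that should be sent down the channel.
# The prompt/response pair below is made-up example data.
def _demo_prompt_matching():
    capture = list("Do you want to continue? [Y/n] ")
    prompts = {"[Y/n] ": "Y"}
    for expected, response in prompts.items():
        if _endswith(capture, expected):
            # OutputLooper would strip `expected` from the end of the
            # capture buffer and send `response` + '\n' over the channel.
            return expected, response
    return None, None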
def input_loop(chan, using_pty):
while not chan.exit_status_ready():
if win32:
have_char = msvcrt.kbhit()
else:
r, w, x = select([sys.stdin], [], [], 0.0)
have_char = (r and r[0] == sys.stdin)
if have_char and chan.input_enabled:
# Send all local stdin to remote end's stdin
byte = msvcrt.getch() if win32 else sys.stdin.read(1)
chan.sendall(byte)
# Optionally echo locally, if needed.
if not using_pty and env.echo_stdin:
# Not using fastprint() here -- it prints as 'user'
# output level, don't want it to be accidentally hidden
sys.stdout.write(byte)
sys.stdout.flush()
time.sleep(ssh.io_sleep)
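# Illustrative sketch of how output_loop and input_loop are wired together
# around an open channel: one thread per remote stream plus one for local
# stdin. `chan` is assumed to be an already-opened channel object with an
# `input_enabled` attribute set by the caller; the 10-second timeout is an
# arbitrary example value.
def _demo_wire_loops(chan, timeout=10):
    from threading import Thread
    stdout_buf, stderr_buf = [], []
    workers = [
        Thread(target=output_loop,
               args=(chan, 'recv', sys.stdout, stdout_buf, timeout)),
        Thread(target=output_loop,
               args=(chan, 'recv_stderr', sys.stderr, stderr_buf, timeout)),
        Thread(target=input_loop, args=(chan, True)),
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    return ''.join(stdout_buf), ''.join(stderr_buf)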
| 39.613445
| 91
| 0.542851
|
1b310bcec0e1b6ffc0db2824c9a1a3556964e191
| 4,695
|
py
|
Python
|
scrapers/education.py
|
nicobaguio-un/hdx-scraper-covid-viz
|
ab9bb2fcc43fdd50de414e3c54eabff67933077f
|
[
"MIT"
] | null | null | null |
scrapers/education.py
|
nicobaguio-un/hdx-scraper-covid-viz
|
ab9bb2fcc43fdd50de414e3c54eabff67933077f
|
[
"MIT"
] | null | null | null |
scrapers/education.py
|
nicobaguio-un/hdx-scraper-covid-viz
|
ab9bb2fcc43fdd50de414e3c54eabff67933077f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import logging
from hdx.scraper.readers import read
from hdx.utilities.dateparse import parse_date, default_date
from hdx.utilities.text import get_fraction_str
logger = logging.getLogger(__name__)
def get_education(configuration, today, countryiso3s, regionlookup, downloader, scrapers=None):
name = 'education'
if scrapers and not any(scraper in name for scraper in scrapers):
return list(), list(), list(), list(), list(), list()
educationinfo = configuration[name]
datasetinfo = educationinfo['closures']
closures_headers, closures_iterator = read(downloader, datasetinfo)
closures = dict()
country_dates = dict()
for row in closures_iterator:
countryiso = row['ISO']
if not countryiso or countryiso not in countryiso3s:
continue
date = row['Date']
if isinstance(date, str):
date = parse_date(date)
if date > today:
continue
max_date = country_dates.get(countryiso, default_date)
if date < max_date:
continue
country_dates[countryiso] = date
closures[countryiso] = row['Status']
fully_closed = list()
for countryiso, closure in closures.items():
if closure.lower() == 'closed due to covid-19':
fully_closed.append(countryiso)
datasetinfo = educationinfo['enrolment']
learners_headers, learners_iterator = read(downloader, datasetinfo)
learners_012 = dict()
learners_3 = dict()
affected_learners = dict()
all_learners = dict()
for row in learners_iterator:
countryiso = row['ISO3']
if not countryiso or countryiso not in countryiso3s:
continue
l_0 = row['Pre-primary (both)']
l_1 = row['Primary (both)']
l_2 = row['Secondary (both)']
l_3 = row['Tertiary (both)']
l_012 = None
if l_0 != '-':
l_012 = int(l_0)
if l_1 != '-':
l_1 = int(l_1)
if l_012 is None:
l_012 = l_1
else:
l_012 += l_1
if l_2 != '-':
l_2 = int(l_2)
if l_012 is None:
l_012 = l_2
else:
l_012 += l_2
if l_012 is not None:
learners_012[countryiso] = l_012
if l_3 == '-':
l_3 = None
else:
l_3 = int(l_3)
learners_3[countryiso] = l_3
no_learners = None
if l_012 is not None:
no_learners = l_012
if l_3:
no_learners += l_3
elif l_3 is not None:
no_learners = l_3
if no_learners is not None:
all_learners[countryiso] = no_learners
if countryiso in fully_closed:
affected_learners[countryiso] = no_learners
affected_learners_total = dict()
learners_total = dict()
closed_countries = dict()
for countryiso in closures:
country_learners = all_learners.get(countryiso)
country_affected_learners = affected_learners.get(countryiso)
for region in regionlookup.iso3_to_region_and_hrp[countryiso]:
if country_learners is not None:
learners_total[region] = learners_total.get(region, 0) + country_learners
if country_affected_learners is not None:
affected_learners_total[region] = affected_learners_total.get(region, 0) + country_affected_learners
closed_countries[region] = closed_countries.get(region, 0) + 1
percentage_affected_learners = dict()
for region, no_learners in affected_learners_total.items():
percentage_affected_learners[region] = get_fraction_str(no_learners, learners_total[region])
logger.info('Processed education')
grheaders = ['No. affected learners', 'Percentage affected learners', 'No. closed countries']
grhxltags = ['#affected+learners', '#affected+learners+pct', '#status+country+closed']
headers = ['School Closure', 'No. pre-primary to upper-secondary learners', 'No. tertiary learners', 'No. affected learners']
hxltags = ['#impact+type', '#population+learners+pre_primary_to_secondary', '#population+learners+tertiary', '#affected+learners']
return [grheaders, grhxltags], [affected_learners_total, percentage_affected_learners, closed_countries], \
[(hxltag, datasetinfo['date'], datasetinfo['source'], datasetinfo['source_url']) for hxltag in hxltags], \
[headers, hxltags], [closures, learners_012, learners_3, affected_learners], \
[(hxltag, datasetinfo['date'], datasetinfo['source'], datasetinfo['source_url']) for hxltag in hxltags]
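# Illustrative sketch of the regional aggregation above, with made-up
# numbers: two countries in one hypothetical region, one of them fully
# closed. The affected share is the closed country's learners divided by
# all learners in the region, formatted by get_fraction_str.
def _demo_regional_share():
    all_learners = {'AAA': 9000000, 'BBB': 36000000}
    fully_closed = ['BBB']
    learners_total = sum(all_learners.values())
    affected_total = sum(count for iso, count in all_learners.items()
                         if iso in fully_closed)
    # 36000000 / 45000000 = 0.8, rendered however get_fraction_str formats it
    return get_fraction_str(affected_total, learners_total)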
| 42.681818
| 134
| 0.632162
|
ade5888cc99f282e672e97e9019d8a74ab1da40d
| 1,941
|
py
|
Python
|
tests/compile/test_modes.py
|
abdalazizrashid/Theano-PyMC
|
90fa750461e91fb6281d494ae86404e2153fd7eb
|
[
"BSD-3-Clause"
] | null | null | null |
tests/compile/test_modes.py
|
abdalazizrashid/Theano-PyMC
|
90fa750461e91fb6281d494ae86404e2153fd7eb
|
[
"BSD-3-Clause"
] | null | null | null |
tests/compile/test_modes.py
|
abdalazizrashid/Theano-PyMC
|
90fa750461e91fb6281d494ae86404e2153fd7eb
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Test compilation modes
"""
import copy
import aesara
import aesara.tensor as tt
from aesara.compile import Mode
class TestBunchOfModes:
def test_modes(self):
# this is a quick test after the LazyLinker branch merge
# to check that all the current modes can still be used.
linker_classes_involved = []
predef_modes = ["FAST_COMPILE", "FAST_RUN", "DEBUG_MODE"]
# Linkers to use with regular Mode
if aesara.config.cxx:
linkers = ["py", "c|py", "c|py_nogc", "vm", "vm_nogc", "cvm", "cvm_nogc"]
else:
linkers = ["py", "c|py", "c|py_nogc", "vm", "vm_nogc"]
modes = predef_modes + [Mode(linker, "fast_run") for linker in linkers]
for mode in modes:
x = tt.matrix()
y = tt.vector()
f = aesara.function([x, y], x + y, mode=mode)
# test that it runs something
f([[1, 2], [3, 4]], [5, 6])
linker_classes_involved.append(f.maker.mode.linker.__class__)
# print 'MODE:', mode, f.maker.mode.linker, 'stop'
# regression check:
# there should be
# - VM_Linker
# - OpWiseCLinker (FAST_RUN)
# - PerformLinker (FAST_COMPILE)
# - DebugMode's Linker (DEBUG_MODE)
assert 4 == len(set(linker_classes_involved))
class TestOldModesProblem:
def test_modes(self):
# Then, build a mode with the same linker, and a modified optimizer
default_mode = aesara.compile.mode.get_default_mode()
modified_mode = default_mode.including("specialize")
# The following line used to fail, with Python 2.4, in July 2012,
# because an fgraph was associated to the default linker
copy.deepcopy(modified_mode)
# More straightforward test
linker = aesara.compile.mode.get_default_mode().linker
assert not hasattr(linker, "fgraph") or linker.fgraph is None
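# Illustrative sketch of the pattern exercised above: build a Mode from an
# explicit linker name and optimizer name, then compile and run a tiny
# graph with it. The "vm"/"fast_run" pair is one of the combinations the
# test iterates over.
def _demo_explicit_mode():
    mode = Mode("vm", "fast_run")
    x = tt.matrix()
    y = tt.vector()
    f = aesara.function([x, y], x + y, mode=mode)
    return f([[1, 2], [3, 4]], [5, 6])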
| 33.465517
| 85
| 0.613086
|
8a98c32d2195fce1071749f7cca317dbdad66352
| 8,422
|
py
|
Python
|
ironic/common/swift.py
|
yanndegat/ironic
|
8857ec76443dea7778bb9c0d66568304e52495e5
|
[
"Apache-2.0"
] | 350
|
2015-01-02T09:35:49.000Z
|
2022-03-28T09:25:59.000Z
|
ironic/common/swift.py
|
yanndegat/ironic
|
8857ec76443dea7778bb9c0d66568304e52495e5
|
[
"Apache-2.0"
] | 7
|
2015-05-04T16:12:41.000Z
|
2021-08-31T12:27:27.000Z
|
ironic/common/swift.py
|
yanndegat/ironic
|
8857ec76443dea7778bb9c0d66568304e52495e5
|
[
"Apache-2.0"
] | 333
|
2015-01-06T09:09:22.000Z
|
2022-02-20T08:11:40.000Z
|
#
# Copyright 2014 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import client as http_client
from urllib import parse as urlparse
from swiftclient import client as swift_client
from swiftclient import exceptions as swift_exceptions
from swiftclient import utils as swift_utils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import keystone
from ironic.conf import CONF
_SWIFT_SESSION = None
def get_swift_session():
global _SWIFT_SESSION
if not _SWIFT_SESSION:
auth = keystone.get_auth('swift')
_SWIFT_SESSION = keystone.get_session('swift', auth=auth)
return _SWIFT_SESSION
class SwiftAPI(object):
"""API for communicating with Swift."""
connection = None
"""Underlying Swift connection object."""
def __init__(self):
"""Initialize the connection with swift
:raises: ConfigInvalid if required keystone authorization credentials
with swift are missing.
"""
params = {'retries': CONF.swift.swift_max_retries}
# NOTE(pas-ha) swiftclient still (as of 3.3.0) does not use
# (adapter-based) SessionClient, and uses the passed in session
# only to resolve endpoint and get a token,
# but not to make further requests to Swift itself (LP 1736135).
# Thus we need to deconstruct back all the adapter- and
# session-related args as loaded by keystoneauth from config
# to pass them to the client explicitly.
# TODO(pas-ha) re-write this when swiftclient is brought on par
# with other OS clients re auth plugins, sessions and adapters
# support.
# TODO(pas-ha) pass the context here and use token from context
# with service auth
params['session'] = session = get_swift_session()
endpoint = keystone.get_endpoint('swift', session=session)
params['os_options'] = {'object_storage_url': endpoint}
# deconstruct back session-related options
params['timeout'] = session.timeout
if session.verify is False:
params['insecure'] = True
elif isinstance(session.verify, str):
params['cacert'] = session.verify
if session.cert:
# NOTE(pas-ha) although setting cert as path to single file
# with both client cert and key is supported by Session,
# keystoneauth loading always sets the session.cert
# as tuple of cert and key.
params['cert'], params['cert_key'] = session.cert
self.connection = swift_client.Connection(**params)
def create_object(self, container, obj, filename,
object_headers=None):
"""Uploads a given file to Swift.
:param container: The name of the container for the object.
:param obj: The name of the object in Swift
:param filename: The file to upload, as the object data
:param object_headers: the headers for the object to pass to Swift
:returns: The Swift UUID of the object
:raises: SwiftOperationError, if any operation with Swift fails.
"""
try:
self.connection.put_container(container)
except swift_exceptions.ClientException as e:
operation = _("put container")
raise exception.SwiftOperationError(operation=operation, error=e)
with open(filename, "rb") as fileobj:
try:
obj_uuid = self.connection.put_object(container,
obj,
fileobj,
headers=object_headers)
except swift_exceptions.ClientException as e:
operation = _("put object")
raise exception.SwiftOperationError(operation=operation,
error=e)
return obj_uuid
def get_temp_url(self, container, obj, timeout):
"""Returns the temp url for the given Swift object.
:param container: The name of the container in which Swift object
is placed.
:param obj: The name of the Swift object.
:param timeout: The timeout in seconds after which the generated url
should expire.
:returns: The temp url for the object.
:raises: SwiftOperationError, if any operation with Swift fails.
"""
try:
account_info = self.connection.head_account()
except swift_exceptions.ClientException as e:
operation = _("head account")
raise exception.SwiftOperationError(operation=operation,
error=e)
parse_result = urlparse.urlparse(self.connection.url)
swift_object_path = '/'.join((parse_result.path, container, obj))
temp_url_key = account_info.get('x-account-meta-temp-url-key')
if not temp_url_key:
raise exception.MissingParameterValue(_(
'Swift temporary URLs require a shared secret to be '
                'created. You must pre-generate the key on '
'the project used to access Swift.'))
url_path = swift_utils.generate_temp_url(swift_object_path, timeout,
temp_url_key, 'GET')
return urlparse.urlunparse(
(parse_result.scheme, parse_result.netloc, url_path,
None, None, None))
def delete_object(self, container, obj):
"""Deletes the given Swift object.
:param container: The name of the container in which Swift object
is placed.
:param obj: The name of the object in Swift to be deleted.
:raises: SwiftObjectNotFoundError, if object is not found in Swift.
:raises: SwiftOperationError, if operation with Swift fails.
"""
try:
self.connection.delete_object(container, obj)
except swift_exceptions.ClientException as e:
operation = _("delete object")
if e.http_status == http_client.NOT_FOUND:
raise exception.SwiftObjectNotFoundError(obj=obj,
container=container,
operation=operation)
raise exception.SwiftOperationError(operation=operation, error=e)
def head_object(self, container, obj):
"""Retrieves the information about the given Swift object.
:param container: The name of the container in which Swift object
is placed.
:param obj: The name of the object in Swift
:returns: The information about the object as returned by
Swift client's head_object call.
:raises: SwiftOperationError, if operation with Swift fails.
"""
try:
return self.connection.head_object(container, obj)
except swift_exceptions.ClientException as e:
operation = _("head object")
raise exception.SwiftOperationError(operation=operation, error=e)
def update_object_meta(self, container, obj, object_headers):
"""Update the metadata of a given Swift object.
:param container: The name of the container in which Swift object
is placed.
:param obj: The name of the object in Swift
:param object_headers: the headers for the object to pass to Swift
:raises: SwiftOperationError, if operation with Swift fails.
"""
try:
self.connection.post_object(container, obj, object_headers)
except swift_exceptions.ClientException as e:
operation = _("post object")
raise exception.SwiftOperationError(operation=operation, error=e)
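# Illustrative sketch of a typical round trip through SwiftAPI: upload a
# local file, hand out a time-limited URL for it, then delete it. The
# container/object names, local path and 300-second expiry are hypothetical
# example values; the calls assume Swift credentials are configured.
def _demo_round_trip():
    api = SwiftAPI()
    api.create_object('ironic-images', 'deploy-kernel', '/tmp/kernel')
    url = api.get_temp_url('ironic-images', 'deploy-kernel', 300)
    api.delete_object('ironic-images', 'deploy-kernel')
    return url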
| 42.969388
| 78
| 0.631323
|