Dataset schema (one row per source file):

| column | type | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
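The records that follow are individual rows of this table. As a minimal sketch of how such a table could be streamed with the `datasets` library (the dataset id below is a placeholder, not taken from this document):

```python
# Hypothetical loading sketch; "<dataset-id>" is a placeholder for the real dataset name.
from datasets import load_dataset

rows = load_dataset("<dataset-id>", split="train", streaming=True)
for row in rows.take(3):
    # Column names follow the schema listed above.
    print(row["max_stars_repo_name"], row["size"], row["score_documentation"])
```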
hexsha: 6707dd7b43e33c316be804768ef020a089466983 | size: 14,107 | ext: py | lang: Python
max_stars: visionpack/stable_baselines3/common/off_policy_algorithm.py | joeljosephjin/gvgai-rl | 57281629c313abb43312950b22d043a3d67639cf | ["Apache-2.0"] | count: null | null | null
max_issues: visionpack/stable_baselines3/common/off_policy_algorithm.py | joeljosephjin/gvgai-rl | 57281629c313abb43312950b22d043a3d67639cf | ["Apache-2.0"] | count: null | null | null
max_forks: visionpack/stable_baselines3/common/off_policy_algorithm.py | joeljosephjin/gvgai-rl | 57281629c313abb43312950b22d043a3d67639cf | ["Apache-2.0"] | count: null | null | null
content:
import time
import os
import pickle
import warnings
from typing import Union, Type, Optional, Dict, Any, Callable
import gym
import torch as th
import numpy as np
from stable_baselines3.common import logger
from stable_baselines3.common.base_class import BaseAlgorithm
from stable_baselines3.common.policies import BasePolicy
from stable_baselines3.common.utils import safe_mean
from stable_baselines3.common.vec_env import VecEnv
from stable_baselines3.common.type_aliases import GymEnv, RolloutReturn
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.noise import ActionNoise
from stable_baselines3.common.buffers import ReplayBuffer
class OffPolicyAlgorithm(BaseAlgorithm):
"""
The base for Off-Policy algorithms (ex: SAC/TD3)
:param policy: Policy object
:param env: The environment to learn from
(if registered in Gym, can be str. Can be None for loading trained models)
:param policy_base: The base policy used by this method
:param learning_rate: (float or callable) learning rate for the optimizer,
it can be a function of the current progress remaining (from 1 to 0)
:param buffer_size: (int) size of the replay buffer
:param learning_starts: (int) how many steps of the model to collect transitions for before learning starts
:param batch_size: (int) Minibatch size for each gradient update
:param policy_kwargs: Additional arguments to be passed to the policy on creation
:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
:param verbose: The verbosity level: 0 none, 1 training information, 2 debug
:param device: Device on which the code should run.
By default, it will try to use a Cuda compatible device and fallback to cpu
if it is not possible.
:param support_multi_env: Whether the algorithm supports training
with multiple environments (as in A2C)
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param monitor_wrapper: When creating an environment, whether to wrap it
or not in a Monitor wrapper.
:param seed: Seed for the pseudo random generators
:param use_sde: Whether to use State Dependent Exploration (SDE)
instead of action noise exploration (default: False)
:param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
Default: -1 (only sample at the beginning of the rollout)
:param use_sde_at_warmup: (bool) Whether to use gSDE instead of uniform sampling
during the warm up phase (before learning starts)
:param sde_support: (bool) Whether the model supports gSDE or not
"""
def __init__(self,
policy: Type[BasePolicy],
env: Union[GymEnv, str],
policy_base: Type[BasePolicy],
learning_rate: Union[float, Callable],
buffer_size: int = int(1e6),
learning_starts: int = 100,
batch_size: int = 256,
policy_kwargs: Dict[str, Any] = None,
tensorboard_log: Optional[str] = None,
verbose: int = 0,
device: Union[th.device, str] = 'auto',
support_multi_env: bool = False,
create_eval_env: bool = False,
monitor_wrapper: bool = True,
seed: Optional[int] = None,
use_sde: bool = False,
sde_sample_freq: int = -1,
use_sde_at_warmup: bool = False,
sde_support: bool = True):
super(OffPolicyAlgorithm, self).__init__(policy=policy, env=env, policy_base=policy_base,
learning_rate=learning_rate, policy_kwargs=policy_kwargs,
tensorboard_log=tensorboard_log, verbose=verbose,
device=device, support_multi_env=support_multi_env,
create_eval_env=create_eval_env, monitor_wrapper=monitor_wrapper,
seed=seed, use_sde=use_sde, sde_sample_freq=sde_sample_freq)
self.buffer_size = buffer_size
self.batch_size = batch_size
self.learning_starts = learning_starts
self.actor = None # type: Optional[th.nn.Module]
self.replay_buffer = None # type: Optional[ReplayBuffer]
# Update policy keyword arguments
if sde_support:
self.policy_kwargs['use_sde'] = self.use_sde
self.policy_kwargs['device'] = self.device
# For gSDE only
self.use_sde_at_warmup = use_sde_at_warmup
def _setup_model(self):
self._setup_lr_schedule()
self.set_random_seed(self.seed)
self.replay_buffer = ReplayBuffer(self.buffer_size, self.observation_space,
self.action_space, self.device)
self.policy = self.policy_class(self.observation_space, self.action_space,
self.lr_schedule, **self.policy_kwargs)
self.policy = self.policy.to(self.device)
def save_replay_buffer(self, path: str):
"""
Save the replay buffer as a pickle file.
:param path: (str) Path to a log folder
"""
assert self.replay_buffer is not None, "The replay buffer is not defined"
with open(os.path.join(path, 'replay_buffer.pkl'), 'wb') as file_handler:
pickle.dump(self.replay_buffer, file_handler)
def load_replay_buffer(self, path: str):
"""
Load a replay buffer from a pickle file.
:param path: (str) Path to the pickled replay buffer.
"""
with open(path, 'rb') as file_handler:
self.replay_buffer = pickle.load(file_handler)
assert isinstance(self.replay_buffer, ReplayBuffer), 'The replay buffer must inherit from ReplayBuffer class'
def collect_rollouts(self, # noqa: C901
env: VecEnv,
# Type hint as string to avoid circular import
callback: 'BaseCallback',
n_episodes: int = 1,
n_steps: int = -1,
action_noise: Optional[ActionNoise] = None,
learning_starts: int = 0,
replay_buffer: Optional[ReplayBuffer] = None,
log_interval: Optional[int] = None) -> RolloutReturn:
"""
Collect experiences and store them into a ReplayBuffer.
:param env: (VecEnv) The training environment
:param callback: (BaseCallback) Callback that will be called at each step
(and at the beginning and end of the rollout)
:param n_episodes: (int) Number of episodes to use to collect rollout data
You can also specify a ``n_steps`` instead
:param n_steps: (int) Number of steps to use to collect rollout data
You can also specify a ``n_episodes`` instead.
:param action_noise: (Optional[ActionNoise]) Action noise that will be used for exploration
Required for deterministic policy (e.g. TD3). This can also be used
in addition to the stochastic policy for SAC.
:param learning_starts: (int) Number of steps before learning for the warm-up phase.
:param replay_buffer: (ReplayBuffer)
:param log_interval: (int) Log data every ``log_interval`` episodes
:return: (RolloutReturn)
"""
episode_rewards, total_timesteps = [], []
total_steps, total_episodes = 0, 0
assert isinstance(env, VecEnv), "You must pass a VecEnv"
assert env.num_envs == 1, "OffPolicyAlgorithm only supports a single environment"
if n_episodes > 0 and n_steps > 0:
# Note we are referring to the constructor arguments
# that are named `train_freq` and `n_episodes_rollout`
# but correspond to `n_steps` and `n_episodes` here
warnings.warn("You passed a positive value for `train_freq` and `n_episodes_rollout`."
"Please make sure this is intended. "
"The agent will collect data by stepping in the environment "
"until both conditions are true: "
"`number of steps in the env` >= `train_freq` and "
"`number of episodes` > `n_episodes_rollout`")
if self.use_sde:
self.actor.reset_noise()
callback.on_rollout_start()
continue_training = True
while total_steps < n_steps or total_episodes < n_episodes:
done = False
episode_reward, episode_timesteps = 0.0, 0
while not done:
if self.use_sde and self.sde_sample_freq > 0 and total_steps % self.sde_sample_freq == 0:
# Sample a new noise matrix
self.actor.reset_noise()
# Select action randomly or according to policy
if self.num_timesteps < learning_starts and not (self.use_sde and self.use_sde_at_warmup):
# Warmup phase
unscaled_action = np.array([self.action_space.sample()])
else:
# Note: we assume that the policy uses tanh to scale the action
# We use non-deterministic action in the case of SAC, for TD3, it does not matter
unscaled_action, _ = self.predict(self._last_obs, deterministic=False)
# Rescale the action from [low, high] to [-1, 1]
if isinstance(self.action_space, gym.spaces.Box):
scaled_action = self.policy.scale_action(unscaled_action)
# Add noise to the action (improve exploration)
if action_noise is not None:
# NOTE: in the original implementation of TD3, the noise was applied to the unscaled action
# Update(October 2019): Not anymore
scaled_action = np.clip(scaled_action + action_noise(), -1, 1)
# We store the scaled action in the buffer
buffer_action = scaled_action
action = self.policy.unscale_action(scaled_action)
else:
# Discrete case, no need to normalize or clip
buffer_action = unscaled_action
action = buffer_action
# Rescale and perform action
new_obs, reward, done, infos = env.step(action)
# Only stop training if return value is False, not when it is None.
if callback.on_step() is False:
return RolloutReturn(0.0, total_steps, total_episodes, continue_training=False)
episode_reward += reward
# Retrieve reward and episode length if using Monitor wrapper
self._update_info_buffer(infos, done)
# Store data in replay buffer
if replay_buffer is not None:
# Store only the unnormalized version
if self._vec_normalize_env is not None:
new_obs_ = self._vec_normalize_env.get_original_obs()
reward_ = self._vec_normalize_env.get_original_reward()
else:
# Avoid changing the original ones
self._last_original_obs, new_obs_, reward_ = self._last_obs, new_obs, reward
replay_buffer.add(self._last_original_obs, new_obs_, buffer_action, reward_, done)
self._last_obs = new_obs
# Save the unnormalized observation
if self._vec_normalize_env is not None:
self._last_original_obs = new_obs_
self.num_timesteps += 1
episode_timesteps += 1
total_steps += 1
if 0 < n_steps <= total_steps:
break
if done:
total_episodes += 1
self._episode_num += 1
episode_rewards.append(episode_reward)
total_timesteps.append(episode_timesteps)
if action_noise is not None:
action_noise.reset()
# Log training infos
if log_interval is not None and self._episode_num % log_interval == 0:
fps = int(self.num_timesteps / (time.time() - self.start_time))
logger.record("time/episodes", self._episode_num, exclude="tensorboard")
if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0:
logger.record('rollout/ep_rew_mean', safe_mean([ep_info['r'] for ep_info in self.ep_info_buffer]))
logger.record('rollout/ep_len_mean', safe_mean([ep_info['l'] for ep_info in self.ep_info_buffer]))
logger.record("time/fps", fps)
logger.record('time/time_elapsed', int(time.time() - self.start_time), exclude="tensorboard")
logger.record("time/total timesteps", self.num_timesteps, exclude="tensorboard")
if self.use_sde:
logger.record("train/std", (self.actor.get_std()).mean().item())
if len(self.ep_success_buffer) > 0:
logger.record('rollout/success rate', safe_mean(self.ep_success_buffer))
# Pass the number of timesteps for tensorboard
logger.dump(step=self.num_timesteps)
mean_reward = np.mean(episode_rewards) if total_episodes > 0 else 0.0
callback.on_rollout_end()
return RolloutReturn(mean_reward, total_steps, total_episodes, continue_training)
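# Illustrative sketch of the Box-action branch of collect_rollouts above, assuming
# scale_action / unscale_action are the usual affine maps between [low, high] and [-1, 1]
# (the bounds and the noise value below are hypothetical):
# low, high = np.array([-2.0]), np.array([2.0])                 # hypothetical Box bounds
# unscaled_action = np.array([[1.5]])                           # action proposed by the policy
# scaled = 2.0 * (unscaled_action - low) / (high - low) - 1.0   # -> 0.75, in [-1, 1]
# scaled = np.clip(scaled + 0.3, -1.0, 1.0)                     # noisy buffer_action -> 1.0
# action = low + 0.5 * (scaled + 1.0) * (high - low)            # value sent to env.step() -> 2.0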
avg_line_length: 50.382143 | max_line_length: 122 | alphanum_fraction: 0.607571 | count_classes: 13,426 | score_classes: 0.951726 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 5,290 | score_documentation: 0.374991
hexsha: 6707dda4f20fd2cb10f818588c5b114047a6d11c | size: 2,743 | ext: py | lang: Python
max_stars: src/oscar/apps/dashboard/app.py | frmdstryr/django-oscar | 32bf8618ebb688df6ba306dc7703de8e61b4e78c | ["BSD-3-Clause"] | count: null | null | null
max_issues: src/oscar/apps/dashboard/app.py | frmdstryr/django-oscar | 32bf8618ebb688df6ba306dc7703de8e61b4e78c | ["BSD-3-Clause"] | count: null | null | null
max_forks: src/oscar/apps/dashboard/app.py | frmdstryr/django-oscar | 32bf8618ebb688df6ba306dc7703de8e61b4e78c | ["BSD-3-Clause"] | count: null | null | null
content:
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.contrib.auth.forms import AuthenticationForm
from oscar.core.application import (
DashboardApplication as BaseDashboardApplication)
from oscar.core.loading import get_class
class DashboardApplication(BaseDashboardApplication):
name = 'dashboard'
permissions_map = {
'index': (['is_staff'], ['partner.dashboard_access']),
}
index_view = get_class('dashboard.views', 'IndexView')
reports_app = get_class('dashboard.reports.app', 'application')
orders_app = get_class('dashboard.orders.app', 'application')
users_app = get_class('dashboard.users.app', 'application')
catalogue_app = get_class('dashboard.catalogue.app', 'application')
promotions_app = get_class('dashboard.promotions.app', 'application')
pages_app = get_class('dashboard.pages.app', 'application')
partners_app = get_class('dashboard.partners.app', 'application')
offers_app = get_class('dashboard.offers.app', 'application')
ranges_app = get_class('dashboard.ranges.app', 'application')
reviews_app = get_class('dashboard.reviews.app', 'application')
vouchers_app = get_class('dashboard.vouchers.app', 'application')
comms_app = get_class('dashboard.communications.app', 'application')
shipping_app = get_class('dashboard.shipping.app', 'application')
system_app = get_class('dashboard.system.app', 'application')
def get_urls(self):
urls = [
url(r'^$', self.index_view.as_view(), name='index'),
url(r'^catalogue/', self.catalogue_app.urls),
url(r'^reports/', self.reports_app.urls),
url(r'^orders/', self.orders_app.urls),
url(r'^users/', self.users_app.urls),
url(r'^content-blocks/', self.promotions_app.urls),
url(r'^pages/', self.pages_app.urls),
url(r'^partners/', self.partners_app.urls),
url(r'^offers/', self.offers_app.urls),
url(r'^ranges/', self.ranges_app.urls),
url(r'^reviews/', self.reviews_app.urls),
url(r'^vouchers/', self.vouchers_app.urls),
url(r'^comms/', self.comms_app.urls),
url(r'^shipping/', self.shipping_app.urls),
url(r'^system/', self.system_app.urls),
url(r'^login/$',
auth_views.LoginView.as_view(template_name='dashboard/login.html',
authentication_form=AuthenticationForm),
name='login'),
url(r'^logout/$', auth_views.LogoutView.as_view(next_page='/'), name='logout'),
]
return self.post_process_urls(urls)
application = DashboardApplication()
avg_line_length: 44.967213 | max_line_length: 91 | alphanum_fraction: 0.654028 | count_classes: 2,426 | score_classes: 0.884433 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 838 | score_documentation: 0.305505
hexsha: 67081cebddc67151d15ce739da186891614e2d4d | size: 4,783 | ext: py | lang: Python
max_stars: wedding/migrations/0004_auto_20170407_2017.py | chadgates/thetravelling2 | 3646d64acb0fbf5106066700f482c9013f5fb7d0 | ["MIT"] | count: null | null | null
max_issues: wedding/migrations/0004_auto_20170407_2017.py | chadgates/thetravelling2 | 3646d64acb0fbf5106066700f482c9013f5fb7d0 | ["MIT"] | count: null | null | null
max_forks: wedding/migrations/0004_auto_20170407_2017.py | chadgates/thetravelling2 | 3646d64acb0fbf5106066700f482c9013f5fb7d0 | ["MIT"] | count: null | null | null
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-04-07 20:17
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('wedding', '0003_auto_20170214_1543'),
]
operations = [
migrations.CreateModel(
name='Cart',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CartItem',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('amount', models.PositiveIntegerField(verbose_name='Item count')),
('buyer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Gift',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=300, verbose_name='Name')),
('description', models.TextField(blank=True, null=True, verbose_name='Description')),
('link', models.TextField(blank=True, null=True, verbose_name='Link')),
('price', models.DecimalField(decimal_places=2, max_digits=7, verbose_name='Price')),
('gift_is_part', models.BooleanField(default=False, verbose_name='Gift is part')),
('max_parts', models.PositiveIntegerField(verbose_name='Maximum number of parts')),
('taken_parts', models.PositiveIntegerField(default=0, verbose_name='Number of parts taken')),
('img', models.ImageField(blank=True, null=True, upload_to='')),
],
options={
'verbose_name': 'Gift',
'verbose_name_plural': 'Gifts',
},
),
migrations.CreateModel(
name='GiftOrder',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('voucher_from', models.CharField(max_length=300, verbose_name='Voucher is from')),
('voucher_greeting', models.TextField(blank=True, null=True, verbose_name='Voucher Greeting')),
('voucher_senddirect', models.BooleanField(default=False, verbose_name='Send voucher directly')),
('buyer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='GiftOrderItem',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('quantity', models.PositiveIntegerField(verbose_name='Item count')),
('gift', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wedding.Gift')),
('giftorder', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wedding.GiftOrder')),
],
options={
'abstract': False,
},
),
migrations.AlterModelOptions(
name='rsvp',
options={'permissions': (('view_list', 'Can see the RSVP list'),), 'verbose_name': 'RSVP', 'verbose_name_plural': 'RSVPs'},
),
migrations.AddField(
model_name='cartitem',
name='gift',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wedding.Gift'),
),
]
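# This auto-generated migration would be applied with the usual Django commands, e.g.:
#   python manage.py migrate wedding
# and the SQL it would emit can be inspected with:
#   python manage.py sqlmigrate wedding 0004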
avg_line_length: 46.892157 | max_line_length: 135 | alphanum_fraction: 0.582061 | count_classes: 4,547 | score_classes: 0.950659 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 874 | score_documentation: 0.182731
hexsha: 6708d69bfe7f1ec1d25240a2e512900542ce4a78 | size: 820 | ext: py | lang: Python
max_stars: taskonomy/utils/log_utils.py | shikhar-srivastava/hover_net | d4e8e129a4ad72f5d574a78c036449b496421529 | ["MIT"] | count: null | null | null
max_issues: taskonomy/utils/log_utils.py | shikhar-srivastava/hover_net | d4e8e129a4ad72f5d574a78c036449b496421529 | ["MIT"] | count: null | null | null
max_forks: taskonomy/utils/log_utils.py | shikhar-srivastava/hover_net | d4e8e129a4ad72f5d574a78c036449b496421529 | ["MIT"] | count: null | null | null
content:
import pandas as pd
import pickle
def read_metric_logs(bucket_type):
metrics = pd.DataFrame(columns=['source_type', 'target_type', 'stats'])
type_list_path = f'/l/users/shikhar.srivastava/data/pannuke/{bucket_type}/selected_types.csv'
type_list = pd.read_csv(type_list_path)['0']
for source_type in type_list:
for target_type in type_list:
logs_path = f'/l/users/shikhar.srivastava/workspace/hover_net/logs/test/second_order/{bucket_type}/ckpts/{source_type}-{target_type}/per_image_stat.pkl'
# Read pickle file
with open(logs_path, 'rb') as f:
stats = pickle.load(f)
metrics = metrics.append({'source_type': source_type, 'target_type': target_type, 'stats': stats}, ignore_index=True)
return metrics, type_list
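# Hypothetical usage sketch; 'fold0' stands in for a real bucket_type directory, and the
# hard-coded /l/users/... paths above must exist for this call to succeed:
# metrics, type_list = read_metric_logs('fold0')
# print(metrics[['source_type', 'target_type']].head())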
avg_line_length: 43.157895 | max_line_length: 164 | alphanum_fraction: 0.684146 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 307 | score_documentation: 0.37439
hexsha: 6709a543eab8bce61601cfd76117d243faac013b | size: 5,373 | ext: py | lang: Python
max_stars: train_DEU.py | JosephineRabbit/MLMSNet | 755e07afd1c19797b02cf88b7bbb195112ffec77 | ["MIT"] | count: 61 | 2019-04-23T15:17:36.000Z | 2021-08-20T15:48:11.000Z
max_issues: train_DEU.py | zhuxinang/MLMSNet | a824a70fa37aeb4536bc72d8032e871328c687e8 | ["MIT"] | count: 8 | 2019-05-04T04:38:26.000Z | 2020-08-16T15:15:15.000Z
max_forks: train_DEU.py | JosephineRabbit/MLMSNet | 755e07afd1c19797b02cf88b7bbb195112ffec77 | ["MIT"] | count: 7 | 2019-06-12T07:02:06.000Z | 2020-09-20T02:37:36.000Z
content:
from D_E_U import *
D_E = DSS(*extra_layer(vgg(base['dss'], 3), extra['dss']),config.BATCH_SIZE).cuda()
U = D_U().cuda()
U.cuda()
data_dirs = [
("/home/rabbit/Datasets/DUTS/DUT-train/DUT-train-Image",
"/home/rabbit/Datasets/DUTS/DUT-train/DUT-train-Mask"),
]
test_dirs = [("/home/rabbit/Datasets/SED1/SED1-Image",
"/home/rabbit/Datasets/SED1/SED1-Mask")]
D_E.base.load_state_dict(torch.load('/home/rabbit/Desktop/DUT_train/weights/vgg16_feat.pth'))
initialize_weights(U)
DE_optimizer = optim.Adam(D_E.parameters(), lr=config.D_LEARNING_RATE, betas=(0.5, 0.999))
U_optimizer = optim.Adam(U.parameters(), lr=config.U_LEARNING_RATE, betas=(0.5, 0.999))
BCE_loss = torch.nn.BCELoss().cuda()
def process_data_dir(data_dir):
files = os.listdir(data_dir)
files = map(lambda x: os.path.join(data_dir, x), files)
return sorted(files)
batch_size =BATCH_SIZE
DATA_DICT = {}
IMG_FILES = []
GT_FILES = []
IMG_FILES_TEST = []
GT_FILES_TEST = []
for dir_pair in data_dirs:
X, y = process_data_dir(dir_pair[0]), process_data_dir(dir_pair[1])
IMG_FILES.extend(X)
GT_FILES.extend(y)
for dir_pair in test_dirs:
X, y = process_data_dir(dir_pair[0]), process_data_dir(dir_pair[1])
IMG_FILES_TEST.extend(X)
GT_FILES_TEST.extend(y)
IMGS_train, GT_train = IMG_FILES, GT_FILES
train_folder = DataFolder(IMGS_train, GT_train, True)
train_data = DataLoader(train_folder, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS, shuffle=True,
drop_last=True)
test_folder = DataFolder(IMG_FILES_TEST, GT_FILES_TEST, trainable=False)
test_data = DataLoader(test_folder, batch_size=1, num_workers=NUM_WORKERS, shuffle=False)
def cal_DLoss(out_m,out_e, mask, edge):
# if l == 0:
# 0 f 1 t
# ll = Variable(torch.ones(mask.shape()))
D_masks_loss = 0
D_edges_loss = 0
for i in range(6):
#print(out_m[i].size())
#print(mask.size())
D_masks_loss += F.binary_cross_entropy(out_m[i], mask)
for i in range(6):
D_edges_loss += F.binary_cross_entropy(out_e[i], edge)
return ( D_masks_loss, D_edges_loss)
best_eval = None
x = 0
ma = 1
for epoch in range(1, config.NUM_EPOCHS + 1):
sum_train_mae = 0
sum_train_loss = 0
sum_train_gan = 0
##train
for iter_cnt, (img_batch, label_batch, edges, shape, name) in enumerate(train_data):
D_E.train()
x = x + 1
# print(img_batch.size())
label_batch = Variable(label_batch).cuda()
# print(torch.typename(label_batch))
print('training start!!')
# for iter, (x_, _) in enumerate(train_data):
img_batch = Variable(img_batch.cuda()) # ,Variable(z_.cuda())
edges = Variable(edges).cuda()
##########DSS#########################
######train dis
##fake
f,y1,y2 = D_E(img_batch)
m_l_1,e_l_1 = cal_DLoss(y1,y2,label_batch,edges)
DE_optimizer.zero_grad()
DE_l_1 = m_l_1 +e_l_1
DE_l_1.backward()
DE_optimizer.step()
w = [2,2,3,3]
f, y1, y2 = D_E(img_batch)
masks,DIC = U(f)
pre_ms_l = 0
ma = torch.abs(label_batch-masks[4]).mean()
pre_m_l = F.binary_cross_entropy(masks[4],label_batch)
for i in range(4):
pre_ms_l +=w[i] * F.binary_cross_entropy(masks[i],label_batch)
DE_optimizer.zero_grad()
DE_l_1 = pre_ms_l/20+30*pre_m_l
DE_l_1.backward()
DE_optimizer.step()
f, y1, y2 = D_E(img_batch)
masks,DIC = U(f)
pre_ms_l = 0
ma = torch.abs(label_batch-masks[4]).mean()
pre_m_l = F.binary_cross_entropy(masks[4], label_batch)
for i in range(4):
pre_ms_l += w[i] * F.binary_cross_entropy(masks[i], label_batch)
U_optimizer.zero_grad()
U_l_1 = pre_ms_l/20+30*pre_m_l
U_l_1.backward()
U_optimizer.step()
sum_train_mae += ma.data.cpu()
print("Epoch:{}\t {}/{}\ \t mae:{}".format(epoch, iter_cnt + 1,
len(train_folder) / config.BATCH_SIZE,
sum_train_mae / (iter_cnt + 1)))
##########save model
# torch.save(D.state_dict(), './checkpoint/DSS/with_e_2/D15epoch%d.pkl' % epoch)
torch.save(D_E.state_dict(), './checkpoint/DSS/with_e_2/D_Eepoch%d.pkl' % epoch)
torch.save(U.state_dict(), './checkpoint/DSS/with_e_2/Uis.pkl')
print('model saved')
###############test
eval1 = 0
eval2 = 0
t_mae = 0
for iter_cnt, (img_batch, label_batch, edges, shape, name) in enumerate(test_data):
D_E.eval()
U.eval()
label_batch = Variable(label_batch).cuda()
print('val!!')
# for iter, (x_, _) in enumerate(train_data):
img_batch = Variable(img_batch.cuda()) # ,Variable(z_.cuda())
f,y1,y2 = D_E(img_batch)
masks, DIC = U(f)
mae_v2 = torch.abs(label_batch - masks[4]).mean().data[0]
# eval1 += mae_v1
eval2 += mae_v2
# m_eval1 = eval1 / (iter_cnt + 1)
m_eval2 = eval2 / (iter_cnt + 1)
print("test mae", m_eval2)
with open('results1.txt', 'a+') as f:
f.write(str(epoch) + " 2:" + str(m_eval2) + "\n")
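# Illustrative check of cal_DLoss with random tensors (assumes torch and F are in scope
# via the star import above, and that the six side outputs are probabilities in (0, 1),
# as F.binary_cross_entropy requires):
# out_m = [torch.rand(2, 1, 64, 64) for _ in range(6)]
# out_e = [torch.rand(2, 1, 64, 64) for _ in range(6)]
# mask = torch.randint(0, 2, (2, 1, 64, 64)).float()
# edge = torch.randint(0, 2, (2, 1, 64, 64)).float()
# m_loss, e_loss = cal_DLoss(out_m, out_e, mask, edge)   # two scalar BCE sums over 6 outputs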
avg_line_length: 24.760369 | max_line_length: 116 | alphanum_fraction: 0.594826 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 972 | score_documentation: 0.180905
hexsha: 670ac7303b14fc10b0803485c55a62a568d00950 | size: 445 | ext: py | lang: Python
max_stars: solutions/1497_check_if_array_pairs_are_divisible_by_k.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | ["MIT"] | count: null | null | null
max_issues: solutions/1497_check_if_array_pairs_are_divisible_by_k.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | ["MIT"] | count: null | null | null
max_forks: solutions/1497_check_if_array_pairs_are_divisible_by_k.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | ["MIT"] | count: null | null | null
content:
import collections
from typing import List
class Solution:
def canArrange(self, arr: List[int], k: int) -> bool:
"""Hash table.
Running time: O(n) where n == len(arr).
"""
d = collections.defaultdict(int)
for a in arr:
d[a % k] += 1
for key, v in d.items():
if key == 0 and v % 2 == 1:
return False
elif key != 0 and v != d[k - key]:
return False
return True
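# Quick sanity checks (hand-verified pairings):
# Solution().canArrange([1, 2, 3, 4, 5, 10, 6, 7, 8, 9], 5)  -> True   (1+9, 2+8, 3+7, 4+6, 5+10)
# Solution().canArrange([1, 2, 3, 4, 5, 6], 7)               -> True   (1+6, 2+5, 3+4)
# Solution().canArrange([1, 2, 3, 4, 5, 6], 10)              -> False  (1 has no partner mod 10)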
avg_line_length: 27.8125 | max_line_length: 57 | alphanum_fraction: 0.442697 | count_classes: 444 | score_classes: 0.997753 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 75 | score_documentation: 0.168539
hexsha: 670bfcaeeccc178a263df62b6b3d972d4904cdc0 | size: 5,122 | ext: py | lang: Python
max_stars: machine-learning-ex2/ex2/ex2.py | DuffAb/coursera-ml-py | efcfb0847ac7d1e181cb6b93954b0176ce6162d4 | ["MIT"] | count: null | null | null
max_issues: machine-learning-ex2/ex2/ex2.py | DuffAb/coursera-ml-py | efcfb0847ac7d1e181cb6b93954b0176ce6162d4 | ["MIT"] | count: null | null | null
max_forks: machine-learning-ex2/ex2/ex2.py | DuffAb/coursera-ml-py | efcfb0847ac7d1e181cb6b93954b0176ce6162d4 | ["MIT"] | count: null | null | null
content:
# Machine Learning Online Class - Exercise 2: Logistic Regression
#
# Instructions
# ------------
#
# This file contains code that helps you get started on the logistic
# regression exercise. You will need to complete the following functions
# in this exercise:
#
# sigmoid.py
# costFunction.py
# predict.py
# costFunctionReg.py
#
# For this exercise, you will not need to change any code in this file,
# or any other files other than those mentioned above.
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
from plotData import *
import costFunction as cf
import plotDecisionBoundary as pdb
import predict as predict
from sigmoid import *
plt.ion()
# Load data
# The first two columns contain the exam scores and the third column contains the label.
data = np.loadtxt('ex2data1.txt', delimiter=',')
print('plot_decision_boundary data[0, 0:1] = \n{}'.format(data[0, 0:1]))
print('plot_decision_boundary data[0, 0:2] = \n{}'.format(data[0, 0:2]))
print('plot_decision_boundary data[0, 0:3] = \n{}'.format(data[0, 0:3]))
print('plot_decision_boundary data[0, 1:1] = \n{}'.format(data[0, 1:1]))
print('plot_decision_boundary data[0, 1:2] = \n{}'.format(data[0, 1:2]))
print('plot_decision_boundary data[0, 1:3] = \n{}'.format(data[0, 1:3]))
print('plot_decision_boundary data[0, 2:1] = \n{}'.format(data[0, 2:1]))
print('plot_decision_boundary data[0, 2:2] = \n{}'.format(data[0, 2:2]))
print('plot_decision_boundary data[0, 2:3] = \n{}'.format(data[0, 2:3]))
X = data[:, 0:2]
y = data[:, 2]
# ===================== Part 1: Plotting =====================
# We start the exercise by first plotting the data to understand the
# the problem we are working with.
print('Plotting Data with + indicating (y = 1) examples and o indicating (y = 0) examples.')
plot_data(X, y)
plt.axis([30, 100, 30, 100])
# Specified in plot order.
plt.legend(['Admitted', 'Not admitted'], loc=1)
plt.xlabel('Exam 1 score')
plt.ylabel('Exam 2 score')
input('Program paused. Press ENTER to continue')
# ===================== Part 2: Compute Cost and Gradient =====================
# In this part of the exercise, you will implement the cost and gradient
# for logistic regression. You need to complete the code in
# costFunction.py
# Setup the data array appropriately, and add ones for the intercept term
(m, n) = X.shape
# Add intercept term
X = np.c_[np.ones(m), X]
# Initialize fitting parameters
initial_theta = np.zeros(n + 1) # initialize the parameter vector theta
# Compute and display initial cost and gradient
cost, grad = cf.cost_function(initial_theta, X, y)
np.set_printoptions(formatter={'float': '{: 0.4f}\n'.format})
print('Cost at initial theta (zeros): {:0.3f}'.format(cost))
print('Expected cost (approx): 0.693')
print('Gradient at initial theta (zeros): \n{}'.format(grad))
print('Expected gradients (approx): \n-0.1000\n-12.0092\n-11.2628')
# Compute and display cost and gradient with non-zero theta
test_theta = np.array([-24, 0.2, 0.2])
cost, grad = cf.cost_function(test_theta, X, y)
print('Cost at test theta (zeros): {:0.3f}'.format(cost))
print('Expected cost (approx): 0.218')
print('Gradient at test theta: \n{}'.format(grad))
print('Expected gradients (approx): \n0.043\n2.566\n2.647')
input('Program paused. Press ENTER to continue')
# ===================== Part 3: Optimizing using fmin_bfgs =====================
# In this exercise, you will use a built-in function (opt.fmin_bfgs) to find the
# optimal parameters theta
def cost_func(t):
return cf.cost_function(t, X, y)[0]
def grad_func(t):
return cf.cost_function(t, X, y)[1]
# Run fmin_bfgs to obtain the optimal theta
theta, cost, *unused = opt.fmin_bfgs(f=cost_func, fprime=grad_func, x0=initial_theta, maxiter=400, full_output=True, disp=False)
print('Cost at theta found by fmin: {:0.4f}'.format(cost))
print('Expected cost (approx): 0.203')
print('theta: \n{}'.format(theta))
print('Expected Theta (approx): \n-25.161\n0.206\n0.201')
# Plot the decision boundary
pdb.plot_decision_boundary(theta, X, y)
plt.xlabel('Exam 1 score')
plt.ylabel('Exam 2 score')
input('Program paused. Press ENTER to continue')
# ===================== Part 4: Predict and Accuracies =====================
# After learning the parameters, you'll want to use them to predict the outcomes
# on unseen data. In this part, you will use the logistic regression model
# to predict the probability that a student with score 45 on exam 1 and
# score 85 on exam 2 will be admitted
#
# Furthermore, you will compute the training and test set accuracies of our model.
#
# Your task is to complete the code in predict.py
# Predict probability for a student with score 45 on exam 1
# and score 85 on exam 2
prob = sigmoid(np.array([1, 45, 85]).dot(theta))
print('For a student with scores 45 and 85, we predict an admission probability of {:0.4f}'.format(prob))
print('Expected value : 0.775 +/- 0.002')
# Compute the accuracy on our training set
p = predict.predict(theta, X)
print('Train accuracy: {}'.format(np.mean(y == p) * 100))
print('Expected accuracy (approx): 89.0')
input('ex2 Finished. Press ENTER to exit')
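# A minimal sketch of the vectorized cost and gradient that costFunction.py is expected to
# implement: J = -(1/m) * [y . log(h) + (1 - y) . log(1 - h)] and grad = (1/m) * X.T @ (h - y).
# With theta = 0 every prediction h is 0.5, so J = ln(2) ~= 0.693, matching the value above.
# def cost_function_sketch(theta, X, y):
#     m = y.size
#     h = 1.0 / (1.0 + np.exp(-X.dot(theta)))
#     cost = -(y.dot(np.log(h)) + (1 - y).dot(np.log(1 - h))) / m
#     grad = X.T.dot(h - y) / m
#     return cost, grad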
avg_line_length: 34.608108 | max_line_length: 128 | alphanum_fraction: 0.689184 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 3,497 | score_documentation: 0.677976
hexsha: 670c1bac34e09541ccb5d179f3199b3e5c901751 | size: 2,866 | ext: py | lang: Python
max_stars: tests/test_apiFunc.py | Reid1923/py-GoldsberryTest | 3c7e9e2f4ef75720e1a13c4c41018a2072487ddd | ["MIT"] | count: null | null | null
max_issues: tests/test_apiFunc.py | Reid1923/py-GoldsberryTest | 3c7e9e2f4ef75720e1a13c4c41018a2072487ddd | ["MIT"] | count: null | null | null
max_forks: tests/test_apiFunc.py | Reid1923/py-GoldsberryTest | 3c7e9e2f4ef75720e1a13c4c41018a2072487ddd | ["MIT"] | count: null | null | null
content:
# -*- coding: utf-8 -*-
import pytest
import goldsberry
test_data = [
(goldsberry._nbaLeague, 'NBA', '00'),
(goldsberry._nbaLeague, 'WNBA', '10'),
(goldsberry._nbaLeague, 'NBADL', '20'),
(goldsberry._nbaSeason, 1999, '1999-00'),
(goldsberry._nbaSeason, 2000, '2000-01'),
(goldsberry._seasonID, 1999, '21999'),
(goldsberry._measureType, 1, 'Base'),
(goldsberry._measureType, 2, 'Advanced'),
(goldsberry._Scope, 1, ''),
(goldsberry._PerModeSmall48, 1, 'Totals'),
(goldsberry._PerModeSmall36, 1, 'Totals'),
(goldsberry._PerModeMini, 1, 'Totals'),
(goldsberry._PerModeLarge, 1, 'Totals'),
(goldsberry._AheadBehind, 1, 'Ahead or Behind'),
(goldsberry._ClutchTime, 1, 'Last 5 Minutes'),
(goldsberry._GameScope, 2, 'Yesterday'),
(goldsberry._PlayerExperience, 2, 'Rookie'),
(goldsberry._PlayerPosition, 2, 'F'),
(goldsberry._StarterBench, 2, 'Starters'),
(goldsberry._PlusMinus, 2, 'Y'),
(goldsberry._PaceAdjust, 2, 'Y'),
(goldsberry._Rank, 2, 'Y'),
(goldsberry._SeasonType, 1, 'Regular Season'),
(goldsberry._SeasonType4, 1, 'Regular Season'),
(goldsberry._Outcome, 2, 'W'),
(goldsberry._Location, 2, 'Home'),
(goldsberry._SeasonSegment, 2, 'Post All-Star'),
(goldsberry._VsConference, 2, 'East'),
(goldsberry._VsDivision, 2, 'Atlantic'),
(goldsberry._GameSegment, 2, 'First Half'),
(goldsberry._DistanceRange, 1, '5ft Range'),
(goldsberry._valiDate, '', ''),
(goldsberry._valiDate, '2015-01-02', '2015-01-02'),
(goldsberry._ContextMeasure, 1, 'FGM'),
(goldsberry._Position, 2, 'Guard'),
(goldsberry._StatCategory, 1, 'MIN'),
]
@pytest.mark.parametrize("func,key,response", test_data)
def test_api_func(func, key, response):
assert func(key) == response
@pytest.mark.parametrize('func,key', [
(goldsberry._nbaLeague, 'BAD VALUE'),
(goldsberry._nbaSeason, -1),
(goldsberry._seasonID, -1),
(goldsberry._measureType, -1),
(goldsberry._Scope, -1),
(goldsberry._PerModeSmall48, -1),
(goldsberry._PerModeSmall36, -1),
(goldsberry._PerModeMini, -1),
(goldsberry._PerModeLarge, -1),
(goldsberry._AheadBehind, -1),
(goldsberry._ClutchTime, -1),
(goldsberry._GameScope, -1),
(goldsberry._PlayerExperience, -1),
(goldsberry._PlayerPosition, -1),
(goldsberry._StarterBench, -1),
(goldsberry._PlusMinus, 0),
(goldsberry._PaceAdjust, 0),
(goldsberry._Rank, 0),
(goldsberry._SeasonType, 0),
(goldsberry._SeasonType4, 0),
(goldsberry._Outcome, 0),
(goldsberry._Location, 0),
(goldsberry._SeasonSegment, 0),
(goldsberry._VsConference, 0),
(goldsberry._VsDivision, 0),
(goldsberry._GameSegment, 0),
(goldsberry._DistanceRange, 0),
(goldsberry._valiDate, 'date'),
(goldsberry._ContextMeasure, 0),
(goldsberry._Position, 0),
(goldsberry._StatCategory, 0)
])
def test_api_func_raises_valueerror(func, key):
with pytest.raises(ValueError):
func(key)
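# Each (func, key) or (func, key, response) tuple above expands into an independent test case;
# the whole module can be run with, e.g.:
#   pytest tests/test_apiFunc.py -q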
avg_line_length: 33.717647 | max_line_length: 56 | alphanum_fraction: 0.691207 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 1,338 | score_decorators: 0.466853 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 388 | score_documentation: 0.13538
hexsha: 670d0a8e1a1197c9ec69df947dabd43d08e4160b | size: 4,295 | ext: py | lang: Python
max_stars: sasmodels/models/poly_gauss_coil.py | zattala/sasmodels | a547aa73d43145b3bd34770b0ea27ba8882170a3 | ["BSD-3-Clause"] | count: null | null | null
max_issues: sasmodels/models/poly_gauss_coil.py | zattala/sasmodels | a547aa73d43145b3bd34770b0ea27ba8882170a3 | ["BSD-3-Clause"] | count: null | null | null
max_forks: sasmodels/models/poly_gauss_coil.py | zattala/sasmodels | a547aa73d43145b3bd34770b0ea27ba8882170a3 | ["BSD-3-Clause"] | count: null | null | null
content:
#poly_gauss_coil model
#conversion of Poly_GaussCoil.py
#converted by Steve King, Mar 2016
r"""
This empirical model describes the scattering from *polydisperse* polymer
chains in theta solvents or polymer melts, assuming a Schulz-Zimm type
molecular weight distribution.
To describe the scattering from *monodisperse* polymer chains, see the
:ref:`mono-gauss-coil` model.
Definition
----------
.. math::
I(q) = \text{scale} \cdot I_0 \cdot P(q) + \text{background}
where
.. math::
I_0 &= \phi_\text{poly} \cdot V \cdot (\rho_\text{poly}-\rho_\text{solv})^2 \\
P(q) &= 2 [(1 + UZ)^{-1/U} + Z - 1] / [(1 + U) Z^2] \\
Z &= [(q R_g)^2] / (1 + 2U) \\
U &= (Mw / Mn) - 1 = \text{polydispersity ratio} - 1 \\
V &= M / (N_A \delta)
Here, $\phi_\text{poly}$, is the volume fraction of polymer, $V$ is the
volume of a polymer coil, $M$ is the molecular weight of the polymer,
$N_A$ is Avogadro's Number, $\delta$ is the bulk density of the polymer,
$\rho_\text{poly}$ is the sld of the polymer, $\rho_\text{solv}$ is the
sld of the solvent, and $R_g$ is the radius of gyration of the polymer coil.
The 2D scattering intensity is calculated in the same way as the 1D,
but where the $q$ vector is redefined as
.. math::
q = \sqrt{q_x^2 + q_y^2}
References
----------
.. [#] O Glatter and O Kratky (editors), *Small Angle X-ray Scattering*, Academic Press, (1982) Page 404
.. [#] J S Higgins, H C Benoit, *Polymers and Neutron Scattering*, Oxford Science Publications, (1996)
.. [#] S M King, *Small Angle Neutron Scattering* in *Modern Techniques for Polymer Characterisation*, Wiley, (1999)
.. [#] http://www.ncnr.nist.gov/staff/hammouda/distance_learning/chapter_28.pdf
Authorship and Verification
----------------------------
* **Author:**
* **Last Modified by:**
* **Last Reviewed by:**
"""
import numpy as np
from numpy import inf, expm1, power
name = "poly_gauss_coil"
title = "Scattering from polydisperse polymer coils"
description = """
Evaluates the scattering from
polydisperse polymer chains.
"""
category = "shape-independent"
# pylint: disable=bad-whitespace, line-too-long
# ["name", "units", default, [lower, upper], "type", "description"],
parameters = [
["i_zero", "1/cm", 70.0, [0.0, inf], "", "Intensity at q=0"],
["rg", "Ang", 75.0, [0.0, inf], "", "Radius of gyration"],
["polydispersity", "None", 2.0, [1.0, inf], "", "Polymer Mw/Mn"],
]
# pylint: enable=bad-whitespace, line-too-long
# NB: Scale and Background are implicit parameters on every model
def Iq(q, i_zero, rg, polydispersity):
# pylint: disable = missing-docstring
u = polydispersity - 1.0
z = q**2 * (rg**2 / (1.0 + 2.0*u))
# need to trap the case of the polydispersity being 1 (ie, monodisperse!)
if polydispersity == 1.0:
result = 2.0 * (expm1(-z) + z)
index = q != 0.
result[index] /= z[index]**2
result[~index] = 1.0
else:
# Taylor series around z=0 of (2*(1+uz)^(-1/u) + z - 1) / (z^2(u+1))
p = [
#(-1 - 20*u - 155*u**2 - 580*u**3 - 1044*u**4 - 720*u**5) / 2520.,
#(+1 + 14*u + 71*u**2 + 154*u**3 + 120*u**4) / 360.,
#(-1 - 9*u - 26*u**2 - 24*u**3) / 60.,
(+1 + 5*u + 6*u**2) / 12.,
(-1 - 2*u) / 3.,
(+1),
]
result = 2.0 * (power(1.0 + u*z, -1.0/u) + z - 1.0) / (1.0 + u)
index = z > 1e-4
result[index] /= z[index]**2
result[~index] = np.polyval(p, z[~index])
return i_zero * result
Iq.vectorized = True # Iq accepts an array of q values
def random():
"""Return a random parameter set for the model."""
rg = 10**np.random.uniform(0, 4)
#rg = 1e3
polydispersity = 10**np.random.uniform(0, 3)
pars = dict(
#scale=1, background=0,
i_zero=1e7, # i_zero is a simple scale
rg=rg,
polydispersity=polydispersity,
)
return pars
demo = dict(scale=1.0,
i_zero=70.0,
rg=75.0,
polydispersity=2.0,
background=0.0)
# these unit test values taken from SasView 3.1.2
tests = [
[{'scale': 1.0, 'i_zero': 70.0, 'rg': 75.0,
'polydispersity': 2.0, 'background': 0.0},
[0.0106939, 0.469418], [57.6405, 0.169016]],
]
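# Numerical sanity check against the unit-test values above (i_zero=70, rg=75,
# polydispersity=2): with U = 1, Z = (q*Rg)**2 / 3 and P(q) = ((1+Z)**-1 + Z - 1) / Z**2,
# so Iq(0.0106939) ~= 57.64 and Iq(0.469418) ~= 0.169, matching the SasView 3.1.2 numbers.
# print(Iq(np.array([0.0106939, 0.469418]), i_zero=70.0, rg=75.0, polydispersity=2.0))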
avg_line_length: 32.293233 | max_line_length: 116 | alphanum_fraction: 0.584633 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 2,890 | score_documentation: 0.672875
hexsha: 670fa5323287fc9c400ddc9fd03e291ab3a5896f | size: 4,939 | ext: py | lang: Python
max_stars: examples/information_extraction/msra_ner/eval.py | BenfengXu/PaddleNLP | eca87fde4a1814a8f028e0e900d1792cbaa5c700 | ["Apache-2.0"] | count: 1 | 2021-07-22T08:33:53.000Z | 2021-07-22T08:33:53.000Z
max_issues: examples/information_extraction/msra_ner/eval.py | BenfengXu/PaddleNLP | eca87fde4a1814a8f028e0e900d1792cbaa5c700 | ["Apache-2.0"] | count: null | null | null
max_forks: examples/information_extraction/msra_ner/eval.py | BenfengXu/PaddleNLP | eca87fde4a1814a8f028e0e900d1792cbaa5c700 | ["Apache-2.0"] | count: null | null | null
content:
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import ast
import random
import time
import math
from functools import partial
import numpy as np
import paddle
from paddle.io import DataLoader
import paddlenlp as ppnlp
from paddlenlp.datasets import load_dataset
from paddlenlp.data import Stack, Tuple, Pad, Dict
from paddlenlp.transformers import BertForTokenClassification, BertTokenizer
from paddlenlp.metrics import ChunkEvaluator
parser = argparse.ArgumentParser()
# yapf: disable
parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(list(BertTokenizer.pretrained_init_configuration.keys())))
parser.add_argument("--init_checkpoint_path", default=None, type=str, required=True, help="The model checkpoint path.", )
parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", )
parser.add_argument("--batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.", )
parser.add_argument("--device", default="gpu", type=str, choices=["cpu", "gpu", "xpu"] ,help="The device to select to train the model, is must be cpu/gpu/xpu.")
# yapf: enable
def tokenize_and_align_labels(example, tokenizer, no_entity_id,
max_seq_len=512):
labels = example['labels']
example = example['tokens']
tokenized_input = tokenizer(
example,
return_length=True,
is_split_into_words=True,
max_seq_len=max_seq_len)
# -2 for [CLS] and [SEP]
if len(tokenized_input['input_ids']) - 2 < len(labels):
labels = labels[:len(tokenized_input['input_ids']) - 2]
tokenized_input['labels'] = [no_entity_id] + labels + [no_entity_id]
tokenized_input['labels'] += [no_entity_id] * (
len(tokenized_input['input_ids']) - len(tokenized_input['labels']))
return tokenized_input
def do_eval(args):
paddle.set_device(args.device)
# Create dataset, tokenizer and dataloader.
train_ds, eval_ds = load_dataset(
'msra_ner', splits=('train', 'test'), lazy=False)
tokenizer = BertTokenizer.from_pretrained(args.model_name_or_path)
label_list = train_ds.label_list
label_num = len(label_list)
no_entity_id = label_num - 1
trans_func = partial(
tokenize_and_align_labels,
tokenizer=tokenizer,
no_entity_id=no_entity_id,
max_seq_len=args.max_seq_length)
ignore_label = -100
batchify_fn = lambda samples, fn=Dict({
'input_ids': Pad(axis=0, pad_val=tokenizer.pad_token_id), # input
'token_type_ids': Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # segment
'seq_len': Stack(),
'labels': Pad(axis=0, pad_val=ignore_label) # label
}): fn(samples)
eval_ds = eval_ds.map(trans_func)
eval_data_loader = DataLoader(
dataset=eval_ds,
collate_fn=batchify_fn,
num_workers=0,
batch_size=args.batch_size,
return_list=True)
# Define the model network and its loss
model = BertForTokenClassification.from_pretrained(
args.model_name_or_path, num_classes=label_num)
if args.init_checkpoint_path:
model_dict = paddle.load(args.init_checkpoint_path)
model.set_dict(model_dict)
loss_fct = paddle.nn.loss.CrossEntropyLoss(ignore_index=ignore_label)
metric = ChunkEvaluator(label_list=label_list)
model.eval()
metric.reset()
for step, batch in enumerate(eval_data_loader):
input_ids, token_type_ids, length, labels = batch
logits = model(input_ids, token_type_ids)
loss = loss_fct(logits, labels)
avg_loss = paddle.mean(loss)
preds = logits.argmax(axis=2)
num_infer_chunks, num_label_chunks, num_correct_chunks = metric.compute(
length, preds, labels)
metric.update(num_infer_chunks.numpy(),
num_label_chunks.numpy(), num_correct_chunks.numpy())
precision, recall, f1_score = metric.accumulate()
print("eval loss: %f, precision: %f, recall: %f, f1: %f" %
(avg_loss, precision, recall, f1_score))
if __name__ == "__main__":
args = parser.parse_args()
do_eval(args)
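# A plain-Python sketch of what tokenize_and_align_labels does to the label sequence,
# assuming the MSRA-NER label list has 7 tags with 'O' last, so no_entity_id = 6:
#   labels   = [0, 1, 2]
#   aligned  = [6] + labels + [6]                       # pad for the added [CLS] and [SEP]
#   aligned += [6] * (len(input_ids) - len(aligned))    # right-pad to the tokenized length
# so a 5-token input_ids sequence ends up with labels [6, 0, 1, 2, 6].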
avg_line_length: 39.512 | max_line_length: 226 | alphanum_fraction: 0.70905 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,409 | score_documentation: 0.28528
hexsha: 670fb8129b5e60d52265e167fb8a005a31688d39 | size: 14,814 | ext: py | lang: Python
max_stars: src/python/module/z5py/util.py | constantinpape/z5 | 20e364cc614b744a0ee3cb733531c4b872839721 | ["MIT"] | count: 82 | 2018-02-02T04:03:49.000Z | 2022-03-25T07:41:08.000Z
max_issues: src/python/module/z5py/util.py | constantinpape/z5 | 20e364cc614b744a0ee3cb733531c4b872839721 | ["MIT"] | count: 152 | 2017-09-18T15:49:05.000Z | 2022-03-16T21:07:07.000Z
max_forks: src/python/module/z5py/util.py | constantinpape/z5 | 20e364cc614b744a0ee3cb733531c4b872839721 | ["MIT"] | count: 27 | 2017-09-19T14:52:56.000Z | 2021-11-25T14:43:47.000Z
content:
import os
from itertools import product
from concurrent import futures
from contextlib import closing
from datetime import datetime
import numpy as np
from . import _z5py
from .file import File, S3File
from .dataset import Dataset
from .shape_utils import normalize_slices
def product1d(inrange):
for ii in inrange:
yield ii
def blocking(shape, block_shape, roi=None, center_blocks_at_roi=False):
""" Generator for nd blocking.
Args:
shape (tuple): nd shape
block_shape (tuple): nd block shape
roi (tuple[slice]): region of interest (default: None)
center_blocks_at_roi (bool): if given a roi,
whether to center the blocks being generated
at the roi's origin (default: False)
"""
assert len(shape) == len(block_shape), "Invalid number of dimensions."
if roi is None:
# compute the ranges for the full shape
ranges = [range(sha // bsha if sha % bsha == 0 else sha // bsha + 1)
for sha, bsha in zip(shape, block_shape)]
min_coords = [0] * len(shape)
max_coords = shape
else:
# make sure that the roi is valid
roi, _ = normalize_slices(roi, shape)
ranges = [range(rr.start // bsha,
rr.stop // bsha if rr.stop % bsha == 0 else rr.stop // bsha + 1)
for rr, bsha in zip(roi, block_shape)]
min_coords = [rr.start for rr in roi]
max_coords = [rr.stop for rr in roi]
need_shift = False
if roi is not None and center_blocks_at_roi:
shift = [rr.start % bsha for rr, bsha in zip(roi, block_shape)]
need_shift = sum(shift) > 0
# product raises memory error for too large ranges,
# because input iterators are cast to tuple
# so far I have only seen this for 1d "open-ended" datasets
# and hence just implemented a workaround for this case,
# but it should be fairly easy to implement an nd version of product
# without casting to tuple for our use case using the imglib loop trick, see also
# https://stackoverflow.com/questions/8695422/why-do-i-get-a-memoryerror-with-itertools-product
try:
start_points = product(*ranges)
except MemoryError:
assert len(ranges) == 1
start_points = product1d(ranges)
for start_point in start_points:
positions = [sp * bshape for sp, bshape in zip(start_point, block_shape)]
if need_shift:
positions = [pos + sh for pos, sh in zip(positions, shift)]
if any(pos > maxc for pos, maxc in zip(positions, max_coords)):
continue
yield tuple(slice(max(pos, minc), min(pos + bsha, maxc))
for pos, bsha, minc, maxc in zip(positions, block_shape,
min_coords, max_coords))
def copy_dataset_impl(f_in, f_out, in_path_in_file, out_path_in_file,
n_threads, chunks=None, block_shape=None, dtype=None,
roi=None, fit_to_roi=False, **new_compression):
""" Implementation of copy dataset.
Used to implement `copy_dataset`, `convert_to_h5` and `convert_from_h5`.
Can also be used for more flexible use cases, like copying from a zarr/n5
cloud dataset to a filesystem dataset.
Args:
f_in (File): input file object.
f_out (File): output file object.
in_path_in_file (str): name of input dataset.
out_path_in_file (str): name of output dataset.
n_threads (int): number of threads used for copying.
chunks (tuple): chunks of the output dataset.
By default same as input dataset's chunks. (default: None)
block_shape (tuple): block shape used for copying. Must be a multiple
of ``chunks``, which are used by default (default: None)
dtype (str): datatype of the output dataset, default does not change datatype (default: None).
roi (tuple[slice]): region of interest that will be copied. (default: None)
fit_to_roi (bool): if given a roi, whether to set the shape of
the output dataset to the roi's shape
and align chunks with the roi's origin. (default: False)
**new_compression: compression library and options for output dataset. If not given,
the same compression as in the input is used.
"""
ds_in = f_in[in_path_in_file]
# check if we can copy chunk by chunk
in_is_z5 = isinstance(f_in, (File, S3File))
out_is_z5 = isinstance(f_out, (File, S3File))
copy_chunks = (in_is_z5 and out_is_z5) and (chunks is None or chunks == ds_in.chunks) and (roi is None)
# get dataset metadata from input dataset if defaults were given
chunks = ds_in.chunks if chunks is None else chunks
dtype = ds_in.dtype if dtype is None else dtype
# zarr objects may not have compression attribute. if so set it to the settings sent to this function
if not hasattr(ds_in, "compression"):
ds_in.compression = new_compression
compression = new_compression.pop("compression", ds_in.compression)
compression_opts = new_compression
same_lib = in_is_z5 == out_is_z5
if same_lib and compression == ds_in.compression:
compression_opts = compression_opts if compression_opts else ds_in.compression_opts
if out_is_z5:
compression = None if compression == 'raw' else compression
compression_opts = {} if compression_opts is None else compression_opts
else:
compression_opts = {'compression_opts': None} if compression_opts is None else compression_opts
# if the block shape is not explicitly given, use the chunk size
# otherwise check that it's a multiple of chunks
if block_shape is None:
block_shape = chunks
else:
assert all(bs % ch == 0 for bs, ch in zip(block_shape, chunks)),\
"block_shape must be a multiple of chunks"
shape = ds_in.shape
# we need to create the blocking here, before the shape is potentially altered
# if fit_to_roi == True
blocks = blocking(shape, block_shape, roi, fit_to_roi)
if roi is not None:
roi, _ = normalize_slices(roi, shape)
if fit_to_roi:
shape = tuple(rr.stop - rr.start for rr in roi)
ds_out = f_out.require_dataset(out_path_in_file,
dtype=dtype,
shape=shape,
chunks=chunks,
compression=compression,
**compression_opts)
def write_single_block(bb):
data_in = ds_in[bb].astype(dtype, copy=False)
if np.sum(data_in) == 0:
return
if fit_to_roi and roi is not None:
bb = tuple(slice(b.start - rr.start, b.stop - rr.start)
for b, rr in zip(bb, roi))
ds_out[bb] = data_in
def write_single_chunk(bb):
chunk_id = tuple(b.start // ch for b, ch in zip(bb, chunks))
chunk_in = ds_in.read_chunk(chunk_id)
if chunk_in is None:
return
# check if this is a varlen chunk
varlen = tuple(chunk_in.shape) != tuple(b.stop - b.start for b in bb)
ds_out.write_chunk(chunk_id, chunk_in.astype(dtype, copy=False), varlen)
write_single = write_single_chunk if copy_chunks else write_single_block
with futures.ThreadPoolExecutor(max_workers=n_threads) as tp:
tasks = [tp.submit(write_single, bb) for bb in blocks]
[t.result() for t in tasks]
# copy attributes
in_attrs = ds_in.attrs
out_attrs = ds_out.attrs
for key, val in in_attrs.items():
out_attrs[key] = val
def copy_dataset(in_path, out_path,
in_path_in_file, out_path_in_file,
n_threads, chunks=None,
block_shape=None, dtype=None,
use_zarr_format=None, roi=None,
fit_to_roi=False, **new_compression):
""" Copy dataset, optionally change metadata.
The input dataset will be copied to the output dataset chunk by chunk.
Allows to change chunks, datatype, file format and compression.
Can also just copy a roi.
Args:
in_path (str): path to the input file.
out_path (str): path to the output file.
in_path_in_file (str): name of input dataset.
out_path_in_file (str): name of output dataset.
n_threads (int): number of threads used for copying.
chunks (tuple): chunks of the output dataset.
By default same as input dataset's chunks. (default: None)
block_shape (tuple): block shape used for copying. Must be a multiple
of ``chunks``, which are used by default (default: None)
dtype (str): datatype of the output dataset, default does not change datatype (default: None).
use_zarr_format (bool): file format of the output file,
default does not change format (default: None).
roi (tuple[slice]): region of interest that will be copied. (default: None)
fit_to_roi (bool): if given a roi, whether to set the shape of
the output dataset to the roi's shape
and align chunks with the roi's origin. (default: False)
**new_compression: compression library and options for output dataset. If not given,
the same compression as in the input is used.
"""
f_in = File(in_path)
# check if the file format was specified
# if not, keep the format of the input file
# otherwise set the file format
is_zarr = f_in.is_zarr if use_zarr_format is None else use_zarr_format
f_out = File(out_path, use_zarr_format=is_zarr)
copy_dataset_impl(f_in, f_out, in_path_in_file, out_path_in_file,
n_threads, chunks=chunks, block_shape=block_shape,
dtype=dtype, roi=roi, fit_to_roi=fit_to_roi,
**new_compression)
def copy_group(in_path, out_path, in_path_in_file, out_path_in_file, n_threads):
""" Copy group recursively.
Copy the group recursively, using copy_dataset. Metadata of datasets that
are copied cannot be changed and rois cannot be applied.
Args:
in_path (str): path to the input file.
out_path (str): path to the output file.
in_path_in_file (str): name of input group.
out_path_in_file (str): name of output group.
n_threads (int): number of threads used to copy datasets.
"""
f_in = File(in_path)
f_out = File(out_path)
def copy_attrs(gin, gout):
in_attrs = gin.attrs
out_attrs = gout.attrs
for key, val in in_attrs.items():
out_attrs[key] = val
g_in = f_in[in_path_in_file]
g_out = f_out.require_group(out_path_in_file)
copy_attrs(g_in, g_out)
def copy_object(name, obj):
abs_in_key = os.path.join(in_path_in_file, name)
abs_out_key = os.path.join(out_path_in_file, name)
if isinstance(obj, Dataset):
copy_dataset(in_path, out_path,
abs_in_key, abs_out_key, n_threads)
else:
g = f_out.require_group(abs_out_key)
copy_attrs(obj, g)
g_in.visititems(copy_object)
class Timer:
def __init__(self):
self.start_time = None
self.stop_time = None
@property
def elapsed(self):
try:
return (self.stop_time - self.start_time).total_seconds()
except TypeError as e:
if "'NoneType'" in str(e):
raise RuntimeError("{} either not started, or not stopped".format(self))
def start(self):
self.start_time = datetime.utcnow()
def stop(self):
self.stop_time = datetime.utcnow()
return self.elapsed
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def fetch_test_data_stent():
from imageio import volread
data_i16 = volread('imageio:stent.npz')
return (data_i16 / data_i16.max() * 255).astype(np.uint8)
def fetch_test_data():
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
try:
from io import BytesIO as Buffer
except ImportError:
from StringIO import StringIO as Buffer
import zipfile
from imageio import volread
im_url = "https://imagej.nih.gov/ij/images/t1-head-raw.zip"
with closing(urlopen(im_url)) as response:
if response.status != 200:
raise RuntimeError("Test data could not be found at {}, status code {}".format(
im_url, response.status
))
zip_buffer = Buffer(response.read())
with zipfile.ZipFile(zip_buffer) as zf:
tif_buffer = Buffer(zf.read('JeffT1_le.tif'))
return np.asarray(volread(tif_buffer, format='tif'), dtype=np.uint8)
def remove_trivial_chunks(dataset, n_threads,
remove_specific_value=None):
""" Remove chunks that only contain a single value.
    Chunks whose entries all contain the same value are removed from the dataset in-place.
    If ``remove_specific_value`` is given, only chunks filled with that value are removed.
Args:
dataset (z5py.Dataset)
n_threads (int): number of threads
remove_specific_value (int or float): only remove chunks that contain (only) this specific value (default: None)
"""
dtype = dataset.dtype
function = getattr(_z5py, 'remove_trivial_chunks_%s' % dtype)
remove_specific = remove_specific_value is not None
value = remove_specific_value if remove_specific else 0
function(dataset._impl, n_threads, remove_specific, value)
def remove_dataset(dataset, n_threads):
""" Remvoe dataset multi-threaded.
"""
_z5py.remove_dataset(dataset._impl, n_threads)
def remove_chunk(dataset, chunk_id):
""" Remove a chunk
"""
    dataset._impl.remove_chunk(chunk_id)
def remove_chunks(dataset, bounding_box):
""" Remove all chunks overlapping the bounding box
"""
shape = dataset.shape
chunks = dataset.chunks
blocks = blocking(shape, chunks, roi=bounding_box)
for block in blocks:
chunk_id = tuple(b.start // ch for b, ch in zip(block, chunks))
remove_chunk(dataset, chunk_id)
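# A minimal usage sketch for remove_chunks: for an open dataset ``ds``, delete
# all chunks overlapping a bounding box given as a tuple of slices (the shape
# below is made up for illustration).
#
#   remove_chunks(ds, (slice(0, 256), slice(0, 256), slice(0, 256)))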
def unique(dataset, n_threads, return_counts=False):
""" Find unique values in dataset.
Args:
dataset (z5py.Dataset)
n_threads (int): number of threads
return_counts (bool): return counts of unique values (default: False)
"""
dtype = dataset.dtype
if return_counts:
function = getattr(_z5py, 'unique_with_counts_%s' % dtype)
else:
function = getattr(_z5py, 'unique_%s' % dtype)
return function(dataset._impl, n_threads)
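# A minimal usage sketch for unique: the file path and dataset name are made up
# for illustration.
#
#   f = File('segmentation.n5')
#   ds = f['labels']
#   values, counts = unique(ds, n_threads=8, return_counts=True)
#   values = unique(ds, n_threads=8)   # without counts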
| 37.887468
| 120
| 0.645876
| 674
| 0.045498
| 2,561
| 0.172877
| 274
| 0.018496
| 0
| 0
| 5,755
| 0.388484
|
671044f92c1e2bb7a547bce5cdc307d31e50194b
| 8,485
|
py
|
Python
|
custom_components/waste_collection_schedule/sensor.py
|
trstns/hacs_waste_collection_schedule
|
f8f297b43c8e87510e17a558347a88a95f790d7b
|
[
"MIT"
] | null | null | null |
custom_components/waste_collection_schedule/sensor.py
|
trstns/hacs_waste_collection_schedule
|
f8f297b43c8e87510e17a558347a88a95f790d7b
|
[
"MIT"
] | null | null | null |
custom_components/waste_collection_schedule/sensor.py
|
trstns/hacs_waste_collection_schedule
|
f8f297b43c8e87510e17a558347a88a95f790d7b
|
[
"MIT"
] | null | null | null |
"""Sensor platform support for Waste Collection Schedule."""
import collections
import datetime
import logging
from enum import Enum
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, CONF_VALUE_TEMPLATE, STATE_UNKNOWN
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from .const import DOMAIN, UPDATE_SENSORS_SIGNAL
_LOGGER = logging.getLogger(__name__)
CONF_SOURCE_INDEX = "source_index"
CONF_DETAILS_FORMAT = "details_format"
CONF_COUNT = "count"
CONF_LEADTIME = "leadtime"
CONF_DATE_TEMPLATE = "date_template"
CONF_APPOINTMENT_TYPES = "types"
class DetailsFormat(Enum):
"""Values for CONF_DETAILS_FORMAT."""
upcoming = "upcoming" # list of "<date> <type1, type2, ...>"
appointment_types = "appointment_types" # list of "<type> <date>"
generic = "generic" # all values in separate attributes
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_SOURCE_INDEX, default=0): cv.positive_int,
vol.Optional(CONF_DETAILS_FORMAT, default="upcoming"): cv.enum(DetailsFormat),
vol.Optional(CONF_COUNT): cv.positive_int,
vol.Optional(CONF_LEADTIME): cv.positive_int,
vol.Optional(CONF_APPOINTMENT_TYPES): cv.ensure_list,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_DATE_TEMPLATE): cv.template,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
value_template = config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = hass
date_template = config.get(CONF_DATE_TEMPLATE)
if date_template is not None:
date_template.hass = hass
entities = []
entities.append(
ScheduleSensor(
hass=hass,
api=hass.data[DOMAIN],
name=config[CONF_NAME],
source_index=config[CONF_SOURCE_INDEX],
details_format=config[CONF_DETAILS_FORMAT],
count=config.get(CONF_COUNT),
leadtime=config.get(CONF_LEADTIME),
appointment_types=config.get(CONF_APPOINTMENT_TYPES),
value_template=value_template,
date_template=date_template,
)
)
async_add_entities(entities)
class ScheduleSensor(Entity):
"""Base for sensors."""
def __init__(
self,
hass,
api,
name,
source_index,
details_format,
count,
leadtime,
appointment_types,
value_template,
date_template,
):
"""Initialize the entity."""
self._api = api
self._name = name
self._source_index = source_index
self._details_format = details_format
self._count = count
self._leadtime = leadtime
self._appointment_types = appointment_types
self._value_template = value_template
self._date_template = date_template
self._state = STATE_UNKNOWN
self._icon = None
self._picture = None
self._attributes = []
async_dispatcher_connect(hass, UPDATE_SENSORS_SIGNAL, self._update_sensor)
@property
def name(self):
return self._name
@property
def unique_id(self):
return self._name
@property
def should_poll(self):
return False
@property
def icon(self):
return "mdi:trash-can" if self._icon is None else self._icon
@property
def entity_picture(self):
return self._picture
@property
def state(self):
"""Return the state of the entity."""
return self._state
@property
def device_state_attributes(self):
"""Return attributes for the entity."""
return self._attributes
async def async_added_to_hass(self):
"""Entities have been added to hass."""
self._update_sensor()
@property
def _scraper(self):
return self._api.get_scraper(self._source_index)
@property
def _separator(self):
"""Return separator string used to join waste types."""
return self._api.separator
@property
def _include_today(self):
"""Return true if appointments for today shall be included in the results."""
return datetime.datetime.now().time() < self._api._day_switch_time
def _add_refreshtime(self):
"""Add refresh-time (= last fetch time) to device-state-attributes."""
refreshtime = ""
if self._scraper.refreshtime is not None:
refreshtime = self._scraper.refreshtime.strftime("%x %X")
self._attributes["attribution"] = f"Last update: {refreshtime}"
def _set_state(self, upcoming):
"""Set entity state with default format."""
if len(upcoming) == 0:
self._state = ""
self._icon = None
self._picture = None
return
appointment = upcoming[0]
# appointment::=CollectionAppointmentGroup{date=2020-04-01, types=['Type1', 'Type2']}
if self._value_template is not None:
self._state = self._value_template.async_render_with_possible_json_value(
appointment, None
)
else:
self._state = f"{self._separator.join(appointment.types)} in {appointment.daysTo} days"
self._icon = appointment.icon
self._picture = appointment.picture
def _render_date(self, appointment):
if self._date_template is not None:
return self._date_template.async_render_with_possible_json_value(
appointment, None
)
else:
return appointment.date.isoformat()
@callback
def _update_sensor(self):
"""Update the state and the device-state-attributes of the entity.
        Called when new data has been fetched from the scraper source.
"""
if self._scraper is None:
_LOGGER.error(f"source_index {self._source_index} out of range")
return None
self._set_state(
self._scraper.get_upcoming_group_by_day(
count=1,
types=self._appointment_types,
include_today=self._include_today,
)
)
attributes = collections.OrderedDict()
appointment_types = (
sorted(self._scraper.get_types())
if self._appointment_types is None
else self._appointment_types
)
if self._details_format == DetailsFormat.upcoming:
# show upcoming events list in details
upcoming = self._scraper.get_upcoming_group_by_day(
count=self._count,
leadtime=self._leadtime,
types=self._appointment_types,
include_today=self._include_today,
)
for appointment in upcoming:
attributes[self._render_date(appointment)] = self._separator.join(
appointment.types
)
elif self._details_format == DetailsFormat.appointment_types:
# show list of appointments in details
for t in appointment_types:
appointments = self._scraper.get_upcoming(
count=1, types=[t], include_today=self._include_today
)
date = (
"" if len(appointments) == 0 else self._render_date(appointments[0])
)
attributes[t] = date
elif self._details_format == DetailsFormat.generic:
# insert generic attributes into details
attributes["types"] = appointment_types
attributes["upcoming"] = self._scraper.get_upcoming(
count=self._count,
leadtime=self._leadtime,
types=self._appointment_types,
include_today=self._include_today,
)
refreshtime = ""
if self._scraper.refreshtime is not None:
refreshtime = self._scraper.refreshtime.isoformat(timespec="seconds")
attributes["last_update"] = refreshtime
self._attributes = attributes
self._add_refreshtime()
if self.hass is not None:
self.async_schedule_update_ha_state()
| 32.140152
| 99
| 0.635357
| 6,263
| 0.738126
| 0
| 0
| 3,594
| 0.423571
| 1,000
| 0.117855
| 1,307
| 0.154037
|
671186e2f94db3759070c3a35c61ae043b2efdd5
| 2,622
|
py
|
Python
|
qidian.py
|
kivson/qidian-dl
|
9b42f4c530b7938ff80f160ef32aa51cc43671f6
|
[
"MIT"
] | null | null | null |
qidian.py
|
kivson/qidian-dl
|
9b42f4c530b7938ff80f160ef32aa51cc43671f6
|
[
"MIT"
] | null | null | null |
qidian.py
|
kivson/qidian-dl
|
9b42f4c530b7938ff80f160ef32aa51cc43671f6
|
[
"MIT"
] | null | null | null |
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from json import JSONDecodeError
import requests
from funcy.calc import cache
from funcy.debug import print_calls
from funcy.simple_funcs import curry
HEADERS = {
"Accept": "application/json, text/javascript, */*; q=0.01",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/58.0.3029.110 Safari/537.36",
"X-Requested-With": "XMLHttpRequest"
}
HOME_URL = "https://www.webnovel.com/"
class QidianException(Exception):
pass
@cache(60)
def _get_csrftoken():
response = requests.get(HOME_URL)
return response.cookies.get('_csrfToken', None)
def novels():
for page in range(1, 10000):
response = requests.get("https://www.webnovel.com/apiajax/listing/popularAjax", headers=HEADERS, params={
'_csrfToken': _get_csrftoken(),
'category': '',
'pageIndex': page
})
data = _response_to_json(response)
if 'data' not in data or 'items' not in data['data'] or 'isLast' not in data['data']:
raise QidianException('Expected data not found')
yield from data['data']['items']
if data['data']['isLast'] == 1:
break
def _response_to_json(response):
try:
data = response.json()
except JSONDecodeError:
raise QidianException('Json parse Error')
return data
def charpters_list(bookId):
response = requests.get('https://www.webnovel.com/apiajax/chapter/GetChapterList', headers=HEADERS, params={
'_csrfToken': _get_csrftoken(),
'bookId': bookId
})
data = _response_to_json(response)
if 'data' not in data or 'chapterItems' not in data['data']:
raise QidianException('Expected data not found')
yield from data['data']['chapterItems']
def chapter(bookId, chapterId):
response = requests.get('https://www.webnovel.com/apiajax/chapter/GetContent', headers=HEADERS, params={
'_csrfToken': _get_csrftoken(),
'bookId': bookId,
'chapterId': chapterId
})
data = _response_to_json(response)
if 'data' not in data or 'chapterInfo' not in data['data']:
raise QidianException('Expected data not found')
return data['data']['chapterInfo']
def all_chapters(bookId, poolsize=10):
charpters = charpters_list(bookId=bookId)
with ThreadPoolExecutor(max_workers=poolsize) as executor:
chapter_getter = partial(chapter, bookId)
yield from executor.map(chapter_getter, (c['chapterId'] for c in charpters))
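# A minimal usage sketch: the book id below is a placeholder; real ids can be
# taken from the items yielded by novels().
#
#   book_id = '1234567890'
#   chapters = list(all_chapters(book_id, poolsize=10))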
| 29.133333
| 113
| 0.666667
| 42
| 0.016018
| 1,276
| 0.486651
| 122
| 0.046529
| 0
| 0
| 764
| 0.291381
|
6712802d8a80e0d4a1dc7de07b3fd9bb724b208d
| 4,398
|
py
|
Python
|
srcWatteco/TICs/_poubelle/TIC_ICEp.py
|
OStephan29/Codec-Python
|
76d651bb23daf1d9307c8b84533d9f24a59cea28
|
[
"BSD-3-Clause"
] | 1
|
2022-01-12T15:46:58.000Z
|
2022-01-12T15:46:58.000Z
|
srcWatteco/TICs/_poubelle/TIC_ICEp.py
|
OStephan29/Codec-Python
|
76d651bb23daf1d9307c8b84533d9f24a59cea28
|
[
"BSD-3-Clause"
] | null | null | null |
srcWatteco/TICs/_poubelle/TIC_ICEp.py
|
OStephan29/Codec-Python
|
76d651bb23daf1d9307c8b84533d9f24a59cea28
|
[
"BSD-3-Clause"
] | 1
|
2021-10-05T08:40:15.000Z
|
2021-10-05T08:40:15.000Z
|
# -*- coding: utf-8 -*-
# To convert from TICDataXXXFromBitfields to TICDataBatchXXXFromFieldIndex
# Notepad++ regular expressions:
# Find : TICDataSelectorIfBit\( ([0-9]*), Struct\("([^\"]*)"\/([^\)]*).*
# Replace: \1 : \3, # \2
from ._TIC_Tools import *
from ._TIC_Types import *
TICDataICEpFromBitfields = Struct(
TICDataSelectorIfBit( 0, Struct("DEBUTp"/TYPE_DMYhms) ),
TICDataSelectorIfBit( 1, Struct("FINp"/TYPE_DMYhms)),
TICDataSelectorIfBit( 2, Struct("CAFp"/Int16ub) ),
TICDataSelectorIfBit( 3, Struct("DATE_EAp"/TYPE_DMYhms) ),
TICDataSelectorIfBit( 4, Struct("EApP"/Int24ub) ),
TICDataSelectorIfBit( 5, Struct("EApPM"/Int24ub) ),
TICDataSelectorIfBit( 6, Struct("EApHCE"/Int24ub) ),
TICDataSelectorIfBit( 7, Struct("EApHCH"/Int24ub) ),
TICDataSelectorIfBit( 8, Struct("EApHH"/Int24ub) ),
TICDataSelectorIfBit( 9, Struct("EApHCD"/Int24ub) ),
TICDataSelectorIfBit( 10, Struct("EApHD"/Int24ub) ),
TICDataSelectorIfBit( 11, Struct("EApJA"/Int24ub) ),
TICDataSelectorIfBit( 12, Struct("EApHPE"/Int24ub) ),
TICDataSelectorIfBit( 13, Struct("EApHPH"/Int24ub) ),
TICDataSelectorIfBit( 14, Struct("EApHPD"/Int24ub) ),
TICDataSelectorIfBit( 15, Struct("EApSCM"/Int24ub) ),
TICDataSelectorIfBit( 16, Struct("EApHM"/Int24ub) ),
TICDataSelectorIfBit( 17, Struct("EApDSM"/Int24ub) ),
TICDataSelectorIfBit( 18, Struct("DATE_ERPp"/TYPE_DMYhms) ),
TICDataSelectorIfBit( 19, Struct("ERPpP"/Int24ub) ),
TICDataSelectorIfBit( 20, Struct("ERPpPM"/Int24ub) ),
TICDataSelectorIfBit( 21, Struct("ERPpHCE"/Int24ub) ),
TICDataSelectorIfBit( 22, Struct("ERPpHCH"/Int24ub) ),
TICDataSelectorIfBit( 23, Struct("ERPpHH"/Int24ub) ),
TICDataSelectorIfBit( 24, Struct("ERPpHCD"/Int24ub) ),
TICDataSelectorIfBit( 25, Struct("ERPpHD"/Int24ub) ),
TICDataSelectorIfBit( 26, Struct("ERPpJA"/Int24ub) ),
TICDataSelectorIfBit( 27, Struct("ERPpHPE"/Int24ub) ),
TICDataSelectorIfBit( 28, Struct("ERPpHPH"/Int24ub) ),
TICDataSelectorIfBit( 29, Struct("ERPpHPD"/Int24ub) ),
TICDataSelectorIfBit( 30, Struct("ERPpSCM"/Int24ub) ),
TICDataSelectorIfBit( 31, Struct("ERPpHM"/Int24ub) ),
TICDataSelectorIfBit( 32, Struct("ERPpDSM"/Int24ub) ),
TICDataSelectorIfBit( 33, Struct("DATE_ERNp"/TYPE_DMYhms) ),
TICDataSelectorIfBit( 34, Struct("ERNpP"/Int24ub) ),
TICDataSelectorIfBit( 35, Struct("ERNpPM"/Int24ub) ),
TICDataSelectorIfBit( 36, Struct("ERNpHCE"/Int24ub) ),
TICDataSelectorIfBit( 37, Struct("ERNpHCH"/Int24ub) ),
TICDataSelectorIfBit( 38, Struct("ERNpHH"/Int24ub) ),
TICDataSelectorIfBit( 39, Struct("ERNpHCD"/Int24ub) ),
TICDataSelectorIfBit( 40, Struct("ERNpHD"/Int24ub) ),
TICDataSelectorIfBit( 41, Struct("ERNpJA"/Int24ub) ),
TICDataSelectorIfBit( 42, Struct("ERNpHPE"/Int24ub) ),
TICDataSelectorIfBit( 43, Struct("ERNpHPH"/Int24ub) ),
TICDataSelectorIfBit( 44, Struct("ERNpHPD"/Int24ub) ),
TICDataSelectorIfBit( 45, Struct("ERNpSCM"/Int24ub) ),
TICDataSelectorIfBit( 46, Struct("ERNpHM"/Int24ub) ),
TICDataSelectorIfBit( 47, Struct("ERNpDSM"/Int24ub) )
)
# NOTE: For Batch only scalar/numeric values are accepted
TICDataBatchICEpFromFieldIndex = Switch( FindFieldIndex,
{
#0 : TYPE_DMYhms, # DEBUTp
#1 : TYPE_DMYhms, # FINp
2 : Int16ub, # CAFp
#3 : TYPE_DMYhms, # DATE_EAp
4 : Int24ub, # EApP
5 : Int24ub, # EApPM
6 : Int24ub, # EApHCE
7 : Int24ub, # EApHCH
8 : Int24ub, # EApHH
9 : Int24ub, # EApHCD
10 : Int24ub, # EApHD
11 : Int24ub, # EApJA
12 : Int24ub, # EApHPE
13 : Int24ub, # EApHPH
14 : Int24ub, # EApHPD
15 : Int24ub, # EApSCM
16 : Int24ub, # EApHM
17 : Int24ub, # EApDSM
#18 : TYPE_DMYhms, # DATE_ERPp
19 : Int24ub, # ERPpP
20 : Int24ub, # ERPpPM
21 : Int24ub, # ERPpHCE
22 : Int24ub, # ERPpHCH
23 : Int24ub, # ERPpHH
24 : Int24ub, # ERPpHCD
25 : Int24ub, # ERPpHD
26 : Int24ub, # ERPpJA
27 : Int24ub, # ERPpHPE
28 : Int24ub, # ERPpHPH
29 : Int24ub, # ERPpHPD
30 : Int24ub, # ERPpSCM
31 : Int24ub, # ERPpHM
32 : Int24ub, # ERPpDSM
#33 : TYPE_DMYhms, # DATE_ERNp
34 : Int24ub, # ERNpP
35 : Int24ub, # ERNpPM
36 : Int24ub, # ERNpHCE
37 : Int24ub, # ERNpHCH
38 : Int24ub, # ERNpHH
39 : Int24ub, # ERNpHCD
40 : Int24ub, # ERNpHD
41 : Int24ub, # ERNpJA
42 : Int24ub, # ERNpHPE
43 : Int24ub, # ERNpHPH
44 : Int24ub, # ERNpHPD
45 : Int24ub, # ERNpSCM
46 : Int24ub, # ERNpHM
47 : Int24ub, # ERNpDSM
}, default = TICUnbatchableFieldError()
)
| 33.572519
| 74
| 0.698272
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,169
| 0.265682
|
6714f1b0e63e554da53c6d95c385058b29428db0
| 2,095
|
py
|
Python
|
tests/test_check_types.py
|
oliel/python-ovirt-engine-sdk4
|
c0b13982b45dee664ebc063bda7686124b402c14
|
[
"Apache-2.0"
] | 3
|
2022-01-14T00:37:58.000Z
|
2022-03-26T12:26:32.000Z
|
tests/test_check_types.py
|
oliel/python-ovirt-engine-sdk4
|
c0b13982b45dee664ebc063bda7686124b402c14
|
[
"Apache-2.0"
] | 29
|
2021-07-20T12:42:44.000Z
|
2022-03-28T13:01:33.000Z
|
tests/test_check_types.py
|
oliel/python-ovirt-engine-sdk4
|
c0b13982b45dee664ebc063bda7686124b402c14
|
[
"Apache-2.0"
] | 12
|
2021-07-20T12:27:07.000Z
|
2022-02-24T11:10:12.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ovirtsdk4.services as services
import ovirtsdk4.types as types
import unittest
from nose.tools import (
assert_in,
assert_raises,
)
from .server import TestServer
class CheckTypesTest(unittest.TestCase):
def test_service_type_error(self):
"""
Test that calling a method with multiple wrong parameter types
generates an informative exception.
"""
vm_service = services.VmService(None, None)
with assert_raises(TypeError) as context:
vm_service.start(
use_cloud_init='true',
vm=types.Disk(),
)
message = str(context.exception)
assert_in(
"The 'use_cloud_init' parameter should be of type 'bool', "
"but it is of type 'str'",
message
)
assert_in(
"The 'vm' parameter should be of type 'Vm', but it is of "
"type 'Disk'",
message
)
def test_locator_type_error(self):
"""
Test that calling a service locator with a wrong parameter type
generates an informative exception.
"""
vms_service = services.VmsService(None, None)
with assert_raises(TypeError) as context:
vms_service.vm_service(types.Vm())
message = str(context.exception)
assert_in(
"The 'id' parameter should be of type 'str', but it is of "
"type 'Vm'.",
message
)
| 30.362319
| 74
| 0.630072
| 1,304
| 0.622434
| 0
| 0
| 0
| 0
| 0
| 0
| 1,084
| 0.517422
|
6715015a823d4efe629d554c1f06e22bd2b8c5e4
| 7,518
|
py
|
Python
|
nsi/shell.py
|
NextStepInnovation/nsi-tools
|
ee4c9a9e512a2fb4942699d88920bc8210a3d701
|
[
"MIT"
] | null | null | null |
nsi/shell.py
|
NextStepInnovation/nsi-tools
|
ee4c9a9e512a2fb4942699d88920bc8210a3d701
|
[
"MIT"
] | null | null | null |
nsi/shell.py
|
NextStepInnovation/nsi-tools
|
ee4c9a9e512a2fb4942699d88920bc8210a3d701
|
[
"MIT"
] | null | null | null |
import os
import io
import sys
import subprocess
import shlex
import logging
from threading import Timer
from typing import Callable, Any, List
from pathlib import Path # noqa: for doctest
import tempfile # noqa: for doctest
from .toolz import (
merge, map, pipe, curry, do, cprint
)
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
def start_timeout(command: List[str], process: subprocess.Popen,
timeout: int):
# https://www.blog.pythonlibrary.org/2016/05/17/python-101-how-to-timeout-a-subprocess/
def kill():
log.warning(f'Process ({command[0]}) timeout expired.')
return process.kill()
timer = Timer(timeout, kill)
timer.start()
return timer
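# A minimal usage sketch for start_timeout; the 'sleep' command only serves as
# an illustrative long-running process.
#
#   cmd = ['sleep', '10']
#   proc = subprocess.Popen(cmd)
#   timer = start_timeout(cmd, proc, 2)
#   proc.wait()      # returns after ~2s with returncode -9 once the process is killed
#   timer.cancel()   # harmless if the timer has already fired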
@curry
def shell_iter(command, *, echo: bool = True,
echo_func: Callable[[Any], None] = cprint(file=sys.stderr,
end=''),
timeout: int = None, **popen_kw):
'''Execute a shell command, yield lines of output as they come
possibly echoing command output to a given echo_func, and finally
yields the status code of the process.
This will run the shell command, yielding each line of output as
it runs. When the process terminates, it will then yield the
remainder of output, then finally the integer status code. It can
also be terminated early via a timeout parameter. By default, the
command will also echo to stderr.
Args:
command (str): Shell command to execute. Tilde (~) and shell
variable completion provided
echo (bool): Should the output be echoed to echo_func in
addition to yielding lines of output?
echo_func (Callable[[Any], None]): Function to use when echoing
output. **Be warned**, this function is called __for each
character__ of output. By default, this is `cprint(end='')`
(i.e. print with end='')
timeout (int): If set, the process will be killed after this
many seconds (kill -9).
Returns: generator of the form
*output_lines, status_code = shell_iter(...)
where output_lines is a sequence of strings of output and
status_code is an integer status code
Examples:
>>> with tempfile.TemporaryDirectory() as tempdir:
... root = Path(tempdir)
... _ = Path(root, 'a.txt').write_text('')
... _ = Path(root, 'b.txt').write_text('')
... # FYI, this echos to stderr, which doctests won't capture
... *lines, status = shell_iter(f'ls {root}')
>>> lines
['a.txt', 'b.txt']
>>> status
0
>>> with tempfile.TemporaryDirectory() as tempdir:
... root = Path(tempdir)
... _ = Path(root, 'c.txt').write_text('')
... _ = Path(root, 'd.txt').write_text('')
... *lines, _ = shell_iter(f'ls {root}', echo=False)
>>> lines
['c.txt', 'd.txt']
>>> *lines, status = shell_iter(
... f'sleep 5', echo=False, timeout=0.01
... )
>>> lines
[]
>>> status
-9
'''
popen_kw = merge({
'stdout': subprocess.PIPE,
'stderr': subprocess.STDOUT,
}, popen_kw)
command_split = pipe(
shlex.split(command),
map(os.path.expanduser),
map(os.path.expandvars),
tuple,
)
process = subprocess.Popen(command_split, **popen_kw)
timer = None
if timeout:
timer = start_timeout(command_split, process, timeout)
def process_running():
return process.poll() is None
line = ''
while process_running():
char = process.stdout.read(1).decode('utf-8', errors='ignore')
if char:
echo_func(char) if echo else ''
if char == '\n':
yield line
line = ''
else:
line += char
if timer:
timer.cancel()
rest = process.stdout.read().decode('utf-8', errors='ignore')
for char in rest:
echo_func(char) if echo else ''
if char == '\n':
yield line
line = ''
else:
line += char
if line:
echo_func(char) if echo else ''
yield line
yield process.poll()
@curry
def shell(command, **kw):
'''Execute a shell command and return status code as an int and
command output as a string, possibly echoing command output to a
given echo_func.
Args:
command (str): Shell command to execute. Tilde (~) and shell
variable completion provided
echo (bool): Should the output be echoed to echo_func in
addition to yielding lines of output?
echo_func (Callable[[Any], None]): Function to use when echoing
            output. **Be warned**, this function is called __for each
character__ of output. By default, this is `cprint(end='')`
(i.e. print with end='')
timeout (int): If set, the process will be killed after this
many seconds (kill -9).
Examples:
>>> with tempfile.TemporaryDirectory() as tempdir:
... root = Path(tempdir)
... _ = Path(root, 'a.txt').write_text('')
... _ = Path(root, 'b.txt').write_text('')
... # FYI, this echos to stderr, which doctests won't capture
... status, output = shell(f'ls {root}')
>>> output == "a.txt\\nb.txt"
True
>>> status
0
>>> with tempfile.TemporaryDirectory() as tempdir:
... root = Path(tempdir)
... _ = Path(root, 'c.txt').write_text('')
... _ = Path(root, 'd.txt').write_text('')
... _, output = shell(f'ls {root}', echo=False)
>>> output == 'c.txt\\nd.txt'
True
>>> status, output = shell(
... f'sleep 5', echo=False, timeout=0.01
... )
>>> output == ""
True
>>> status
-9
'''
*lines, status = shell_iter(command, **kw)
return status, '\n'.join(lines)
@curry
def getoutput(command, **kw):
status, content = shell(command, **kw)
return content
@curry
def shell_pipe(command, stdin, *, timeout: int = None, **popen_kw):
'''Execute a shell command with stdin content and return command
output as a string.
Args:
command (str): Shell command to execute. Tilde (~) and shell
variable completion provided
stdin (str): String content to provide to process stdin
timeout (int): If set, the process will be killed after this
many seconds (kill -9).
Examples:
>>> with tempfile.TemporaryDirectory() as tempdir:
... root = Path(tempdir)
... _ = Path(root, 'a.txt').write_text('')
... _ = Path(root, 'b.txt').write_text('')
... _ = Path(root, 'ab.txt').write_text('')
... output = pipe(
... getoutput(f'ls {root}'),
... shell_pipe('grep a')
... )
>>> sorted(output.strip().split()) == ["a.txt", "ab.txt"]
True
'''
popen_kw = merge({
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
'stdin': subprocess.PIPE,
}, popen_kw)
command_split = pipe(
shlex.split(command),
map(os.path.expanduser),
map(os.path.expandvars),
tuple,
)
process = subprocess.Popen(command_split, **popen_kw)
timer = None
if timeout:
        timer = start_timeout(command_split, process, timeout)
stdout, stderr = process.communicate(
stdin.encode('utf-8', errors='ignore')
)
if timer:
timer.cancel()
return stdout.decode('utf-8', errors='ignore')
| 28.477273
| 91
| 0.580607
| 0
| 0
| 3,482
| 0.463155
| 6,757
| 0.898776
| 0
| 0
| 4,742
| 0.630753
|
6715fb7acc45572b00524312f06dff2708091d1d
| 8,934
|
py
|
Python
|
ICLR_2022/Cubic_10D/PIVEN/DataGen.py
|
streeve/PI3NN
|
f7f08a195096e0388bb9230bc67c6acd6f41581a
|
[
"Apache-2.0"
] | 11
|
2021-11-08T20:38:50.000Z
|
2022-01-30T02:46:39.000Z
|
ICLR_2022/Cubic_10D/PIVEN/DataGen.py
|
streeve/PI3NN
|
f7f08a195096e0388bb9230bc67c6acd6f41581a
|
[
"Apache-2.0"
] | 1
|
2022-01-13T19:46:32.000Z
|
2022-02-09T16:23:56.000Z
|
ICLR_2022/Cubic_10D/PIVEN/DataGen.py
|
streeve/PI3NN
|
f7f08a195096e0388bb9230bc67c6acd6f41581a
|
[
"Apache-2.0"
] | 1
|
2021-12-17T18:38:26.000Z
|
2021-12-17T18:38:26.000Z
|
"""
Data creation:
Load the data, normalize it, and split into train and test.
"""
'''
Adds the capability of loading pre-split UCI train/test data
via the function LoadData_Splitted_UCI.
'''
import numpy as np
import os
import pandas as pd
import tensorflow as tf
DATA_PATH = "../UCI_Datasets"
class DataGenerator:
def __init__(self, dataset_name):
self.dataset_name = dataset_name
# used for metrics calculation
self.scale_c = None # std
self.shift_c = None # mean
def create_cubic_10D_data(self):
Npar = 10
Ntrain = 5000
Nout = 1
Ntest = 1000
# x_train = tf.random.uniform(shape=(Ntrain, Npar))*4.0-2.0
x_train = tf.random.normal(shape=(Ntrain, Npar))
y_train = x_train ** 3
y_train = tf.reduce_sum(y_train, axis=1, keepdims=True)/10.0 + 1.0*tf.random.normal([x_train.shape[0], 1])
# x_test = tf.random.uniform(shape=(Ntest, Npar))
# x_test[:,1] = x_test[:,1] + 4.0
# x_test = np.random.uniform(size=(Ntest,Npar))
# x_test[:,1] = x_test[:,1] + 4.0
x_test = np.random.normal(size=(Ntest,Npar)) + 2.0
x_test = tf.convert_to_tensor(x_test, dtype=tf.float32)
scale_c = np.std(x_test.eval(session=tf.compat.v1.Session()))
y_test = x_test ** 3
y_test = tf.reduce_sum(y_test, axis=1, keepdims=True)/10.0 + 1.0*tf.random.normal([x_test.shape[0], 1])
### to Numpy array in TF1 compat environment using TF2
x_train = x_train.eval(session=tf.compat.v1.Session())
y_train = y_train.eval(session=tf.compat.v1.Session())
x_test = x_test.eval(session=tf.compat.v1.Session())
y_test = y_test.eval(session=tf.compat.v1.Session())
### normalization
x_mean = np.mean(x_train, axis=0)
x_std = np.std(x_train,axis=0)
xtrain_normal = (x_train - x_mean)/x_std
y_mean = np.mean(y_train,axis=0)
y_std = np.std(y_train,axis=0)
ytrain_normal = (y_train - y_mean)/y_std
xvalid_normal = (x_test - x_mean) / x_std
yvalid_normal = (y_test - y_mean) / y_std
X_train = xtrain_normal
y_train = ytrain_normal
X_val = xvalid_normal
y_val = yvalid_normal
self.scale_c = scale_c
return X_train, y_train, X_val, y_val
def create_data(self, seed_in=5, train_prop=0.9):
"""
@param seed_in: seed for numpy random seed
@param train_prop: train proportion
"""
np.random.seed(seed_in)
# load UCI data
dataset = self.dataset_name
dataset_path = f"{DATA_PATH}/{dataset}.txt"
if dataset == 'YearPredictionMSD':
data = np.loadtxt(dataset_path, delimiter=',')
elif dataset == 'naval':
data = np.loadtxt(dataset_path)
data = data[:, :-1] # have 2 y as GT, ignore last
else:
data = np.loadtxt(dataset_path)
# save normalization constants (used for calculating results)
if dataset == 'YearPredictionMSD':
scale_c = np.std(data[:, 0]) # in YearPredictionMSD, label's index = 0
shift_c = np.mean(data[:, 0])
else:
scale_c = np.std(data[:, -1])
shift_c = np.mean(data[:, -1])
# normalize data
for i in range(data.shape[1]):
sdev_norm = np.std(data[:, i])
sdev_norm = 0.001 if sdev_norm == 0 else sdev_norm # avoid zero variance features
data[:, i] = (data[:, i] - np.mean(data[:, i])) / sdev_norm
# split train test
if dataset == 'YearPredictionMSD':
# train: first 463,715 examples
# test: last 51,630 examples
train = data[:463715, :]
test = data[-51630:, :]
else:
# split into train/test in random
perm = np.random.permutation(data.shape[0])
train_size = int(round(train_prop * data.shape[0]))
train = data[perm[:train_size], :]
test = data[perm[train_size:], :]
# split to target and data
if dataset == 'YearPredictionMSD':
y_train = train[:, 0].reshape(-1, 1)
X_train = train[:, 1:]
y_val = test[:, 0].reshape(-1, 1)
X_val = test[:, 1:]
else:
y_train = train[:, -1].reshape(-1, 1)
X_train = train[:, :-1]
y_val = test[:, -1].reshape(-1, 1)
X_val = test[:, :-1]
self.scale_c = scale_c
self.shift_c = shift_c
return X_train, y_train, X_val, y_val
def LoadData_Splitted_UCI(self, loadCSVName, original_data_path, splitted_data_path, split_seed, **kwargs):
## (1) Load the original data for the normalization purpose
# current_dir = os.path.dirname(__file__)
# uci_dir = os.path.join(current_dir, 'UCI_datasets')
uci_dir = original_data_path
if loadCSVName == 'boston':
data = np.loadtxt(os.path.join(uci_dir, 'boston-housing/boston_housing.txt'))
if loadCSVName == 'concrete':
data_df = pd.read_excel(os.path.join(uci_dir, 'concrete/Concrete_Data.xls'))
data = data_df.values
if loadCSVName == 'energy':
data_df = pd.read_excel(os.path.join(uci_dir, 'energy-efficiency/ENB2012_data.xlsx'), engine='openpyxl')
data_df = data_df.dropna(how='all', axis='columns')
data_df = data_df.dropna(how='all', axis='rows')
data = data_df.values
if loadCSVName == 'kin8nm':
data_df = pd.read_csv(os.path.join(uci_dir, 'kin8nm/dataset_2175_kin8nm.csv'), sep=',')
data = data_df.values
if loadCSVName == 'naval':
data = np.loadtxt(os.path.join(uci_dir, 'naval/data.txt'))
if loadCSVName == 'power':
data_df = pd.read_excel(os.path.join(uci_dir, 'power-plant/Folds5x2_pp.xlsx'), engine='openpyxl')
data = data_df.values
if loadCSVName == 'protein':
data_df = pd.read_csv(os.path.join(uci_dir, 'protein/CASP.csv'), sep=',')
# print(data_df)
'''Move the Y data (originally located at the first column) to last column in order to keep consistency
with the normalization process'''
col_names = data_df.columns.tolist()
col_names.append(col_names[0])
del col_names[col_names.index(col_names[0])]
# print(col_names)
data_df = data_df[col_names]
# print(data_df)
data = data_df.values
if loadCSVName == 'wine':
data_df = pd.read_csv(os.path.join(uci_dir, 'wine-quality/winequality-red.csv'), sep=';')
data = data_df.values
if loadCSVName == 'yacht':
data = np.loadtxt(os.path.join(uci_dir, 'yacht/yacht_hydrodynamics.data'))
if loadCSVName == 'MSD':
with open(os.path.join(uci_dir, 'song/YearPredictionMSD.npy'), 'rb') as f:
data = np.load(f)
## (2) Load the pre-splitted train/test data
##
xyTrain_load = np.loadtxt(splitted_data_path+'xyTrain_'+loadCSVName+'_seed_'+str(split_seed)+'.csv', delimiter=',')
xyTest_load = np.loadtxt(splitted_data_path+'xyTest_'+loadCSVName+'_seed_'+str(split_seed)+'.csv', delimiter=',')
xyTrain_load = xyTrain_load.astype(np.float32)
# xyValid_load = xyValid_load.astype(np.float32)
xyTest_load = xyTest_load.astype(np.float32)
# original normalization functions
        # work out normalisation constants (needed when unnormalising later)
scale_c = np.std(data[:, -1])
shift_c = np.mean(data[:, -1])
# normalise data
num_cols = xyTrain_load.shape[1]
print('num cols: {}'.format(num_cols))
for i in range(0, num_cols):
# get the sdev_norm from original data
sdev_norm = np.std(data[:, i])
sdev_norm = 0.001 if sdev_norm == 0 else sdev_norm
# apply on the pre-splitted data
xyTrain_load[:, i] = (xyTrain_load[:, i] - np.mean(data[:, i]) )/sdev_norm
xyTest_load[:, i] = (xyTest_load[:, i] - np.mean(data[:, i]) )/sdev_norm
# xyValid_load[:, i] = (xyValid_load[:, i] - np.mean(data[:, i]) )/sdev_norm
if loadCSVName == 'energy' or loadCSVName == 'naval':
xTrain = xyTrain_load[:, :-2] ## all columns except last two columns as inputs
yTrain = xyTrain_load[:, -1] ## last column as output
xTest = xyTest_load[:, :-2]
yTest = xyTest_load[:, -1]
else:
xTrain = xyTrain_load[:, :-1]
yTrain = xyTrain_load[:, -1]
xTest = xyTest_load[:, :-1]
yTest = xyTest_load[:, -1]
self.scale_c = scale_c
self.shift_c = shift_c
return xTrain, yTrain, xTest, yTest
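# A minimal usage sketch for DataGenerator; the dataset names are placeholders
# and create_data() expects a matching text file under DATA_PATH.
#
#   gen = DataGenerator('cubic10d')   # the name is only used as a label here
#   X_train, y_train, X_val, y_val = gen.create_cubic_10D_data()
#
#   gen = DataGenerator('concrete')   # expects ../UCI_Datasets/concrete.txt
#   X_train, y_train, X_val, y_val = gen.create_data(seed_in=5, train_prop=0.9)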
| 36.317073
| 123
| 0.580479
| 8,630
| 0.965973
| 0
| 0
| 0
| 0
| 0
| 0
| 2,376
| 0.26595
|
67161d52650aa2e5bc2f66de7b2914c066936052
| 362
|
py
|
Python
|
after/config.py
|
mauvilsa/2021-config
|
870fd832bda269a1be7bfba32dd327df9987e74a
|
[
"MIT"
] | 5
|
2021-12-25T15:16:16.000Z
|
2022-03-19T09:04:39.000Z
|
after/config.py
|
ArjanCodes/2021-config
|
7c2c3babb0fb66d69eac81590356fae512c5e784
|
[
"MIT"
] | 1
|
2022-01-14T08:02:13.000Z
|
2022-01-14T08:02:13.000Z
|
after/config.py
|
mauvilsa/2021-config
|
870fd832bda269a1be7bfba32dd327df9987e74a
|
[
"MIT"
] | 1
|
2022-01-14T06:32:44.000Z
|
2022-01-14T06:32:44.000Z
|
from dataclasses import dataclass
@dataclass
class Paths:
log: str
data: str
@dataclass
class Files:
train_data: str
train_labels: str
test_data: str
test_labels: str
@dataclass
class Params:
epoch_count: int
lr: float
batch_size: int
@dataclass
class MNISTConfig:
paths: Paths
files: Files
params: Params
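

if __name__ == "__main__":
    # Minimal sketch with placeholder values; in practice these fields would be
    # populated from a configuration file rather than hard-coded here.
    config = MNISTConfig(
        paths=Paths(log="logs/", data="data/"),
        files=Files(
            train_data="train-images.gz",
            train_labels="train-labels.gz",
            test_data="test-images.gz",
            test_labels="test-labels.gz",
        ),
        params=Params(epoch_count=10, lr=1e-3, batch_size=32),
    )
    print(config)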
| 12.066667
| 33
| 0.679558
| 272
| 0.751381
| 0
| 0
| 316
| 0.872928
| 0
| 0
| 0
| 0
|
671650e9876f386bef01f59b8d08f601fc6d3ed8
| 14,103
|
py
|
Python
|
lab7/lab7.py
|
cudaczek/nlp-labs-2020
|
8e40fe04d2350c6e43a36b29f4428a34aedb6dea
|
[
"MIT"
] | null | null | null |
lab7/lab7.py
|
cudaczek/nlp-labs-2020
|
8e40fe04d2350c6e43a36b29f4428a34aedb6dea
|
[
"MIT"
] | null | null | null |
lab7/lab7.py
|
cudaczek/nlp-labs-2020
|
8e40fe04d2350c6e43a36b29f4428a34aedb6dea
|
[
"MIT"
] | null | null | null |
import pprint
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import manifold
from gensim.models import KeyedVectors
# Download polish word embeddings for word2vec github/Google drive:
# https://github.com/sdadas/polish-nlp-resources
# with 100 dimensionality
word2vec_100 = KeyedVectors.load("word2vec/word2vec_100_3_polish.bin")
# with 300 dimensionality
word2vec_300 = KeyedVectors.load("word2vec_300_3_polish/word2vec_300_3_polish.bin")
# Using the downloaded models find the most similar words for the following expressions...
# And display 5 most similar words according to each model:
# kpk
# szkoda
# wypadek
# kolizja
# nieszczęście
# rozwód
words = ['kpk', 'szkoda', 'wypadek', 'kolizja', 'nieszczęście', 'rozwód']
def get_most_similar_words(expression):
print(f"--------- Most similar words for {expression} ---------")
print("word2vec_100:")
result = word2vec_100.most_similar(positive=[expression])
pprint.pprint(result[:5])
print("word2vec_300:")
result = word2vec_300.most_similar(positive=[expression])
pprint.pprint(result[:5])
print()
for word in words:
get_most_similar_words(word)
# --------- Most similar words for kpk ---------
# word2vec_100:
# [('kilopond', 0.6665806770324707),
# ('kpzs', 0.6363496780395508),
# ('kpu', 0.6300562024116516),
# ('sownarkomu', 0.6254925727844238),
# ('wcik', 0.6224358677864075)]
# word2vec_300:
# [('ksh', 0.5774794220924377),
# ('cywilnego', 0.5498510599136353),
# ('postępowania', 0.5285828113555908),
# ('kilopond', 0.5151568055152893),
# ('kkkw', 0.48344212770462036)]
#
# --------- Most similar words for szkoda ---------
# word2vec_100:
# [('krzywda', 0.6817898750305176),
# ('pożytek', 0.6121943593025208),
# ('strata', 0.5968126654624939),
# ('ryzyko', 0.5745570659637451),
# ('uszczerbek', 0.5639551877975464)]
# word2vec_300:
# [('uszczerbek', 0.6027276515960693),
# ('krzywda', 0.5920778512954712),
# ('strata', 0.550269365310669),
# ('despekt', 0.5382484197616577),
# ('pożytek', 0.531347393989563)]
#
# --------- Most similar words for wypadek ---------
# word2vec_100:
# [('przypadek', 0.7544811964035034),
# ('okoliczności', 0.7268072366714478),
# ('padku', 0.6788284182548523),
# ('incydent', 0.6418948173522949),
# ('zdarzenie', 0.6114422082901001)]
# word2vec_300:
# [('przypadek', 0.7066895961761475),
# ('okoliczności', 0.6121077537536621),
# ('padku', 0.6056742072105408),
# ('padki', 0.5596078634262085),
# ('incydent', 0.5496981143951416)]
#
# --------- Most similar words for kolizja ---------
# word2vec_100:
# [('zderzenie', 0.8431548476219177),
# ('awaria', 0.7090569734573364),
# ('kraksa', 0.6777161359786987),
# ('turbulencja', 0.6613468527793884),
# ('poślizg', 0.6391660571098328)]
# word2vec_300:
# [('zderzenie', 0.7603178024291992),
# ('awaria', 0.611009955406189),
# ('kraksa', 0.5939033031463623),
# ('turbulencja', 0.5664489269256592),
# ('poślizg', 0.5569967031478882)]
#
# --------- Most similar words for nieszczęście ---------
# word2vec_100:
# [('niebezpieczeństwo', 0.7519958019256592),
# ('cierpienia', 0.7408335208892822),
# ('strapienie', 0.7345459461212158),
# ('cierpienie', 0.7262567281723022),
# ('utrapienie', 0.7251379489898682)]
# word2vec_300:
# [('utrapienie', 0.6610732674598694),
# ('cierpienia', 0.6526124477386475),
# ('niedola', 0.6478177309036255),
# ('strapienie', 0.6300181150436401),
# ('cierpienie', 0.6248573064804077)]
#
# --------- Most similar words for rozwód ---------
# word2vec_100:
# [('małżeństwo', 0.7646843194961548),
# ('separacja', 0.7547168135643005),
# ('adopcja', 0.7333694696426392),
# ('ślub', 0.7324203848838806),
# ('unieważnienie', 0.7096400856971741)]
# word2vec_300:
# [('separacja', 0.7053208351135254),
# ('małżeństwo', 0.6689504384994507),
# ('ślub', 0.6553219556808472),
# ('rozwodowy', 0.614338219165802),
# ('unieważnienie', 0.6127183437347412)]
# Find the most similar words for the following expressions (average the representations for each word):
# sąd najwyższy
# trybunał konstytucyjny
# szkoda majątkowy
# kodeks cywilny
# sąd rejonowy
# Display 7 most similar words according to each model.
expressions = ['sąd najwyższy', 'trybunał konstytucyjny', 'szkoda majątkowy', 'kodeks cywilny', 'sąd rejonowy']
def get_most_similiar_words_for_expression_avg(expressions):
for expr in expressions:
print(f"--------- Most similar words for {expr} ---------")
print("word2vec_100:")
word_1, word_2 = tuple(expr.split())
result = np.array([np.mean(k) for k in zip(np.array(word2vec_100[word_1]), np.array(word2vec_100[word_2]))])
pprint.pprint(word2vec_100.similar_by_vector(result)[:7])
print("word2vec_300:")
result = np.array([np.mean(k) for k in zip(np.array(word2vec_300[word_1]), np.array(word2vec_300[word_2]))])
pprint.pprint(word2vec_300.similar_by_vector(result)[:7])
print()
get_most_similiar_words_for_expression_avg(expressions)
# --------- Most similar words for sąd najwyższy ---------
# word2vec_100:
# [('sąd', 0.8644266128540039),
# ('trybunał', 0.7672435641288757),
# ('najwyższy', 0.7527138590812683),
# ('trybunat', 0.6843459010124207),
# ('sędzia', 0.6718415021896362),
# ('areopag', 0.6571060419082642),
# ('sprawiedliwość', 0.6562486886978149)]
# word2vec_300:
# [('sąd', 0.8261206150054932),
# ('trybunał', 0.711520791053772),
# ('najwyższy', 0.7068409323692322),
# ('sędzia', 0.6023203730583191),
# ('sądowy', 0.5670486688613892),
# ('trybunat', 0.5525928735733032),
# ('sprawiedliwość', 0.5319530367851257)]
#
# --------- Most similar words for trybunał konstytucyjny ---------
# word2vec_100:
# [('trybunał', 0.9073251485824585),
# ('konstytucyjny', 0.7998723387718201),
# ('sąd', 0.7972990274429321),
# ('bunał', 0.7729247808456421),
# ('senat', 0.7585273385047913),
# ('bunału', 0.7441976070404053),
# ('trybunat', 0.7347140908241272)]
# word2vec_300:
# [('trybunał', 0.8845913410186768),
# ('konstytucyjny', 0.7739969491958618),
# ('sąd', 0.7300779819488525),
# ('trybunat', 0.6758428812026978),
# ('senat', 0.6632090210914612),
# ('parlament', 0.6614581346511841),
# ('bunału', 0.6404117941856384)]
#
# --------- Most similar words for szkoda majątkowy ---------
# word2vec_100:
# [('szkoda', 0.8172438144683838),
# ('majątkowy', 0.7424530386924744),
# ('krzywda', 0.6498408317565918),
# ('świadczenie', 0.6419471502304077),
# ('odszkodowanie', 0.6392182111740112),
# ('dochód', 0.637932538986206),
# ('wydatek', 0.6325603127479553)]
# word2vec_300:
# [('szkoda', 0.7971925735473633),
# ('majątkowy', 0.7278684973716736),
# ('uszczerbek', 0.5841633081436157),
# ('korzyść', 0.5474051237106323),
# ('krzywda', 0.5431190729141235),
# ('majątek', 0.525060772895813),
# ('strata', 0.5228629112243652)]
#
# --------- Most similar words for kodeks cywilny ---------
# word2vec_100:
# [('kodeks', 0.8756389617919922),
# ('cywilny', 0.8532464504241943),
# ('pasztunwali', 0.6438998579978943),
# ('deksu', 0.6374959945678711),
# ('teodozjańskim', 0.6283917427062988),
# ('pozakodeksowy', 0.6153194904327393),
# ('sądowo', 0.6136723160743713)]
# word2vec_300:
# [('kodeks', 0.8212110996246338),
# ('cywilny', 0.7886406779289246),
# ('amiatyński', 0.5660314559936523),
# ('cywilnego', 0.5531740188598633),
# ('deksu', 0.5472918748855591),
# ('isps', 0.5369160175323486),
# ('jōei', 0.5361183881759644)]
#
# --------- Most similar words for sąd rejonowy ---------
# word2vec_100:
# [('sąd', 0.8773891925811768),
# ('prokuratura', 0.8396657705307007),
# ('rejonowy', 0.7694871425628662),
# ('trybunał', 0.755321204662323),
# ('sądowy', 0.7153753042221069),
# ('magistrat', 0.7151126861572266),
# ('prokurator', 0.7081375122070312)]
# word2vec_300:
# [('sąd', 0.8507211208343506),
# ('rejonowy', 0.7344856262207031),
# ('prokuratura', 0.711697518825531),
# ('trybunał', 0.6748420596122742),
# ('sądowy', 0.6426382064819336),
# ('okręgowy', 0.6349465847015381),
# ('apelacyjny', 0.599929690361023)]
# Find the result of the following equations (5 top results, both models):
# sąd + konstytucja - kpk
# pasażer + kobieta - mężczyzna
# pilot + kobieta - mężczyzna
# lekarz + kobieta - mężczyzna
# nauczycielka + mężczyzna - kobieta
# przedszkolanka + mężczyzna - 'kobieta
# samochód + rzeka - droga
equations = [(['sąd', 'konstytucja'], ['kpk']),
(['pasażer', 'kobieta'], ['mężczyzna']),
(['pilot', 'kobieta'], ['mężczyzna']),
(['lekarz', 'kobieta'], ['mężczyzna']),
(['nauczycielka', 'mężczyzna'], ['kobieta']),
(['przedszkolanka', 'mężczyzna'], ['kobieta']),
(['samochód', 'rzeka'], ['droga'])]
def get_result_of_equation(positive, negative):
print(f"--------- Result for + {positive} and - {negative} ---------")
print("word2vec_100:")
result = word2vec_100.most_similar(positive=positive, negative=negative)
pprint.pprint(result[:5])
print("word2vec_300:")
result = word2vec_300.most_similar(positive=positive, negative=negative)
pprint.pprint(result[:5])
print()
for equa in equations:
get_result_of_equation(equa[0], equa[1])
# --------- Result for + ['sąd', 'konstytucja'] and - ['kpk'] ---------
# word2vec_100:
# [('trybunał', 0.6436409950256348),
# ('ustawa', 0.6028786897659302),
# ('elekcja', 0.5823959112167358),
# ('deklaracja', 0.5771891474723816),
# ('dekret', 0.5759621262550354)]
# word2vec_300:
# [('trybunał', 0.5860734581947327),
# ('senat', 0.5112544298171997),
# ('ustawa', 0.5023636817932129),
# ('dekret', 0.48704710602760315),
# ('władza', 0.4868926703929901)]
#
# --------- Result for + ['pasażer', 'kobieta'] and - ['mężczyzna'] ---------
# word2vec_100:
# [('pasażerka', 0.7234811186790466),
# ('stewardessa', 0.6305270195007324),
# ('stewardesa', 0.6282645463943481),
# ('taksówka', 0.619726300239563),
# ('podróżny', 0.614517092704773)]
# word2vec_300:
# [('pasażerka', 0.6741673946380615),
# ('stewardesa', 0.5810248255729675),
# ('stewardessa', 0.5653151273727417),
# ('podróżny', 0.5060371160507202),
# ('pasażerski', 0.4896503686904907)]
#
# --------- Result for + ['pilot', 'kobieta'] and - ['mężczyzna'] ---------
# word2vec_100:
# [('nawigator', 0.6925703287124634),
# ('oblatywacz', 0.6686224937438965),
# ('lotnik', 0.6569937467575073),
# ('pilotka', 0.6518791913986206),
# ('awionetka', 0.6428645849227905)]
# word2vec_300:
# [('pilotka', 0.6108255386352539),
# ('lotnik', 0.6020804047584534),
# ('stewardesa', 0.5943204760551453),
# ('nawigator', 0.5849766731262207),
# ('oblatywacz', 0.5674178600311279)]
#
# --------- Result for + ['lekarz', 'kobieta'] and - ['mężczyzna'] ---------
# word2vec_100:
# [('lekarka', 0.7690489292144775),
# ('ginekolog', 0.7575511336326599),
# ('pediatra', 0.7478542923927307),
# ('psychiatra', 0.732271671295166),
# ('położna', 0.7268943786621094)]
# word2vec_300:
# [('lekarka', 0.7388788461685181),
# ('pielęgniarka', 0.6719920635223389),
# ('ginekolog', 0.658279299736023),
# ('psychiatra', 0.6389409303665161),
# ('chirurg', 0.6305986642837524)]
#
# --------- Result for + ['nauczycielka', 'mężczyzna'] and - ['kobieta'] ---------
# word2vec_100:
# [('uczennica', 0.7441667318344116),
# ('studentka', 0.7274973392486572),
# ('nauczyciel', 0.7176114916801453),
# ('wychowawczyni', 0.7153530120849609),
# ('koleżanka', 0.678418755531311)]
# word2vec_300:
# [('nauczyciel', 0.6561620235443115),
# ('wychowawczyni', 0.6211140155792236),
# ('uczennica', 0.6142012476921082),
# ('koleżanka', 0.5501158237457275),
# ('przedszkolanka', 0.5497692823410034)]
#
# --------- Result for + ['przedszkolanka', 'mężczyzna'] and - ['kobieta'] ---------
# word2vec_100:
# [('stażysta', 0.6987776756286621),
# ('wychowawczyni', 0.6618361473083496),
# ('kreślarka', 0.6590923070907593),
# ('pielęgniarz', 0.6492814421653748),
# ('siedmiolatek', 0.6483469009399414)]
# word2vec_300:
# [('stażysta', 0.5117638111114502),
# ('pierwszoklasista', 0.49398648738861084),
# ('wychowawczyni', 0.49037522077560425),
# ('praktykant', 0.48884207010269165),
# ('pielęgniarz', 0.4795262813568115)]
#
# --------- Result for + ['samochód', 'rzeka'] and - ['droga'] ---------
# word2vec_100:
# [('jeep', 0.6142987608909607),
# ('buick', 0.5962571501731873),
# ('dżip', 0.5938510894775391),
# ('ponton', 0.580719530582428),
# ('landrower', 0.5799552202224731)]
# word2vec_300:
# [('dżip', 0.5567235946655273),
# ('jeep', 0.5533617734909058),
# ('auto', 0.5478508472442627),
# ('ciężarówka', 0.5461742281913757),
# ('wóz', 0.5204571485519409)]
# Using the t-SNE algorithm compute the projection of the random 1000 words with the following words highlighted (both models):
# szkoda
# strata
# uszczerbek
# krzywda
# niesprawiedliwość
# nieszczęście
# kobieta
# mężczyzna
# pasażer
# pasażerka
# student
# studentka
# lekarz
# lekarka
words = np.array(['szkoda', 'strata', 'uszczerbek', 'krzywda', 'niesprawiedliwość', 'nieszczęście', 'kobieta',
'mężczyzna', 'pasażer', 'pasażerka', 'student', 'studentka', 'lekarz', 'lekarka'])
def scatter_points(hue, point_labels, principal_components):
x = np.transpose(principal_components)[0]
y = np.transpose(principal_components)[1]
plt.scatter(x, y, c=hue, s=100, marker='o', alpha=0.2)
for i, text in enumerate(point_labels):
plt.annotate(text, (x[i], y[i]), ha="center", size=8)
def plot_with_tsne(wv, words, perplexity=30, learning_rate=100.0, iterations=1000, filename='slowa300'):
random_words = np.random.choice(list(wv.wv.vocab), 1000)
words = np.concatenate((words, random_words))
vecs = [wv[word] for word in words]
tsne = manifold.TSNE(2, perplexity=perplexity, learning_rate=learning_rate, n_iter=iterations)
results = tsne.fit_transform(vecs)
hue = [0 for _ in range(14)] + [1 for _ in range(1000)]
plt.figure(figsize=(30, 30))
scatter_points(hue, words, results)
plt.savefig(filename + '.png')
plt.show()
plt.clf()
wv = word2vec_300
plot_with_tsne(wv, words)
wv = word2vec_100
plot_with_tsne(wv, words)
| 33.901442
| 127
| 0.667801
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11,121
| 0.7796
|
671762a970ef464f89d67b583ec5b5c7d9146820
| 1,427
|
py
|
Python
|
Nimbus-Controller/sqs-fastreader.py
|
paulfdoyle/NIMBUS
|
0f309b620c00a9438c55404e685bb1cafc44d200
|
[
"MIT"
] | null | null | null |
Nimbus-Controller/sqs-fastreader.py
|
paulfdoyle/NIMBUS
|
0f309b620c00a9438c55404e685bb1cafc44d200
|
[
"MIT"
] | null | null | null |
Nimbus-Controller/sqs-fastreader.py
|
paulfdoyle/NIMBUS
|
0f309b620c00a9438c55404e685bb1cafc44d200
|
[
"MIT"
] | null | null | null |
# This script reads messages from a specific SQS queue and writes them to a CSV file
#
# Author - Paul Doyle Aug 2013
#
#
#from __future__ import print_function
import sys
import Queue
import boto.sqs
import argparse
import socket
import datetime
import time
from boto.sqs.attributes import Attributes
parser = argparse.ArgumentParser()
parser.add_argument('queuearg',help='name of the sqs queue to use',metavar="myQueueName")
parser.add_argument('experiment',help='name of the experiment queue to use')
args = parser.parse_args()
from boto.sqs.message import Message
import threading
conn = boto.sqs.connect_to_region("us-east-1", aws_access_key_id='AKIAINWVSI3MIXIB5N3Q', aws_secret_access_key='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')
sqs_queue = conn.get_queue(args.queuearg)
class Sender(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
global sqs_queue,queue
name = args.experiment+str(queue.get())+"-"+args.queuearg+".csv"
f = open(name,'w')
while True:
try:
m = sqs_queue.get_messages(num_messages=1,attributes='SentTimestamp')
f.write(str(m[0].attributes)+","+str(m[0].get_body())+"\n")
sqs_queue.delete_message(m[0])
except:
if sqs_queue.count() < 1:
f.write(args.queuearg + " is empty\n")
return
queue = Queue.Queue(0)
threads = []
for n in xrange(40):
queue.put(n)
t = Sender()
t.start()
threads.append(t)
for t in threads:
t.join()
| 24.603448
| 154
| 0.733006
| 511
| 0.358094
| 0
| 0
| 0
| 0
| 0
| 0
| 351
| 0.245971
|
6718237fd3891c8aa0d6df664410cd0f7651353e
| 1,547
|
py
|
Python
|
dero/ml/results/reformat.py
|
whoopnip/dero
|
62e081b341cc711ea8e1578e7c65b581eb74fa3f
|
[
"MIT"
] | null | null | null |
dero/ml/results/reformat.py
|
whoopnip/dero
|
62e081b341cc711ea8e1578e7c65b581eb74fa3f
|
[
"MIT"
] | 3
|
2020-03-24T17:57:46.000Z
|
2021-02-02T22:25:37.000Z
|
dero/ml/results/reformat.py
|
whoopnip/dero
|
62e081b341cc711ea8e1578e7c65b581eb74fa3f
|
[
"MIT"
] | null | null | null |
from typing import Optional
import pandas as pd
from dero.ml.typing import ModelDict, AllModelResultsDict, DfDict
def model_dict_to_df(model_results: ModelDict, model_name: Optional[str] = None) -> pd.DataFrame:
df = pd.DataFrame(model_results).T
df.drop('score', inplace=True)
df['score'] = model_results['score']
if model_name is not None:
df['model'] = model_name
first_cols = ['model', 'score']
else:
first_cols = ['score']
other_cols = [col for col in df.columns if col not in first_cols]
return df[first_cols + other_cols]
def all_model_results_dict_to_df(results: AllModelResultsDict) -> pd.DataFrame:
df = pd.DataFrame()
for model_type, instance_list in results.items():
for instance in instance_list:
model_df = model_dict_to_df(instance, model_name=model_type)
df = df.append(model_df)
first_cols = ['model', 'score']
other_cols = [col for col in df.columns if col not in first_cols]
return df[first_cols + other_cols].sort_values('score', ascending=False)
def all_model_results_dict_to_model_df_dict(results: AllModelResultsDict) -> DfDict:
out_dict = {}
for model_type, instance_list in results.items():
model_df = pd.DataFrame()
for instance in instance_list:
model_instance_df = model_dict_to_df(instance, model_name=model_type)
model_df = model_df.append(model_instance_df)
out_dict[model_type] = model_df.sort_values('score', ascending=False)
return out_dict
| 37.731707
| 97
| 0.700711
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 77
| 0.049774
|
67194761b98bb4ec0d555cbb6324bf54ba4345ac
| 663
|
py
|
Python
|
engine/view.py
|
amirgeva/py2d
|
88210240b71446d53ee85cf07ca8d253d522a265
|
[
"BSD-2-Clause"
] | null | null | null |
engine/view.py
|
amirgeva/py2d
|
88210240b71446d53ee85cf07ca8d253d522a265
|
[
"BSD-2-Clause"
] | null | null | null |
engine/view.py
|
amirgeva/py2d
|
88210240b71446d53ee85cf07ca8d253d522a265
|
[
"BSD-2-Clause"
] | null | null | null |
import pygame
from engine.utils import Rect
from engine.app import get_screen_size
# EXPORT
class View(object):
def __init__(self, rect=None):
if rect:
self.rect = rect
else:
res = get_screen_size()
self.rect = Rect(0,0,res[0],res[1])
def offset(self, d):
self.rect.move(d[0], d[1])
def get_position(self):
return self.rect.tl
def set_position(self, pos):
self.rect = Rect(pos.x, pos.y, pos.x+self.rect.width(), pos.y+self.rect.height())
def relative_position(self, pos):
return pos - self.rect.tl
def get_rect(self):
return Rect(self.rect)
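# A minimal usage sketch for View with an explicit rectangle, so no initialized
# screen is required; the coordinates are made up for illustration.
#
#   view = View(Rect(0, 0, 640, 480))
#   view.offset((10, 5))            # shift the view right by 10 and down by 5
#   print(view.get_position())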
| 23.678571
| 89
| 0.600302
| 569
| 0.85822
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 0.012066
|
67194cbd5bb79a7249d2ae1d8a3b2168422d756c
| 1,640
|
py
|
Python
|
oldplugins/coin.py
|
sonicrules1234/sonicbot
|
07a22d08bf86ed33dc715a800957aee3b45f3dde
|
[
"BSD-3-Clause"
] | 1
|
2019-06-27T08:45:23.000Z
|
2019-06-27T08:45:23.000Z
|
oldplugins/coin.py
|
sonicrules1234/sonicbot
|
07a22d08bf86ed33dc715a800957aee3b45f3dde
|
[
"BSD-3-Clause"
] | null | null | null |
oldplugins/coin.py
|
sonicrules1234/sonicbot
|
07a22d08bf86ed33dc715a800957aee3b45f3dde
|
[
"BSD-3-Clause"
] | null | null | null |
import shelve, random
arguments = ["self", "info", "args", "world"]
minlevel = 2
helpstring = "coin <bet>"
def main(connection, info, args, world) :
"""Decides heads or tails based on the coinchance variable. Adds or removes appropriate amount of money"""
money = shelve.open("money-%s.db" % (connection.networkname), writeback=True)
if money.has_key(info["sender"]) :
bet = int(args[1])
if bet <= money[info["sender"]]["money"] and bet >= 1 :
answer = random.choice(money[info["sender"]]["coinchance"])
if answer :
money[info["sender"]]["money"] += bet
money.sync()
connection.msg(info["channel"], _("Congrats %(sender)s! You just won %(num)s dollars!") % dict(sender=info["sender"], num=args[1]))
else :
money[info["sender"]]["money"] -= bet
money.sync()
connection.msg(info["channel"], _("Sorry %(sender)s! You just lost %(num)s dollars!") % dict(sender=info["sender"], num=args[1]))
if money[info["sender"]]["money"] > money[info["sender"]]["maxmoney"] :
money[info["sender"]]["maxmoney"] = money[info["sender"]]["money"]
money.sync()
else : connection.msg(info["channel"], _("%(sender)s: You don't have enough money to do that!") % dict(sender=info["sender"]))
else : connection.msg(info["channel"], _("%(sender)s: You have not set up a money account. If you aren't already, please register with me. Then, say moneyreset. After that you should be able to use this command.") % dict(sender=info["sender"]))
| 60.740741
| 251
| 0.587805
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 696
| 0.42439
|
6719b8a502c31dfe0118ee06e1a1b37092b216f3
| 13,562
|
py
|
Python
|
src/rbvfit/vfit_mcmc.py
|
manoranjan-s/rbvfit
|
a5c450f721c08dda02c431a5a079945a73a0cfc2
|
[
"MIT"
] | null | null | null |
src/rbvfit/vfit_mcmc.py
|
manoranjan-s/rbvfit
|
a5c450f721c08dda02c431a5a079945a73a0cfc2
|
[
"MIT"
] | null | null | null |
src/rbvfit/vfit_mcmc.py
|
manoranjan-s/rbvfit
|
a5c450f721c08dda02c431a5a079945a73a0cfc2
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import emcee
from multiprocessing import Pool
import numpy as np
import corner
import matplotlib.pyplot as plt
import sys
import scipy.optimize as op
from rbvfit.rb_vfit import rb_veldiff as rb_veldiff
from rbvfit import rb_setline as rb
import pdb
def plot_model(wave_obs,fnorm,enorm,fit,model,outfile= False,xlim=[-600.,600.],verbose=False):
# This model only works if there are no nuisance parameters
theta_prime=fit.best_theta
value1=fit.low_theta
value2=fit.high_theta
n_clump=model.nclump
n_clump_total=int(len(theta_prime)/3)
ntransition=model.ntransition
zabs=model.zabs
samples=fit.samples
model_mcmc=fit.model
wave_list=np.zeros( len(model.lambda_rest_original),)
# Use the input lambda rest list to plot correctly
for i in range(0,len(wave_list)):
s=rb.rb_setline(model.lambda_rest_original[i],'closest')
wave_list[i]=s['wave']
wave_rest=wave_obs/(1+zabs[0])
best_N = theta_prime[0:n_clump_total]
best_b = theta_prime[n_clump_total:2 * n_clump_total]
best_v = theta_prime[2 * n_clump_total:3 * n_clump_total]
low_N = value1[0:n_clump_total]
low_b = value1[n_clump_total:2 * n_clump_total]
low_v = value1[2 * n_clump_total:3 * n_clump_total]
high_N = value2[0:n_clump_total]
high_b = value2[n_clump_total:2 * n_clump_total]
high_v = value2[2 * n_clump_total:3 * n_clump_total]
#Now extracting individual fitted components
best_fit, f1 = model.model_fit(theta_prime, wave_obs)
fig, axs = plt.subplots(ntransition, sharex=True, sharey=False,figsize=(12,18 ),gridspec_kw={'hspace': 0})
BIGGER_SIZE = 18
plt.rc('font', size=BIGGER_SIZE) # controls default text sizes
plt.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=BIGGER_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=BIGGER_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=BIGGER_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
index = np.random.randint(0, high=len(samples), size=100)
if ntransition == 1:
# When there are no nuisance parameters
#Now loop through each transition and plot them in velocity space
vel=rb_veldiff(wave_list[0],wave_rest)
axs.step(vel, fnorm, 'k-', linewidth=1.)
axs.step(vel, enorm, color='r', linewidth=1.)
# Plotting a random sample of outputs extracted from the posterior distribution
for ind in range(len(index)):
axs.plot(vel, model_mcmc(samples[index[ind], :], wave_obs), color="k", alpha=0.1)
axs.set_ylim([0, 1.6])
axs.set_xlim(xlim)
axs.plot(vel, best_fit, color='b', linewidth=3)
axs.plot([0., 0.], [-0.2, 2.5], 'k:', lw=0.5)
# plot individual components
for dex in range(0,np.shape(f1)[1]):
axs.plot(vel, f1[:, dex], 'g:', linewidth=3)
for iclump in range(0,n_clump):
axs.plot([best_v[iclump],best_v[iclump]],[1.05,1.15],'k--',lw=4)
text1=r'$logN \;= '+ ('%.2f' % best_N[iclump]) +'^{ + ' + ('%.2f' % (best_N[iclump]-low_N[iclump]))+'}'+ '_{ -' + ('%.2f' % (high_N[iclump]-best_N[iclump]))+'}$'
axs.text(best_v[iclump],1.2,text1,
fontsize=14,rotation=90, rotation_mode='anchor')
text2=r'$b ='+('%.0f' % best_b[iclump]) +'^{ + ' + ('%.0f' % (best_b[iclump]-low_b[iclump]))+'}'+ '_{ -' + ('%.0f' % (high_b[iclump]-best_b[iclump]))+'}$'
axs.text(best_v[iclump]+30,1.2, text2,fontsize=14,rotation=90, rotation_mode='anchor')
else:
#Now loop through each transition and plot them in velocity space
for i in range(0,ntransition):
print(wave_list[i])
vel=rb_veldiff(wave_list[i],wave_rest)
axs[i].step(vel, fnorm, 'k-', linewidth=1.)
axs[i].step(vel, enorm, color='r', linewidth=1.)
#pdb.set_trace()
# Plotting a random sample of outputs extracted from posterior distribution
for ind in range(len(index)):
axs[i].plot(vel, model_mcmc(samples[index[ind], :], wave_obs), color="k", alpha=0.1)
axs[i].set_ylim([0, 1.6])
axs[i].set_xlim(xlim)
axs[i].plot(vel, best_fit, color='b', linewidth=3)
axs[i].plot([0., 0.], [-0.2, 2.5], 'k:', lw=0.5)
# plot individual components
for dex in range(0,np.shape(f1)[1]):
axs[i].plot(vel, f1[:, dex], 'g:', linewidth=3)
for iclump in range(0,n_clump):
axs[i].plot([best_v[iclump],best_v[iclump]],[1.05,1.15],'k--',lw=4)
if i ==0:
text1=r'$logN \;= '+ ('%.2f' % best_N[iclump]) +'^{ + ' + ('%.2f' % (best_N[iclump]-low_N[iclump]))+'}'+ '_{ -' + ('%.2f' % (high_N[iclump]-best_N[iclump]))+'}$'
axs[i].text(best_v[iclump],1.2,text1,
fontsize=14,rotation=90, rotation_mode='anchor')
text2=r'$b ='+('%.0f' % best_b[iclump]) +'^{ + ' + ('%.0f' % (best_b[iclump]-low_b[iclump]))+'}'+ '_{ -' + ('%.0f' % (high_b[iclump]-best_b[iclump]))+'}$'
axs[i].text(best_v[iclump]+30,1.2, text2,
fontsize=14,rotation=90, rotation_mode='anchor')
if verbose==True:
from IPython.display import display, Math
samples = fit.sampler.get_chain(discard=100, thin=15, flat=True)
nfit = int(fit.ndim / 3)
N_tile = np.tile("logN", nfit)
b_tile = np.tile("b", nfit)
v_tile = np.tile("v", nfit)
tmp = np.append(N_tile, b_tile)
text_label = np.append(tmp, v_tile)
for i in range(len(text_label)):
mcmc = np.percentile(samples[:, i], [16, 50, 84])
q = np.diff(mcmc)
txt = "\mathrm{{{3}}} = {0:.2f}_{{-{1:.2f}}}^{{{2:.2f}}}"
txt = txt.format(mcmc[1], q[0], q[1], text_label[i])
display(Math(txt))
if outfile==False:
plt.show()
else:
outfile_fig =outfile
fig.savefig(outfile_fig, bbox_inches='tight')
######## Computing Likelihoods######
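# lnprior: flat (uniform) prior -- returns 0.0 while every theta[index] lies inside [lb[index], ub[index]], and -np.inf as soon as any component falls outside its bounds.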
def lnprior(theta, lb, ub):
for index in range(0, len(lb)):
if (lb[index] > theta[index]) or (ub[index] < theta[index]):
return -np.inf
return 0.0
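# lnlike: Gaussian log-likelihood (up to an additive constant), ln L = -0.5 * sum[ (y - model(theta, x))^2 / yerr^2 - ln(1 / yerr^2) ]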
def lnlike(theta, model, x, y, yerr):
model = model(theta, x)
inv_sigma2 = 1.0 / (yerr ** 2)
return -0.5 * (np.sum((y - model) ** 2 * inv_sigma2 - np.log(inv_sigma2)))
def lnprob(theta, lb, ub, model, x, y, yerr):
lp = lnprior(theta, lb, ub)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, model, x, y, yerr)
def optimize_guess(model, theta, lb, ub, x, y, yerr):
nll = lambda *args: -lnprob(*args)
result = op.minimize(nll, [theta], args=(lb, ub, model, x, y, yerr))
p = result["x"]
return p
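# optimize_guess: maximum a posteriori starting point -- minimizes -lnprob(theta) with scipy.optimize.minimize so the emcee walkers can be seeded near the optimum.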
def set_bounds(nguess,bguess,vguess):
Nlow=np.zeros((len(nguess,)))
blow=np.zeros((len(nguess,)))
vlow=np.zeros((len(nguess,)))
NHI=np.zeros((len(nguess,)))
bHI=np.zeros((len(nguess,)))
vHI=np.zeros((len(nguess,)))
for i in range(0,len(nguess)):
Nlow[i]=nguess[i]-2.
blow[i]=bguess[i]-40.
if blow[i] < 2.:
blow[i] = 2.
vlow[i]=vguess[i]-50.
NHI[i]=nguess[i]+2.
bHI[i]=bguess[i]+40.
if bHI[i] > 200.:
bHI[i] = 150.
vHI[i]=vguess[i]+50.
lb=np.concatenate((Nlow,blow,vlow))
ub=np.concatenate((NHI,bHI,vHI))
bounds=[lb,ub]
return bounds, lb, ub
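# set_bounds: builds the lower/upper bound arrays around the initial guesses -- logN within +/-2, b within +/-40 (lower bound clipped at 2, upper bound reset to 150 if it exceeds 200), and v within +/-50 of each component's guess.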
class vfit(object):
def __init__(self, model, theta, lb, ub, wave_obs, fnorm, enorm, no_of_Chain=50, no_of_steps=1000,
perturbation=1e-6):
# Main class that performs all the fitting
self.wave_obs = wave_obs
self.fnorm = fnorm
self.enorm = enorm
self.model = model
self.lb = lb
self.ub = ub
self.theta = theta
self.no_of_Chain = no_of_Chain
self.no_of_steps = no_of_steps
self.perturbation = perturbation
def runmcmc(self, optimize=True,verbose=False):
model = self.model
theta = self.theta
lb = self.lb
ub = self.ub
wave_obs = self.wave_obs
fnorm = self.fnorm
enorm = self.enorm
no_of_Chain = self.no_of_Chain
no_of_steps = self.no_of_steps
perturbation = self.perturbation
if optimize == True:
print('Optimizing Guess ***********')
# Now make a better guess
popt = optimize_guess(model, theta, lb, ub, wave_obs, fnorm, enorm)
print('Done ***********')
else:
print('Skipping Optimizing Guess ***********')
print('Using input guess for mcmc ***********')
popt = theta
print('Preparing emcee ***********')
###### Define a lot of walkers
length_of_lb = len(lb)
ndim, nwalkers = length_of_lb, no_of_Chain
guesses = [popt + perturbation * np.random.randn(ndim) for i in range(nwalkers)]
print("Starting emcee ***********")
burntime = np.round(no_of_steps * .2)
with Pool() as pool:
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, pool=pool, args=(lb, ub, model, wave_obs, fnorm, enorm))
pos, prob, state = sampler.run_mcmc(guesses, no_of_steps,progress=True)
#sampler.reset()
print("Done!")
#print("Now starting the Final Calculations:")
print("*****************")
#width = 30
# Now Running mcmc
#for i, result in enumerate(sampler.sample(pos, iterations=no_of_steps)):
# n = int((width + 1) * float(i) / no_of_steps)
#sys.stdout.write("\r[{0}{1}]".format('#' * n, ' ' * (width - n)))
#sys.stdout.write("\n")
if verbose==True:
from IPython.display import display, Math
samples = sampler.get_chain(discard=100, thin=15, flat=True)
nfit = int(ndim / 3)
N_tile = np.tile("logN", nfit)
b_tile = np.tile("b", nfit)
v_tile = np.tile("v", nfit)
tmp = np.append(N_tile, b_tile)
text_label = np.append(tmp, v_tile)
for i in range(len(text_label)):
mcmc = np.percentile(samples[:, i], [16, 50, 84])
q = np.diff(mcmc)
txt = "\mathrm{{{3}}} = {0:.2f}_{{-{1:.2f}}}^{{{2:.2f}}}"
txt = txt.format(mcmc[1], q[0], q[1], text_label[i])
display(Math(txt))
self.sampler = sampler
self.ndim = ndim
self.nwalkers = nwalkers
def plot_corner(self,outfile=False):
ndim=self.ndim
#samples = self.sampler.chain[:, 100:, :].reshape((-1, ndim)) # sampler.flatchain
samples = self.sampler.get_chain(discard=100, thin=15, flat=True)
st = np.percentile(samples, 50, axis=0) # =np.median(samples,axis=0)#np.median(sampler.flatchain, axis=0)
# df = pd.DataFrame(samples)
# temp=df.mode()
# st=temp.values[0]
nfit = int(ndim / 3)
N_tile = np.tile("logN", nfit)
b_tile = np.tile("b", nfit)
v_tile = np.tile("v", nfit)
tmp = np.append(N_tile, b_tile)
text_label = np.append(tmp, v_tile)
figure = corner.corner(samples, labels=text_label, truths=st)
theta_prime = st
value1 = np.percentile(samples, 10, axis=0)
# This is the empirical mean of the sample:
value2 = np.percentile(samples, 90, axis=0)
# Extract the axes
axes = np.array(figure.axes).reshape((ndim, ndim))
# Loop over the diagonal
for i in range(ndim):
ax = axes[i, i]
ax.axvline(value1[i], color="aqua")
ax.axvline(value2[i], color="aqua")
# Loop over the histograms
for yi in range(ndim):
for xi in range(yi):
ax = axes[yi, xi]
ax.axvline(value1[xi], color="aqua")
ax.axvline(value2[xi], color="aqua")
# ax.axhline(value1[yi], color="g")
# ax.axhline(value2[yi], color="r")
# ax.plot(value1[xi], value1[yi], "sg")
# ax.plot(value2[xi], value2[yi], "sr")
self.best_theta=theta_prime
self.low_theta=value1
self.high_theta=value2
self.samples=samples
if outfile==False:
plt.show()
else:
outfile_fig =outfile
figure.savefig(outfile_fig, bbox_inches='tight')
| 36.262032
| 204
| 0.539891
| 4,997
| 0.368456
| 0
| 0
| 0
| 0
| 0
| 0
| 2,369
| 0.174679
|
671a19cd137db70202b7e3303f276604903cd2b5
| 6,409
|
py
|
Python
|
yolox/data/dataloading.py
|
XHYsdjkdsjsk2021/Yolox_xhy
|
a60f585d4d2bf36f9fa90b0a078efb7b315e0118
|
[
"Apache-2.0"
] | null | null | null |
yolox/data/dataloading.py
|
XHYsdjkdsjsk2021/Yolox_xhy
|
a60f585d4d2bf36f9fa90b0a078efb7b315e0118
|
[
"Apache-2.0"
] | null | null | null |
yolox/data/dataloading.py
|
XHYsdjkdsjsk2021/Yolox_xhy
|
a60f585d4d2bf36f9fa90b0a078efb7b315e0118
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import torch
from torch.utils.data.dataloader import DataLoader as torchDataLoader
from torch.utils.data.dataloader import default_collate
import os
import random
from .samplers import YoloBatchSampler
def get_yolox_datadir():
"""
get dataset dir of YOLOX. If an environment variable named `YOLOX_DATADIR` is set,
this function will return the value of that environment variable. Otherwise, a
"datasets" directory next to the installed yolox package is used.
"""
yolox_datadir = os.getenv("YOLOX_DATADIR", None)
if yolox_datadir is None:
import yolox
yolox_path = os.path.dirname(os.path.dirname(yolox.__file__))
yolox_datadir = os.path.join(yolox_path, "datasets")
return yolox_datadir
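# Example (hypothetical path): with YOLOX_DATADIR=/data/yolox exported, this returns "/data/yolox"; with the variable unset it falls back to "<parent of the yolox package>/datasets".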
class DataLoader(torchDataLoader):
"""
Lightnet dataloader that enables on the fly resizing of the images.
See :class:`torch.utils.data.DataLoader` for more information on the arguments.
Check more on the following website:
https://gitlab.com/EAVISE/lightnet/-/blob/master/lightnet/data/_dataloading.py
Note:
This dataloader only works with :class:`lightnet.data.Dataset` based datasets.
Example:
>>> class CustomSet(ln.data.Dataset):
... def __len__(self):
... return 4
... @ln.data.Dataset.resize_getitem
... def __getitem__(self, index):
... # Should return (image, anno) but here we return (input_dim,)
... return (self.input_dim,)
>>> dl = ln.data.DataLoader(
... CustomSet((200,200)),
... batch_size = 2,
... collate_fn = ln.data.list_collate # We want the data to be grouped as a list
... )
>>> dl.dataset.input_dim # Default input_dim
(200, 200)
>>> for d in dl:
... d
[[(200, 200), (200, 200)]]
[[(200, 200), (200, 200)]]
>>> dl.change_input_dim(320, random_range=None)
(320, 320)
>>> for d in dl:
... d
[[(320, 320), (320, 320)]]
[[(320, 320), (320, 320)]]
>>> dl.change_input_dim((480, 320), random_range=None)
(480, 320)
>>> for d in dl:
... d
[[(480, 320), (480, 320)]]
[[(480, 320), (480, 320)]]
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__initialized = False
shuffle = False
batch_sampler = None
if len(args) > 5:
shuffle = args[2]
sampler = args[3]
batch_sampler = args[4]
elif len(args) > 4:
shuffle = args[2]
sampler = args[3]
if "batch_sampler" in kwargs:
batch_sampler = kwargs["batch_sampler"]
elif len(args) > 3:
shuffle = args[2]
if "sampler" in kwargs:
sampler = kwargs["sampler"]
if "batch_sampler" in kwargs:
batch_sampler = kwargs["batch_sampler"]
else:
if "shuffle" in kwargs:
shuffle = kwargs["shuffle"]
if "sampler" in kwargs:
sampler = kwargs["sampler"]
if "batch_sampler" in kwargs:
batch_sampler = kwargs["batch_sampler"]
# Use custom BatchSampler
if batch_sampler is None:
if sampler is None:
if shuffle:
sampler = torch.utils.data.sampler.RandomSampler(self.dataset)
# sampler = torch.utils.data.DistributedSampler(self.dataset)
else:
sampler = torch.utils.data.sampler.SequentialSampler(self.dataset)
batch_sampler = YoloBatchSampler(
sampler,
self.batch_size,
self.drop_last,
input_dimension=self.dataset.input_dim,
)
# batch_sampler = IterationBasedBatchSampler(batch_sampler, num_iterations =
self.batch_sampler = batch_sampler
self.__initialized = True
def close_mosaic(self):
self.batch_sampler.mosaic = False
def change_input_dim(self, multiple=32, random_range=(10, 19)):
"""This function will compute a new size and update it on the next mini_batch.
Args:
multiple (int or tuple, optional): values to multiply the randomly generated range by.
Default **32**
random_range (tuple, optional): This (min, max) tuple sets the range
for the randomisation; Default **(10, 19)**
Return:
tuple: width, height tuple with new dimension
Note:
The new size is generated as follows: |br|
First we compute a random integer inside ``[random_range]``.
We then multiply that number with the ``multiple`` argument,
which gives our final new input size. |br|
If ``multiple`` is an integer we generate a square size. If you give a tuple
of **(width, height)**, the size is computed
as :math:`rng * multiple[0], rng * multiple[1]`.
Note:
You can set the ``random_range`` argument to **None** to set
an exact size of multiply. |br|
See the example above for how this works.
"""
if random_range is None:
size = 1
else:
size = random.randint(*random_range)
if isinstance(multiple, int):
size = (size * multiple, size * multiple)
else:
size = (size * multiple[0], size * multiple[1])
self.batch_sampler.new_input_dim = size
return size
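# Illustration: change_input_dim(multiple=32, random_range=(10, 19)) draws k in [10, 19] and returns (32*k, 32*k); change_input_dim((480, 320), random_range=None) returns (480, 320), matching the docstring example above.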
def list_collate(batch):
"""
Function that collates lists or tuples together into one list (of lists/tuples).
Use this as the collate function in a Dataloader, if you want to have a list of
items as an output, as opposed to tensors (eg. Brambox.boxes).
"""
items = list(zip(*batch))
for i in range(len(items)):
if isinstance(items[i][0], (list, tuple)):
items[i] = list(items[i])
else:
items[i] = default_collate(items[i])
return items
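# Illustration (hypothetical names): list_collate([(img1, boxes1), (img2, boxes2)]) returns [default_collate((img1, img2)), [boxes1, boxes2]] when the box annotations are plain lists, so they are grouped as a list instead of being stacked into tensors.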
| 35.804469
| 99
| 0.555469
| 5,075
| 0.791855
| 0
| 0
| 0
| 0
| 0
| 0
| 3,563
| 0.555937
|
671a1a30341f98dfd27e877827d5eea516829e2a
| 7,765
|
py
|
Python
|
env/lib/python3.9/site-packages/ansible/modules/cloud/amazon/_ec2_vpc_vpn_facts.py
|
unbounce/aws-name-asg-instances
|
e0379442e3ce71bf66ba9b8975b2cc57a2c7648d
|
[
"MIT"
] | 17
|
2017-06-07T23:15:01.000Z
|
2021-08-30T14:32:36.000Z
|
env/lib/python3.9/site-packages/ansible/modules/cloud/amazon/_ec2_vpc_vpn_facts.py
|
unbounce/aws-name-asg-instances
|
e0379442e3ce71bf66ba9b8975b2cc57a2c7648d
|
[
"MIT"
] | 9
|
2017-06-25T03:31:52.000Z
|
2021-05-17T23:43:12.000Z
|
env/lib/python3.9/site-packages/ansible/modules/cloud/amazon/_ec2_vpc_vpn_facts.py
|
unbounce/aws-name-asg-instances
|
e0379442e3ce71bf66ba9b8975b2cc57a2c7648d
|
[
"MIT"
] | 3
|
2018-05-26T21:31:22.000Z
|
2019-09-28T17:00:45.000Z
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: ec2_vpc_vpn_info
version_added: 1.0.0
short_description: Gather information about VPN Connections in AWS.
description:
- Gather information about VPN Connections in AWS.
- This module was called C(ec2_vpc_vpn_facts) before Ansible 2.9. The usage did not change.
requirements: [ boto3 ]
author: Madhura Naniwadekar (@Madhura-CSI)
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpnConnections.html) for possible filters.
required: false
type: dict
vpn_connection_ids:
description:
- Get details of a specific VPN connections using vpn connection ID/IDs. This value should be provided as a list.
required: false
type: list
elements: str
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
'''
EXAMPLES = r'''
# # Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Gather information about all vpn connections
community.aws.ec2_vpc_vpn_info:
- name: Gather information about a filtered list of vpn connections, based on tags
community.aws.ec2_vpc_vpn_info:
filters:
"tag:Name": test-connection
register: vpn_conn_info
- name: Gather information about vpn connections by specifying connection IDs.
community.aws.ec2_vpc_vpn_info:
filters:
vpn-gateway-id: vgw-cbe66beb
register: vpn_conn_info
'''
RETURN = r'''
vpn_connections:
description: List of one or more VPN Connections.
returned: always
type: complex
contains:
category:
description: The category of the VPN connection.
returned: always
type: str
sample: VPN
customer_gateway_configuration:
description: The configuration information for the VPN connection's customer gateway (in the native XML format).
returned: always
type: str
customer_gateway_id:
description: The ID of the customer gateway at your end of the VPN connection.
returned: always
type: str
sample: cgw-17a53c37
options:
description: The VPN connection options.
returned: always
type: dict
sample: {
"static_routes_only": false
}
routes:
description: List of static routes associated with the VPN connection.
returned: always
type: complex
contains:
destination_cidr_block:
description: The CIDR block associated with the local subnet of the customer data center.
returned: always
type: str
sample: 10.0.0.0/16
state:
description: The current state of the static route.
returned: always
type: str
sample: available
state:
description: The current state of the VPN connection.
returned: always
type: str
sample: available
tags:
description: Any tags assigned to the VPN connection.
returned: always
type: dict
sample: {
"Name": "test-conn"
}
type:
description: The type of VPN connection.
returned: always
type: str
sample: ipsec.1
vgw_telemetry:
description: Information about the VPN tunnel.
returned: always
type: complex
contains:
accepted_route_count:
description: The number of accepted routes.
returned: always
type: int
sample: 0
last_status_change:
description: The date and time of the last change in status.
returned: always
type: str
sample: "2018-02-09T14:35:27+00:00"
outside_ip_address:
description: The Internet-routable IP address of the virtual private gateway's outside interface.
returned: always
type: str
sample: 13.127.79.191
status:
description: The status of the VPN tunnel.
returned: always
type: str
sample: DOWN
status_message:
description: If an error occurs, a description of the error.
returned: always
type: str
sample: IPSEC IS DOWN
certificate_arn:
description: The Amazon Resource Name of the virtual private gateway tunnel endpoint certificate.
returned: when a private certificate is used for authentication
type: str
sample: "arn:aws:acm:us-east-1:123456789101:certificate/c544d8ce-20b8-4fff-98b0-example"
vpn_connection_id:
description: The ID of the VPN connection.
returned: always
type: str
sample: vpn-f700d5c0
vpn_gateway_id:
description: The ID of the virtual private gateway at the AWS side of the VPN connection.
returned: always
type: str
sample: vgw-cbe56bfb
'''
import json
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # caught by AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
boto3_tag_list_to_ansible_dict,
camel_dict_to_snake_dict,
)
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
def list_vpn_connections(connection, module):
params = dict()
params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
params['VpnConnectionIds'] = module.params.get('vpn_connection_ids')
try:
result = json.loads(json.dumps(connection.describe_vpn_connections(**params), default=date_handler))
except ValueError as e:
module.fail_json_aws(e, msg="Cannot validate JSON data")
except (ClientError, BotoCoreError) as e:
module.fail_json_aws(e, msg="Could not describe customer gateways")
snaked_vpn_connections = [camel_dict_to_snake_dict(vpn_connection) for vpn_connection in result['VpnConnections']]
if snaked_vpn_connections:
for vpn_connection in snaked_vpn_connections:
vpn_connection['tags'] = boto3_tag_list_to_ansible_dict(vpn_connection.get('tags', []))
module.exit_json(changed=False, vpn_connections=snaked_vpn_connections)
def main():
argument_spec = dict(
vpn_connection_ids=dict(default=[], type='list', elements='str'),
filters=dict(default={}, type='dict')
)
module = AnsibleAWSModule(argument_spec=argument_spec,
mutually_exclusive=[['vpn_connection_ids', 'filters']],
supports_check_mode=True)
if module._module._name == 'ec2_vpc_vpn_facts':
module._module.deprecate("The 'ec2_vpc_vpn_facts' module has been renamed to 'ec2_vpc_vpn_info'", date='2021-12-01', collection_name='community.aws')
connection = module.client('ec2')
list_vpn_connections(connection, module)
if __name__ == '__main__':
main()
| 35.619266
| 157
| 0.642112
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,660
| 0.728912
|
671aa126c99ce28f4a40eb764f765d0b5bf6665c
| 10,454
|
py
|
Python
|
cogs/roleselector.py
|
YouGotSchott/tcs-discord-bot
|
696db5da129ef42f4c5047679d289aeb6ed122a9
|
[
"MIT"
] | 1
|
2021-04-30T06:38:31.000Z
|
2021-04-30T06:38:31.000Z
|
cogs/roleselector.py
|
YouGotSchott/tcs-discord-bot
|
696db5da129ef42f4c5047679d289aeb6ed122a9
|
[
"MIT"
] | null | null | null |
cogs/roleselector.py
|
YouGotSchott/tcs-discord-bot
|
696db5da129ef42f4c5047679d289aeb6ed122a9
|
[
"MIT"
] | 1
|
2019-04-28T03:33:35.000Z
|
2019-04-28T03:33:35.000Z
|
import discord
from discord.ext import commands
from pathlib import Path
from config import bot
from collections import OrderedDict
import json
class RoleSelector(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.messages_path = str(Path('cogs/data/messages.json'))
async def opener(self):
with open(self.messages_path, 'r') as f:
return json.load(f)
async def closer(self, messages):
with open(self.messages_path, 'w') as f:
json.dump(messages, f)
@commands.Cog.listener()
async def on_ready(self):
emojis = self.emoji_selector(self.bot.guilds[0].id)
channel = discord.utils.get(self.bot.get_all_channels(), name='roles')
text = await self.embeder(self.data(emojis))
messages = await self.opener()
try:
self.msg = await channel.fetch_message(messages['role_message']['id'])
await self.msg.edit(embed=text)
except:
print("Role Message hasn't been added yet")
self.msg = await channel.send(embed=text)
messages['role_message'] = {}
messages['role_message']['id'] = self.msg.id
await self.closer(messages)
for emoji in emojis.values():
await self.msg.add_reaction(emoji=emoji)
@commands.Cog.listener(name='on_raw_reaction_add')
async def role_reaction_add(self, payload):
try:
if payload.message_id != self.msg.id:
return
except AttributeError:
return
guild = self.bot.get_guild(payload.guild_id)
user = guild.get_member(payload.user_id)
if user.id == self.bot.user.id:
return
emojis = self.emoji_selector(guild.id)
clean_emoji = str(payload.emoji).strip('<:>')
for k, v in emojis.items():
if v in clean_emoji:
role = discord.utils.get(user.guild.roles, name=k)
if 'mission-maker' in k:
results = await self.saturday_check()
if user.id not in results:
await self.msg.remove_reaction(v, user)
return
if 'auditor' in k:
role_mm = discord.utils.get(user.guild.roles, name='mission-maker')
if role_mm not in user.roles:
await self.msg.remove_reaction(v, user)
return
if role in user.roles:
await user.remove_roles(role)
else:
await user.add_roles(role)
await self.msg.remove_reaction(v, user)
async def saturday_check(self):
results = await self.bot.conn.fetch("""
SELECT user_id FROM attendance""")
id_list = [x["user_id"] for x in results]
return id_list
async def embeder(self, msg_embed):
em = discord.Embed(
title=self.msg_embed['title'], description=self.msg_embed['description'], color=0x008080)
em.set_thumbnail(url=self.msg_embed['thumbnail'])
for value in self.field_dict.values():
em.add_field(name=value['name'], value=value['value'], inline=False)
em.set_footer(text=self.footer['footer'])
return em
def emoji_selector(self, guild):
if 169696752461414401 == guild:
emojis = OrderedDict([
('mission-maker', 'feelscornman:485958281458876416'),
('auditor', '\U0001F913'),
('heretic', '\U0001f300'),
('liberation', 'finger_gun:300089586460131328'),
('r6siege', '\U0001f308'),
('ricefields', 'rice_fields:483791993370181632'),
('minecraft', '\U000026cf'),
('flight-sims', '\U0001f525'),
('vr', 'iron_uncle:548645154454765568'),
('zeus-op', '\U000026a1'),
('4x', '\U0001f3ed'),
('rts', 'smoothbrain:592115163390410783'),
('destiny-2', '\U0001f47e'),
('squad', 'CplChad:409868955239579649'),
('zomboid', 'the_devil:663562931681624081')
])
else:
emojis = OrderedDict([
('mission-maker', 'uncle:567728566540697635'),
('auditor', '\U0001F913'),
('heretic', '\U0001f300'),
('liberation', 'snek_uncle:567728565781528576'),
('r6siege', '\U0001f3c3'),
('ricefields', 'shadow_uncle:567728565248851989'),
('minecraft', '\U000026cf'),
('flight-sims', '\U0001f525'),
('vr', 'jensen_uncle:567728565391589399'),
('zeus-op', '\U000026a1'),
('4x', '\U0001f3ed'),
('rts', 'fast_uncle:567728565525807104'),
('destiny-2', '\U0001f47e'),
('squad', 'uncle_uncle:567728565785985025'),
('zomboid', 'uncle_hacker:567728565798567940')
])
return emojis
def data(self, emojis):
self.msg_embed = OrderedDict([
('title', '**TCS Role Selector**'),
('description', '''Use this tool to select optional Discord roles.\n\n'''
'''**DO NOT ABUSE THE BOT!**\n'''
'''\u200B'''),
('thumbnail', 'https://s3.amazonaws.com/files.enjin.com/1015535/site_logo/2020_logo.png')
])
self.field_dict = OrderedDict([
('mission_maker', OrderedDict([
('name', '<:{}> @mission-maker'.format(emojis['mission-maker'])),
('value', '''Provides access to our mission making channels, which *MAY HAVE SPOILERS*.\n\n'''
'''__**REQUIREMENTS**__\n'''
'''**__1.)__** You **MUST** attend a Saturday Op before taking this role.\n'''
'''**__2.)__** **ONLY** select this role if you plan on making missions for TCS.\n'''
'''**__3.)__** **DO NOT** use this role to provide feedback or suggestions in the mission making channel, use **#debriefing**.\n'''
'''**__4.)__** Understand that we make missions differently than other units.\n'''
'''**__5.)__** Understand that this is not an easy job and you might not get it right the first time.\n'''
'''\u200B''')])
),
('auditor', OrderedDict([
('name', '{} @auditor'.format(emojis['auditor'])),
('value', '''Allows other mission makers to ping you to check their missions for errors. *(requires @mission-maker tag)*\n''')])
),
('heretic', OrderedDict([
('name', '{} @heretic'.format(emojis['heretic'])),
('value', '''Provides access to the **#heresy** channel.\n'''
'''*A place for Warhammer 40K discussion and shitposting.*''')])
),
('liberation', OrderedDict([
('name', '<:{}> @liberation'.format(emojis['liberation'])),
('value', '''Allows other members to ping you to play *Arma 3 Liberation* on our server.''')])
),
('r6siege', OrderedDict([
('name', '{} @r6siege'.format(emojis['r6siege'])),
('value', '''Allows other members to ping you to play *Rainbow Six Siege*.''')])
),
('ricefields', OrderedDict([
('name', '<:{}> @ricefields'.format(emojis['ricefields'])),
('value', '''Allows other members to ping you to play *Rising Storm 2: Vietnam*.''')])
),
('minecraft', OrderedDict([
('name', '{} @minecraft'.format(emojis['minecraft'])),
('value', '''Allows other members to ping you to play *Minecraft* on our server.''')])
),
('flight_sims', OrderedDict([
('name', '{} @flight-sims'.format(emojis['flight-sims'])),
('value', '''Allows other members to ping you to play *DCS* or *IL2*.''')])
),
('vr', OrderedDict([
('name', '<:{}> @vr'.format(emojis['vr'])),
('value', '''Allows other members to ping you to play any *Virtual Reality Games*.''')])
),
('zeus-op', OrderedDict([
('name', '{} @zeus-op'.format(emojis['zeus-op'])),
('value', '''Allows other members to ping you to play *Impromptu Zeus Missions*.\n\n'''
'''__**RULES**__\n'''
'''**__1.)__** Don't expect someone to step-up as Zeus.\n'''
'''**__2.)__** Zeus has final say on what's allowed in their mission.\n'''
'''\u200B''')])
),
('4x', OrderedDict([
('name', '{} @4x'.format(emojis['4x'])),
('value', '''Allows other members to ping you to play *4X Games*.\n\n'''
'''__**Active Games**__\n'''
'''> *Hearts of Iron 4*\n'''
'''> *Stellaris*\n'''
'''\u200B''')])
),
('rts', OrderedDict([
('name', '<:{}> @rts'.format(emojis['rts'])),
('value', '''Allows other members to ping you to play *RTS Games*.\n\n'''
'''__**Active Games**__\n'''
'''> *Wargame: Red Dragon*\n'''
'''> *Wargame: War in the East*\n'''
'''> *Men of War: Assault Squad 2*\n'''
'''> *StarCraft 2*\n'''
'''\u200B''')])
),
('destiny-2', OrderedDict([
('name', '{} @destiny-2'.format(emojis['destiny-2'])),
('value', '''Allows other members to ping you to play *Destiny 2*.\n\n'''
)])
),
('squad', OrderedDict([
('name', '<:{}> @squad'.format(emojis['squad'])),
('value', '''Allows other members to ping you to play *Squad*.\n\n'''
)])
),
('zomboid', OrderedDict([
('name', '<:{}> @zomboid'.format(emojis['zomboid'])),
('value', '''Allows other members to ping you to play organized *Project Zomboid*.\n\n'''
)])
)
])
self.footer = OrderedDict([
('footer', '''React to toggle role on/off''')
])
def setup(bot):
bot.add_cog(RoleSelector(bot))
| 46.052863
| 147
| 0.505931
| 10,255
| 0.980964
| 0
| 0
| 2,124
| 0.203176
| 2,875
| 0.275014
| 4,257
| 0.407213
|
671b9c9f7b2c7728391666847cc8f06a6c3abea1
| 468
|
py
|
Python
|
Bunnies.py
|
fatih-iver/Intro-to-Computer-Science-with-Python
|
7b8127681415dfd100a0e70fe8a672cec696bbb7
|
[
"MIT"
] | null | null | null |
Bunnies.py
|
fatih-iver/Intro-to-Computer-Science-with-Python
|
7b8127681415dfd100a0e70fe8a672cec696bbb7
|
[
"MIT"
] | null | null | null |
Bunnies.py
|
fatih-iver/Intro-to-Computer-Science-with-Python
|
7b8127681415dfd100a0e70fe8a672cec696bbb7
|
[
"MIT"
] | null | null | null |
# Define a procedure, fibonacci, that takes a natural number as its input, and
# returns the value of that fibonacci number.
# Two Base Cases:
# fibonacci(0) => 0
# fibonacci(1) => 1
# Recursive Case:
# n > 1 : fibonacci(n) => fibonacci(n-1) + fibonacci(n-2)
def fibonacci(n):
return n if n == 0 or n == 1 else fibonacci(n-1) + fibonacci(n-2)
print (fibonacci(0))
#>>> 0
print (fibonacci(1))
#>>> 1
print (fibonacci(15))
#>>> 610
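# Aside (not part of the original exercise): the naive recursion above recomputes the
# same subproblems and grows exponentially with n; wrapping the function with
# functools.lru_cache(), e.g. `@lru_cache(maxsize=None)` above the def, memoizes the
# calls and makes the evaluation linear in n.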
| 24.631579
| 79
| 0.604701
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 290
| 0.619658
|
671bdca4dcc88d2670523ab9386ad959165e1bf4
| 1,876
|
py
|
Python
|
symphony/cli/graphql_compiler/tests/test_utils_codegen.py
|
remo5000/magma
|
1d1dd9a23800a8e07b1ce016776d93e12430ec15
|
[
"BSD-3-Clause"
] | 1
|
2020-06-05T09:01:40.000Z
|
2020-06-05T09:01:40.000Z
|
symphony/cli/graphql_compiler/tests/test_utils_codegen.py
|
remo5000/magma
|
1d1dd9a23800a8e07b1ce016776d93e12430ec15
|
[
"BSD-3-Clause"
] | 14
|
2019-11-15T12:01:18.000Z
|
2019-12-12T14:37:42.000Z
|
symphony/cli/graphql_compiler/tests/test_utils_codegen.py
|
remo5000/magma
|
1d1dd9a23800a8e07b1ce016776d93e12430ec15
|
[
"BSD-3-Clause"
] | 3
|
2019-11-15T15:56:25.000Z
|
2019-11-21T10:34:59.000Z
|
#!/usr/bin/env python3
from .base_test import BaseTest
from fbc.symphony.cli.graphql_compiler.gql.utils_codegen import CodeChunk
class TestRendererDataclasses(BaseTest):
def test_codegen_write_simple_strings(self):
gen = CodeChunk()
gen.write('def sum(a, b):')
gen.indent()
gen.write('return a + b')
code = str(gen)
m = self.load_module(code)
assert m.sum(2, 3) == 5
def test_codegen_write_template_strings_args(self):
gen = CodeChunk()
gen.write('def {0}(a, b):', 'sum')
gen.indent()
gen.write('return a + b')
code = str(gen)
m = self.load_module(code)
assert m.sum(2, 3) == 5
def test_codegen_write_template_strings_kwargs(self):
gen = CodeChunk()
gen.write('def {method}(a, b):', method='sum')
gen.indent()
gen.write('return a + b')
code = str(gen)
m = self.load_module(code)
assert m.sum(2, 3) == 5
def test_codegen_block(self):
gen = CodeChunk()
gen.write('def sum(a, b):')
with gen.block():
gen.write('return a + b')
code = str(gen)
m = self.load_module(code)
assert m.sum(2, 3) == 5
def test_codegen_write_block(self):
gen = CodeChunk()
with gen.write_block('def {name}(a, b):', name='sum'):
gen.write('return a + b')
code = str(gen)
m = self.load_module(code)
assert m.sum(2, 3) == 5
def test_codegen_write_lines(self):
lines = [
'@staticmethod',
'def sum(a, b):'
' return a + b'
]
gen = CodeChunk()
gen.write('class Math:')
gen.indent()
gen.write_lines(lines)
code = str(gen)
m = self.load_module(code)
assert m.Math.sum(2, 3) == 5
| 24.363636
| 73
| 0.537846
| 1,743
| 0.929104
| 0
| 0
| 0
| 0
| 0
| 0
| 257
| 0.136994
|
671c056e5378258e43c069fd46366a89b0af73b7
| 202
|
py
|
Python
|
api/__init__.py
|
zhangyouliang/TencentComicBook
|
74d8e7e787f70554d5d982687540a6ac3225b9ed
|
[
"MIT"
] | null | null | null |
api/__init__.py
|
zhangyouliang/TencentComicBook
|
74d8e7e787f70554d5d982687540a6ac3225b9ed
|
[
"MIT"
] | null | null | null |
api/__init__.py
|
zhangyouliang/TencentComicBook
|
74d8e7e787f70554d5d982687540a6ac3225b9ed
|
[
"MIT"
] | null | null | null |
from flask import Flask
def create_app():
app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False
from .views import app as main_app
app.register_blueprint(main_app)
return app
| 18.363636
| 39
| 0.70297
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 15
| 0.074257
|
671c98674cb5f008f240bb63dd21b79174a4ca79
| 898
|
py
|
Python
|
misc/pytorch_toolkit/chest_xray_screening/chest_xray_screening/utils/get_config.py
|
a-a-egorovich/training_extensions
|
e0bbdfa4266c6ccfebf23ef303204a4a62fc290d
|
[
"Apache-2.0"
] | null | null | null |
misc/pytorch_toolkit/chest_xray_screening/chest_xray_screening/utils/get_config.py
|
a-a-egorovich/training_extensions
|
e0bbdfa4266c6ccfebf23ef303204a4a62fc290d
|
[
"Apache-2.0"
] | null | null | null |
misc/pytorch_toolkit/chest_xray_screening/chest_xray_screening/utils/get_config.py
|
a-a-egorovich/training_extensions
|
e0bbdfa4266c6ccfebf23ef303204a4a62fc290d
|
[
"Apache-2.0"
] | 1
|
2021-05-08T04:29:44.000Z
|
2021-05-08T04:29:44.000Z
|
import os
import json
def get_config(action, optimised = False):
""" action: train, test, export or gdrive
optimised: False --> DenseNet121
True --> DenseNet121Eff
"""
root_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
config_path = os.path.join(root_path, 'configs')
if action == 'download':
with open(os.path.join(config_path, 'download_configs.json')) as f1:
config = json.load(f1)
else:
if optimised:
with open(os.path.join(config_path, 'densenet121eff_config.json')) as f1:
config_file = json.load(f1)
config = config_file[action]
else:
with open(os.path.join(config_path, 'densenet121_config.json')) as f1:
config_file = json.load(f1)
config = config_file[action]
return config
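# Example usage: get_config('download') returns the parsed configs/download_configs.json, while get_config('train', optimised=True) loads configs/densenet121eff_config.json and returns only its 'train' section.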
| 33.259259
| 93
| 0.609131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 228
| 0.253898
|
671d6732bc9abaae404bc6f0b8c59f26d23ca716
| 3,337
|
py
|
Python
|
src/udpa/annotations/versioning_pb2.py
|
pomerium/enterprise-client-python
|
366d72cc9cd6dc05fae704582deb13b1ccd20a32
|
[
"Apache-2.0"
] | 1
|
2021-09-14T04:34:29.000Z
|
2021-09-14T04:34:29.000Z
|
src/udpa/annotations/versioning_pb2.py
|
pomerium/enterprise-client-python
|
366d72cc9cd6dc05fae704582deb13b1ccd20a32
|
[
"Apache-2.0"
] | 3
|
2021-09-15T15:10:41.000Z
|
2022-01-04T21:03:03.000Z
|
src/udpa/annotations/versioning_pb2.py
|
pomerium/enterprise-client-python
|
366d72cc9cd6dc05fae704582deb13b1ccd20a32
|
[
"Apache-2.0"
] | 1
|
2021-09-13T21:51:37.000Z
|
2021-09-13T21:51:37.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: udpa/annotations/versioning.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='udpa/annotations/versioning.proto',
package='udpa.annotations',
syntax='proto3',
serialized_options=b'Z\"github.com/cncf/xds/go/annotations',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n!udpa/annotations/versioning.proto\x12\x10udpa.annotations\x1a google/protobuf/descriptor.proto\"5\n\x14VersioningAnnotation\x12\x1d\n\x15previous_message_type\x18\x01 \x01(\t:^\n\nversioning\x12\x1f.google.protobuf.MessageOptions\x18\xd3\x88\xe1\x03 \x01(\x0b\x32&.udpa.annotations.VersioningAnnotationB$Z\"github.com/cncf/xds/go/annotationsb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
VERSIONING_FIELD_NUMBER = 7881811
versioning = _descriptor.FieldDescriptor(
name='versioning', full_name='udpa.annotations.versioning', index=0,
number=7881811, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)
_VERSIONINGANNOTATION = _descriptor.Descriptor(
name='VersioningAnnotation',
full_name='udpa.annotations.VersioningAnnotation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='previous_message_type', full_name='udpa.annotations.VersioningAnnotation.previous_message_type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=89,
serialized_end=142,
)
DESCRIPTOR.message_types_by_name['VersioningAnnotation'] = _VERSIONINGANNOTATION
DESCRIPTOR.extensions_by_name['versioning'] = versioning
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
VersioningAnnotation = _reflection.GeneratedProtocolMessageType('VersioningAnnotation', (_message.Message,), {
'DESCRIPTOR' : _VERSIONINGANNOTATION,
'__module__' : 'udpa.annotations.versioning_pb2'
# @@protoc_insertion_point(class_scope:udpa.annotations.VersioningAnnotation)
})
_sym_db.RegisterMessage(VersioningAnnotation)
versioning.message_type = _VERSIONINGANNOTATION
google_dot_protobuf_dot_descriptor__pb2.MessageOptions.RegisterExtension(versioning)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 39.258824
| 374
| 0.802218
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,089
| 0.326341
|
671ef5ab0fb204c856b7864f6aaa3913e2ce45e8
| 2,787
|
py
|
Python
|
modules/action/scan_smbclient_nullsession.py
|
mrpnkt/apt2
|
542fb0593069c900303421f3f24a499ce8f3a6a8
|
[
"MIT"
] | 37
|
2018-08-24T20:13:19.000Z
|
2022-02-22T08:41:24.000Z
|
modules/action/scan_smbclient_nullsession.py
|
zu3s/apt2-1
|
67325052d2713a363183c23188a67e98a379eec7
|
[
"MIT"
] | 4
|
2020-06-14T23:16:45.000Z
|
2021-03-08T14:18:21.000Z
|
modules/action/scan_smbclient_nullsession.py
|
zu3s/apt2-1
|
67325052d2713a363183c23188a67e98a379eec7
|
[
"MIT"
] | 23
|
2018-11-15T13:00:09.000Z
|
2021-08-07T18:53:04.000Z
|
import re
from core.actionModule import actionModule
from core.keystore import KeyStore as kb
from core.utils import Utils
class scan_smbclient_nullsession(actionModule):
def __init__(self, config, display, lock):
super(scan_smbclient_nullsession, self).__init__(config, display, lock)
self.title = "Test for NULL Session"
self.shortName = "NULLSessionSmbClient"
self.description = "execute [smbclient -N -L <IP>] on each target"
self.requirements = ["smbclient"]
self.triggers = ["newPort_tcp_445", "newPort_tcp_139"]
self.safeLevel = 5
def getTargets(self):
# we are interested in all hosts
self.targets = kb.get('port/tcp/139', 'port/tcp/445')
def process(self):
# load any targets we are interested in
self.getTargets()
# loop over each target
for t in self.targets:
# verify we have not tested this host before
if not self.seentarget(t):
# add the new IP to the already seen list
self.addseentarget(t)
self.display.verbose(self.shortName + " - Connecting to " + t)
# get windows domain/workgroup
temp_file2 = self.config["proofsDir"] + "nmblookup_" + t + "_" + Utils.getRandStr(10)
command2 = self.config["nmblookup"] + " -A " + t
result2 = Utils.execWait(command2, temp_file2)
workgroup = "WORKGROUP"
for line in result2.split('\n'):
m = re.match(r'\s+(.*)\s+<00> - <GROUP>.*', line)
if (m):
workgroup = m.group(1).strip()
self.display.debug("found ip [%s] is on the workgroup/domain [%s]" % (t, workgroup))
# make outfile
outfile = self.config["proofsDir"] + self.shortName + "_" + t + "_" + Utils.getRandStr(10)
# run rpcclient
command = self.config["smbclient"] + " -N -W " + workgroup + " -L " + t
result = Utils.execWait(command, outfile)
# check to see if it worked
if "Anonymous login successful" in result:
# fire a new trigger
self.fire("nullSession")
self.addVuln(t, "nullSession", {"type": "smb", "output": outfile.replace("/", "%2F")})
self.display.error("VULN [NULLSession] Found on [%s]" % t)
# TODO - process smbclient results
# parse out put and store any new info and fire any additional triggers
else:
# do nothing
self.display.verbose("Could not get NULL Session on %s" % t)
return
| 42.227273
| 108
| 0.545748
| 2,660
| 0.954431
| 0
| 0
| 0
| 0
| 0
| 0
| 912
| 0.327234
|
67217c13d08aaa4cb02ed01fdfa62904c93ef245
| 2,652
|
py
|
Python
|
UserSpace/Python/Cosmo.py
|
dkaramit/MiMeS
|
a3c97a4877f181b54e880d7b144271c5659291b5
|
[
"MIT"
] | 2
|
2022-01-27T20:10:19.000Z
|
2022-01-29T04:26:16.000Z
|
UserSpace/Python/Cosmo.py
|
dkaramit/MiMeS
|
a3c97a4877f181b54e880d7b144271c5659291b5
|
[
"MIT"
] | null | null | null |
UserSpace/Python/Cosmo.py
|
dkaramit/MiMeS
|
a3c97a4877f181b54e880d7b144271c5659291b5
|
[
"MIT"
] | null | null | null |
from numpy import logspace
from sys import path as sysPath
sysPath.append('../../src')
#load the module
from interfacePy import Cosmo
cosmo=Cosmo('../../src/data/eos2020.dat',0,1e5)
for T in logspace(-5,5,50):
print(
'T=',T,'GeV\t',
'H=',cosmo.Hubble(T),'GeV\t',
'h_eff=',cosmo.heff(T),'\t',
'g_eff=',cosmo.geff(T),'\t',
's=',cosmo.s(T),'GeV^3\t',
)
if False:
import matplotlib.pyplot as plt
#########-----g_eff and h_eff-----#########
fig=plt.figure(figsize=(9,4))
fig.subplots_adjust(bottom=0.15, left=0.15, top = 0.95, right=0.9,wspace=0.0,hspace=0.0)
fig.suptitle('')
sub = fig.add_subplot(1,1,1)
T=logspace(-5,5,500)
gt=[cosmo.geff(i) for i in T]
ht=[cosmo.heff(i) for i in T]
sub.plot(T,gt,linestyle='--',c='xkcd:red',label=r"$g_{\rm eff} (T)$")
sub.plot(T,ht,linestyle=':',c='xkcd:black',label=r"$h_{\rm eff} (T)$")
sub.set_xlabel(r'$T ~ [{\rm GeV}]$')
sub.set_ylabel(r'rel. dof')
sub.legend(bbox_to_anchor=(1, 0.0),borderaxespad=0.,
borderpad=0.05,ncol=1,loc='lower right',fontsize=14,framealpha=0)
sub.set_yscale('log')
sub.set_xscale('log')
fig.savefig('rdofs-T_examplePlot.pdf',bbox_inches='tight')
#########-----dg_effdT and dh_effdT-----#########
fig=plt.figure(figsize=(9,4))
fig.subplots_adjust(bottom=0.15, left=0.15, top = 0.95, right=0.9,wspace=0.0,hspace=0.0)
fig.suptitle('')
sub = fig.add_subplot(1,1,1)
T=logspace(-5,5,500)
dg=[cosmo.dgeffdT (i) for i in T]
dh=[cosmo.dheffdT(i) for i in T]
sub.plot(T,dg,linestyle='--',c='xkcd:red',label=r"$\dfrac{d g_{\rm eff}}{dT} (T)$")
sub.plot(T,dh,linestyle=':',c='xkcd:black',label=r"$\dfrac{d h_{\rm eff}}{dT} (T)$")
sub.set_xlabel(r'$T ~ [{\rm GeV}]$')
sub.legend(bbox_to_anchor=(1, 0.5),borderaxespad=0.,
borderpad=0.05,ncol=1,loc='lower right',fontsize=14,framealpha=0)
sub.set_yscale('symlog')
sub.set_xscale('log')
fig.savefig('drdofsdT-T_examplePlot.pdf',bbox_inches='tight')
#########-----dh-----#########
fig=plt.figure(figsize=(9,4))
fig.subplots_adjust(bottom=0.15, left=0.15, top = 0.95, right=0.9,wspace=0.0,hspace=0.0)
fig.suptitle('')
sub = fig.add_subplot(1,1,1)
T=logspace(-5,5,500)
dht=[cosmo.dh(i) for i in T]
sub.plot(T,dht,linestyle='-',c='xkcd:black')
sub.set_xlabel(r'$T ~ [{\rm GeV}]$')
sub.set_ylabel(r'$\delta_h = 1 + \dfrac{1}{3} \dfrac{d \log h_{\rm eff} }{d \log T}$')
sub.set_yscale('linear')
sub.set_xscale('log')
fig.savefig('dh-T_examplePlot.pdf',bbox_inches='tight')
| 28.212766
| 92
| 0.584465
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 722
| 0.272247
|
6721e6112f2f0c4cefe44686fc888d2b7c5c0f42
| 5,236
|
py
|
Python
|
src/psion/oauth2/endpoints/revocation.py
|
revensky/psion
|
dfe38a1a4f4d6a5029d0973dbe1326415df6d512
|
[
"MIT"
] | 2
|
2021-02-22T22:12:23.000Z
|
2021-02-22T22:48:33.000Z
|
src/psion/oauth2/endpoints/revocation.py
|
revensky/psion
|
dfe38a1a4f4d6a5029d0973dbe1326415df6d512
|
[
"MIT"
] | null | null | null |
src/psion/oauth2/endpoints/revocation.py
|
revensky/psion
|
dfe38a1a4f4d6a5029d0973dbe1326415df6d512
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import Optional
from psion.oauth2.exceptions import InvalidClient, OAuth2Error, UnsupportedTokenType
from psion.oauth2.models import JSONResponse, Request
from .base import BaseEndpoint
class RevocationEndpoint(BaseEndpoint):
"""
Endpoint used by the `Client` to revoke a token in its possession.
If the Client authenticates successfully but provides a token that was
not issued to it, the `Provider` **DOES NOT** revoke the token,
since the Client is not authorized to operate on that token.
If the token is already invalid, does not exist within the Provider
or is otherwise unknown or invalid, it is also considered "revoked".
:cvar `__authentication_methods__`: Allowed Client Authentication methods.
:cvar `__supported_tokens__`: Token types supported by the endpoint.
"""
__endpoint__: str = "revocation"
__authentication_methods__: list[str] = None
__supported_tokens__: list[str] = ["access_token", "refresh_token"]
async def __call__(self, request: Request) -> JSONResponse:
"""
Revokes a previously issued Token.
First it validates the `Revocation Request` of the `Client`
by making sure the required parameter "token" is present,
and that the `Client` can authenticate with the allowed
authentication methods.
From the specification at
`<https://www.rfc-editor.org/rfc/rfc7009.html#section-2.1>`_::
The client constructs the request by including the following
parameters using the "application/x-www-form-urlencoded" format in
the HTTP request entity-body:
token REQUIRED. The token that the client wants to get revoked.
token_type_hint OPTIONAL. A hint about the type of the token
submitted for revocation. Clients MAY pass this parameter in
order to help the authorization server to optimize the token
lookup. If the server is unable to locate the token using
the given hint, it MUST extend its search across all of its
supported token types. An authorization server MAY ignore
this parameter, particularly if it is able to detect the
token type automatically. This specification defines two
such values:
* access_token: An access token as defined in [RFC6749],
Section 1.4
* refresh_token: A refresh token as defined in [RFC6749],
Section 1.5
Specific implementations, profiles, and extensions of this
specification MAY define other values for this parameter
using the registry defined in Section 4.1.2.
For example, a client may request the revocation of a refresh token
with the following request:
POST /revoke HTTP/1.1
Host: server.example.com
Content-Type: application/x-www-form-urlencoded
Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
token=45ghiukldjahdnhzdauz&token_type_hint=refresh_token
It then returns an empty response with a HTTP Status 200 OK,
signaling that the provided token has been revoked by the server.
From the specification at
`<https://www.rfc-editor.org/rfc/rfc7009.html#section-2.2>`_::
The authorization server responds with HTTP status code 200 if the
token has been revoked successfully or if the client submitted an
invalid token.
Note: invalid tokens do not cause an error response since the client
cannot handle such an error in a reasonable way. Moreover, the
purpose of the revocation request, invalidating the particular token,
is already achieved.
The content of the response body is ignored by the client as all
necessary information is conveyed in the response code.
An invalid token type hint value is ignored by the authorization
server and does not influence the revocation response.
This endpoint does not return any errors, except when the provided
`token_type_hint` is not supported by the Provider.
:raises UnsupportedTokenType: The provided token_type_hint is not supported.
"""
try:
client = await self.authenticate(request, self.__authentication_methods__)
data = request.form()
token: str = data.get("token")
token_type_hint: Optional[str] = data.get("token_type_hint")
if not token or not isinstance(token, str):
return JSONResponse()
if token_type_hint:
if token_type_hint not in self.__supported_tokens__:
raise UnsupportedTokenType
await self.adapter.revoke_token(client, token, token_type_hint)
return JSONResponse()
except InvalidClient as exc:
return JSONResponse(401, exc.headers, exc.dump())
except OAuth2Error as exc:
return JSONResponse(400, exc.headers, exc.dump())
| 41.228346
| 86
| 0.661383
| 4,997
| 0.954354
| 0
| 0
| 0
| 0
| 4,200
| 0.802139
| 3,995
| 0.762987
|
67221620473d936c0d65eea07a40a563dbd162cf
| 1,851
|
py
|
Python
|
experiments/Browser/browser.py
|
rajKarra69420/bento
|
1324189e26acfe3a372882519bd78e037d93997c
|
[
"BSD-3-Clause"
] | 3
|
2021-12-01T02:11:15.000Z
|
2022-02-03T22:45:00.000Z
|
experiments/Browser/browser.py
|
rajKarra69420/bento
|
1324189e26acfe3a372882519bd78e037d93997c
|
[
"BSD-3-Clause"
] | 4
|
2021-11-27T11:04:36.000Z
|
2022-02-17T02:53:21.000Z
|
experiments/Browser/browser.py
|
rajKarra69420/bento
|
1324189e26acfe3a372882519bd78e037d93997c
|
[
"BSD-3-Clause"
] | 5
|
2021-07-01T20:23:43.000Z
|
2022-03-12T18:10:34.000Z
|
#!/usr/bin/env python3
import argparse
import logging
import sys
import zlib
sys.path.append("../..")
from bento.client.api import ClientConnection
from bento.common.protocol import *
import bento.common.util as util
function_name= "browser"
function_code= """
import requests
import zlib
import os
def browser(url, padding):
body= requests.get(url, timeout=1).content
compressed= zlib.compress(body)
final= compressed
if padding - len(final) > 0:
final= final + (os.urandom(padding - len(final)))
else:
final= final + (os.urandom((len(final) + padding) % padding))
api.send(final)
"""
@util.timeit
def main():
logging.basicConfig(format='%(levelname)s:\t%(message)s',
level=logging.DEBUG)
parser = argparse.ArgumentParser(
description='Fetch a website and pad response with dummy bytes')
parser.add_argument('host', help="server's IPv4 address")
parser.add_argument('port', type=int, help="server's port")
parser.add_argument('url', help="URL to fetch")
parser.add_argument('padding', help="number of bytes to pad the URL body to")
args = parser.parse_args()
conn= ClientConnection(args.host, args.port)
token, errmsg= conn.send_store_request(function_name, function_code)
if errmsg is not None:
util.fatal(f"Error message from server {errmsg}")
logging.debug(f"Got token: {token}")
call= f"{function_name}('{args.url}', {args.padding})"
session_id, errmsg= conn.send_execute_request(call, token)
if errmsg is not None:
util.fatal(f"Error message from server {errmsg}")
logging.debug(f"Got session_id: {session_id}")
logging.debug("Getting output...")
conn.send_open_request(session_id)
data, session_id, err= conn.get_sessionmsg()
print(zlib.decompress(data))
if __name__ == '__main__':
main()
| 27.626866
| 76
| 0.686116
| 0
| 0
| 0
| 0
| 1,178
| 0.636413
| 0
| 0
| 789
| 0.426256
|
67224f47630e980eac0b94abcd62dd84644278c0
| 3,429
|
py
|
Python
|
app/views/v1/search.py
|
daghan/Ostrich
|
b12057bee7b8b92aedf09ec40edc97a60340527b
|
[
"MIT"
] | null | null | null |
app/views/v1/search.py
|
daghan/Ostrich
|
b12057bee7b8b92aedf09ec40edc97a60340527b
|
[
"MIT"
] | null | null | null |
app/views/v1/search.py
|
daghan/Ostrich
|
b12057bee7b8b92aedf09ec40edc97a60340527b
|
[
"MIT"
] | null | null | null |
from app import webapp, mysql
from app.models import Search , Utils, Collection, WebUtils
from flask import request, jsonify
from flask.ext.jsonpify import jsonify as jsonp
import json
'''
Generic search call
@params
q: search query
page: the page number of search results (default 0)
type: type of search: {default: free(all fields), category, isbn}
@response
List of search result objects(ES)
'''
@webapp.route('/search')
def searchString():
response = {'status': 'False'}
results = {}
query = Utils.getParam(request.args, 'q')
page = Utils.getParam(request.args, 'page', var_type='int', default=1)
search_type = Utils.getParam(request.args, 'type', default='free')
user_id = Utils.getParam(request.args, 'userId', 'int')
flow = Utils.getParam(request.args, 'flow', default='borrow')
gcm_id = Utils.getParam(request.args, 'gcm_id', default=None)
uuid = Utils.getParam(request.args, 'distinct_id', default=None)
ref = Utils.getParam(request.args, 'ref', default='mobile')
if not query:
return Utils.errorResponse(response, 'HTTP_STATUS_CODE_DATA_MISSING')
if ref == 'web':
return jsonify(WebUtils.fetchSearchResults(query, search_type, page))
user_info = {'user_id': user_id, 'gcm_id': gcm_id, 'uuid': uuid}
search = Search(query, user_info, flow)
if search_type == 'free':
results = search.basicSearch(page=page-1)
elif search_type == 'category':
results = search.categorySearch(page=page-1)
elif search_type == 'collections':
results = search.collectionsSearch(page=page-1)
elif search_type == 'isbn':
results = search.isbnSearch(page=page-1)
elif search_type == 'auto':
results = search.autoComplete()
elif search_type == 'custom':
results = search.customQuery()
return results
#log
if user_id not in Utils.getAdmins():
Search.logSearch({_:request.args.get(_) for _ in request.args}, search_type)
return jsonify(results) if flow != 'admin' else jsonp(results)
@webapp.route('/getCategories')
def getCategories():
categories = Search.getSearchCategoriesForApp()
return jsonify(categories)
@webapp.route('/getCollectionCategory')
def getCollectionCategory():
return jsonify(Collection.getByCategory())
@webapp.route('/searchFail', methods=['POST'])
def searchFail():
#NOTE deprecated. Done directly from backend
return jsonify(status='true')
user_id = Utils.getParam(request.form, 'user_id', 'int')
q = Utils.getParam(request.form, 'q')
q_type = Utils.getParam(request.form,'type')
flow = Utils.getParam(request.form, 'flow', default='borrow')
Search(q, {'user_id': user_id}, flow).reportFail(True,True,q_type)
return jsonify(status='true')
@webapp.route('/recommended', methods=['GET'])
def recommended():
return jsonify(Search([]).mostRecommended())
@webapp.route('/mostSearched', methods=['GET'])
def mostSearched():
return jsonify(Search([]).mostSearched())
@webapp.route('/getMultiplePanels')
def getMultiplePanels():
cursor = mysql.connect().cursor()
cursor.execute("""SELECT collection_id FROM collections WHERE active = 1 AND
partial_order = 1 ORDER BY collection_id DESC""")
panels = []
for col_id in cursor.fetchall():
panels.append(Collection(col_id).getObj())
return jsonify(panels)
| 34.636364
| 84
| 0.680082
| 0
| 0
| 0
| 0
| 2,969
| 0.86585
| 0
| 0
| 826
| 0.240887
|
6722b1ddb17bb6d89f4ea39b1f185bec7d6cfcf6
| 555
|
py
|
Python
|
run.py
|
orest-d/pointcloud-viewer-rs
|
0d6d3f27e24d1783c4812a14457f8e20c4ef6f0b
|
[
"MIT"
] | null | null | null |
run.py
|
orest-d/pointcloud-viewer-rs
|
0d6d3f27e24d1783c4812a14457f8e20c4ef6f0b
|
[
"MIT"
] | null | null | null |
run.py
|
orest-d/pointcloud-viewer-rs
|
0d6d3f27e24d1783c4812a14457f8e20c4ef6f0b
|
[
"MIT"
] | null | null | null |
from flask import Flask, make_response
app = Flask(__name__)
@app.route("/")
@app.route("/index.html")
def index():
html = open("assets/index.html").read()
return html
@app.route("/assets/<name>")
def wasm(name):
r = make_response(open(f"assets/{name}","rb").read())
if name.endswith(".wasm"):
r.headers.set('Content-Type', "application/wasm")
return r
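# Hedged check sketch (illustration only, not part of the original file): confirming the
# MIME type set by the route above with Flask's test client. The asset name is a made-up
# placeholder and is assumed to exist under assets/.
def _example_wasm_header_check():
    with app.test_client() as client:
        resp = client.get("/assets/example.wasm")
        return resp.headers.get("Content-Type")  # expected: "application/wasm"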
@app.route("/data.csv")
def csv():
print("GET CSV")
html = open("data.csv").read()
return html
if __name__ == "__main__":
app.run(debug=True,port=8080)
| 20.555556
| 57
| 0.625225
| 0
| 0
| 0
| 0
| 425
| 0.765766
| 0
| 0
| 150
| 0.27027
|
6724bee4efbfb26d55e405a724ed5a24e2b08168
| 8,496
|
py
|
Python
|
engine/audio/audio_director.py
|
codehearts/pickles-fetch-quest
|
ca9b3c7fe26acb50e1e2d654d068f5bb953bc427
|
[
"MIT"
] | 3
|
2017-12-07T19:17:36.000Z
|
2021-07-29T18:24:25.000Z
|
engine/audio/audio_director.py
|
codehearts/pickles-fetch-quest
|
ca9b3c7fe26acb50e1e2d654d068f5bb953bc427
|
[
"MIT"
] | 41
|
2017-11-11T06:00:08.000Z
|
2022-03-28T23:27:25.000Z
|
engine/audio/audio_director.py
|
codehearts/pickles-fetch-quest
|
ca9b3c7fe26acb50e1e2d654d068f5bb953bc427
|
[
"MIT"
] | 2
|
2018-08-31T23:49:00.000Z
|
2021-09-21T00:42:48.000Z
|
from .audio_source import AudioSource
from engine import disk
import pyglet.media
class AudioDirector(object):
"""Director for loading audio and controlling playback.
Attributes:
attenuation_distance (int): The default attenuation distance for newly
loaded audio. Existing audio will retain its attenuation distance,
see :fn:`set_attenuation_distance` for setting distance on existing
sources.
master_volume (float): The master volume for audio playback.
0 for silence, 1 for nominal volume. A value of 1 disables
            audio attenuation and ignores the position of audio sources.
To avoid this, set volume to 0.99 or lower.
position (tuple of int): The location of the audio listener in
two-dimensional space. Listeners close to this position will be
louder than those further away.
"""
def __init__(self, master_volume=1, position=(0, 0)):
"""Creates a director for grouping and controlling audio playback.
Kwargs:
master_volume (float, optional): Master volume for audio playback.
0 for silence, 1 for nominal volume. A value of 1 will disable
audio attenuation and ignore the position of audio sources.
To avoid this, set volume to 0.99 or lower. Defaults to 1.
position (tuple of int, optional): The location of the audio
listener in two-dimensional space. Listeners close to this
position will be louder than those farther. Defaults to (0, 0).
"""
super(AudioDirector, self).__init__()
self.attenuation_distance = 1
self.master_volume = master_volume
self.position = position
# Cache of loaded resources from disk
self._disk_cache = {}
# Groupings for audio sources
self._groups = {
'all': set()
}
def load(self, filepath, streaming=True):
"""Loads and audio file from disk.
The loaded audio will be added to the 'all' group for this director.
A cached object will be returned if the file has already been loaded.
Streaming should be used for large audio sources, such as music.
Only one instance of a streaming audio source can be played at a time.
Args:
filepath (str): Path to audio, relative to the resource directory.
Kwargs:
streaming (bool, optional): Streams the audio from disk rather
than loading the entire file into memory. Defaults to True.
Returns:
An :obj:`audio.AudioSource` object for the resource on disk.
"""
# Load the file from disk and cache it if necessary
if filepath not in self._disk_cache:
disk_file = disk.DiskLoader.load_audio(filepath, streaming)
new_source = AudioSource(disk_file, streaming)
# Cache the new source
self._disk_cache[filepath] = new_source
# Apply the default attenuation distance
new_source.attenuation_distance = self.attenuation_distance
# Add this audio source to the default group
self.add(new_source)
return self._disk_cache[filepath]
def add(self, audio_source, group='all'):
"""Adds an audio source to a group.
Grouping audio allows you to control the playback of the entire group
rather than an individual source instance. By default, the audio source
is added to the 'all' group.
Args:
audio_source (:obj:`audio.AudioSource`): The audio source to add.
Kwargs:
group (str, optional): The group to add the audio to.
Defaults to 'all'.
"""
self._groups.setdefault(group, set()).add(audio_source)
def _filter_sources(self, group='all', states=None):
"""Returns all sources in the group matching the given states.
Kwargs:
group (str, optional): Name of group to filter. Defaults to 'all'.
states (list of int, optional): List of :cls:`AudioSource` states
to filter on. If the list is not empty and a source's state is
not in the list, it will be excluded from the return value.
Returns:
An iterator containing sources in the group matching the states.
"""
# If the group does not exist, return an empty iterator
if group not in self._groups:
return iter(())
# If there are no states to filter on, return all sources in the group
if not states:
return iter(self._groups[group])
# Return sources in the group matching the states to filter on
return filter(lambda src: src.state in states, self._groups[group])
def play(self, group='all'):
"""Plays all audio sources in a group.
Kwargs:
group (str, optional): Name of group to play. Defaults to 'all'.
"""
for audio_source in self._filter_sources(group=group):
audio_source.play()
def pause(self, group='all'):
"""Pauses all playing audio sources in a group.
Audio sources which are not currently playing will be left alone.
Kwargs:
group (str, optional): Name of group to pause. Defaults to 'all'.
"""
states = [AudioSource.PLAY]
for audio_source in self._filter_sources(group=group, states=states):
audio_source.pause()
def stop(self, group='all'):
"""Stops all audio sources in a group.
Kwargs:
group (str, optional): Name of group to stop. Defaults to 'all'.
"""
states = [AudioSource.PLAY, AudioSource.PAUSE]
for audio_source in self._filter_sources(group=group, states=states):
audio_source.stop()
def resume(self, group='all'):
"""Resumes playback of all paused audio sources in a group.
Audio sources which are not currently paused will be left alone.
Kwargs:
group (str, optional): Name of group to resume. Defaults to 'all'.
"""
states = [AudioSource.PAUSE]
for audio_source in self._filter_sources(group=group, states=states):
audio_source.play()
def set_volume(self, level, group='all'):
"""Sets the volume of all audio sources in a group.
Args:
volume (float): 0 for silence, 1 for nominal volume.
Kwargs:
group (str, optional): Group to set volume of. Defaults to 'all'.
"""
for audio_source in self._filter_sources(group=group):
audio_source.volume = level
def set_attenuation_distance(self, distance, group='all'):
"""Sets the distance from the listener before player volumes attenuate.
Args:
distance (int): The distance from the listener before the source
volume attenuates. Within this distance, the volume remains
nominal. Outside this distance, the volume approaches zero.
Kwargs:
group (str, optional): Group to set distance of. Defaults to 'all'.
"""
for audio_source in self._filter_sources(group=group):
audio_source.attenuation_distance = distance
@property
def position(self):
"""The position of the listener in 2d space as a tuple-like type."""
return self._position
@position.setter
def position(self, position):
"""Sets the listener location in 2d space with a tuple-like object."""
self._position = position
# Pyglet uses 3d coordinates, convert 2d to a 3d tuple
listener = pyglet.media.get_audio_driver().get_listener()
listener.position = (position[0], position[1], 0)
@property
def master_volume(self):
"""Returns the master audio volume as a float between 0 and 1."""
listener = pyglet.media.get_audio_driver().get_listener()
return listener.volume
@master_volume.setter
def master_volume(self, level):
"""Sets the master audio playback volume.
0 for silence, 1 for nominal volume. Setting this to 1 disables audio
attenuation, ignoring the position of listeners. Set to 0.99 to
allow for audio positioning.
"""
listener = pyglet.media.get_audio_driver().get_listener()
listener.volume = level
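# Hedged usage sketch (illustration only, not part of the engine module): how the
# grouping API above is typically exercised. The file paths are made-up examples.
def _example_audio_usage():
    director = AudioDirector(master_volume=0.99, position=(0, 0))
    music = director.load('music/theme.wav')             # streamed from disk by default
    step = director.load('sfx/footstep.wav', streaming=False)
    director.add(step, group='sfx')                       # group sounds beyond the default 'all'
    director.play(group='sfx')                            # control playback per group
    director.set_volume(0.5, group='sfx')
    return music, step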
| 38.27027
| 79
| 0.631474
| 8,411
| 0.989995
| 0
| 0
| 1,105
| 0.130061
| 0
| 0
| 5,463
| 0.643008
|
6726c80fc78ce012124f71d544ed59aef2223c32
| 2,858
|
py
|
Python
|
source/windows10 system repair tool.py
|
programmer24680/windows10-system-repair-tool
|
130e9c55a7448811994a4bc04f2c3362d96cf9c9
|
[
"MIT"
] | 1
|
2021-01-25T06:44:45.000Z
|
2021-01-25T06:44:45.000Z
|
source/windows10 system repair tool.py
|
programmer24680/windows10-system-repair-tool
|
130e9c55a7448811994a4bc04f2c3362d96cf9c9
|
[
"MIT"
] | null | null | null |
source/windows10 system repair tool.py
|
programmer24680/windows10-system-repair-tool
|
130e9c55a7448811994a4bc04f2c3362d96cf9c9
|
[
"MIT"
] | null | null | null |
import os
import time
print("=====================================================================")
print(" ")
print(" STARTING SYSTEM REPAIR ")
print(" ")
print("=====================================================================")
print(" ")
print("These are the jobs this application can do for you.")
print("1.Clean The DISM Component Store")
print("2.Repair Corrupted Windows Files Using SFC")
print("3.Repair Corrupted Windows Files Using DISM")
choice = input("Enter the serial number of the job which you want this application to do (1/2/3): ")
if choice == "1":
print("Analyzing Component Store")
os.system("dism.exe /Online /Cleanup-Image /AnalyzeComponentStore")
time.sleep(3)
print("Warning: You have to cleanup component store only if necessary.")
time.sleep(3)
Confirmation = input("Do you want to cleanup the component store?(y/n): ")
if Confirmation.upper() == "Y":
os.system("dism.exe /Online /Cleanup-Image /StartComponentCleanup")
time.sleep(3)
print("Now Exiting!")
elif Confirmation.upper() == "N":
print("Skipping Component Cleanup As Per The User's Instructions")
time.sleep(3)
print("Now Exiting!")
time.sleep(1)
else:
print('You have to enter only "y" or "n"')
time.sleep(3)
print("Now Exiting!")
time.sleep(1)
elif choice == "2":
print("Starting SFC Repair Job")
os.system("SFC /SCANNOW")
time.sleep(3)
print("Operation Cpmpleted Successfully!")
time.sleep(3)
print("Now Exiting!")
elif choice == "3":
Internet_Connection = input("Do you have an active internet connection?(y/n): ")
if Internet_Connection.upper() == "N":
iso_file = input("Do you have windows10 wim file?(y/n): ")
if iso_file.upper() == "Y":
Location = input("Enter the location of the wim file: ")
print("Starting DISM")
os.system("dism.exe /Online /Cleanup-Image /RestoreHealth /Source:" + Location + " /LimitAccess")
time.sleep(3)
print("Now Exiting!")
else:
print("Sorry but you need either internet connection or wim file in order to run Dism")
time.sleep(3)
print("Now Exiting!")
elif Internet_Connection.upper() == "Y":
print("Starting DISM")
os.system("dism.exe /Online /Cleanup-Image /RestoreHealth")
time.sleep(3)
print("Now Exiting")
else:
print("You have to enter only Y/N")
time.sleep(3)
else:
print("Choice Not Valid")
time.sleep(3)
print("Now Exiting!")
| 42.029412
| 109
| 0.537089
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,650
| 0.577327
|
6728b39bc11d9e4b1e1974a7a10fb1bb5d2f22d9
| 3,368
|
py
|
Python
|
tests/test_fid_score.py
|
jwblangley/pytorch-fid
|
3d604a25516746c3a4a5548c8610e99010b2c819
|
[
"Apache-2.0"
] | 1,732
|
2018-03-05T19:20:48.000Z
|
2022-03-31T08:11:03.000Z
|
tests/test_fid_score.py
|
jwblangley/pytorch-fid
|
3d604a25516746c3a4a5548c8610e99010b2c819
|
[
"Apache-2.0"
] | 70
|
2018-06-29T07:48:43.000Z
|
2022-03-29T13:14:07.000Z
|
tests/test_fid_score.py
|
jwblangley/pytorch-fid
|
3d604a25516746c3a4a5548c8610e99010b2c819
|
[
"Apache-2.0"
] | 357
|
2018-03-14T06:35:24.000Z
|
2022-03-31T11:04:39.000Z
|
import numpy as np
import pytest
import torch
from PIL import Image
from pytorch_fid import fid_score, inception
@pytest.fixture
def device():
return torch.device('cpu')
def test_calculate_fid_given_statistics(mocker, tmp_path, device):
dim = 2048
m1, m2 = np.zeros((dim,)), np.ones((dim,))
sigma = np.eye(dim)
def dummy_statistics(path, model, batch_size, dims, device, num_workers):
if path.endswith('1'):
return m1, sigma
elif path.endswith('2'):
return m2, sigma
else:
raise ValueError
mocker.patch('pytorch_fid.fid_score.compute_statistics_of_path',
side_effect=dummy_statistics)
dir_names = ['1', '2']
paths = []
for name in dir_names:
path = tmp_path / name
path.mkdir()
paths.append(str(path))
fid_value = fid_score.calculate_fid_given_paths(paths,
batch_size=dim,
device=device,
dims=dim,
num_workers=0)
# Given equal covariance, FID is just the squared norm of difference
assert fid_value == np.sum((m1 - m2)**2)
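    # Added background note (standard Frechet distance formula, for clarity):
    #   FID = ||m1 - m2||^2 + Tr(S1 + S2 - 2*(S1 @ S2)^(1/2))
    # Here both covariances equal the identity, so the trace term is
    # Tr(I + I - 2*I) = 0 and only the squared norm of the mean difference remains.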
def test_compute_statistics_of_path(mocker, tmp_path, device):
model = mocker.MagicMock(inception.InceptionV3)()
model.side_effect = lambda inp: [inp.mean(dim=(2, 3), keepdim=True)]
size = (4, 4, 3)
arrays = [np.zeros(size), np.ones(size) * 0.5, np.ones(size)]
images = [(arr * 255).astype(np.uint8) for arr in arrays]
paths = []
for idx, image in enumerate(images):
paths.append(str(tmp_path / '{}.png'.format(idx)))
Image.fromarray(image, mode='RGB').save(paths[-1])
stats = fid_score.compute_statistics_of_path(str(tmp_path), model,
batch_size=len(images),
dims=3,
device=device,
num_workers=0)
assert np.allclose(stats[0], np.ones((3,)) * 0.5, atol=1e-3)
assert np.allclose(stats[1], np.ones((3, 3)) * 0.25)
def test_compute_statistics_of_path_from_file(mocker, tmp_path, device):
model = mocker.MagicMock(inception.InceptionV3)()
mu = np.random.randn(5)
sigma = np.random.randn(5, 5)
path = tmp_path / 'stats.npz'
with path.open('wb') as f:
np.savez(f, mu=mu, sigma=sigma)
stats = fid_score.compute_statistics_of_path(str(path), model,
batch_size=1,
dims=5,
device=device,
num_workers=0)
assert np.allclose(stats[0], mu)
assert np.allclose(stats[1], sigma)
def test_image_types(tmp_path):
in_arr = np.ones((24, 24, 3), dtype=np.uint8) * 255
in_image = Image.fromarray(in_arr, mode='RGB')
paths = []
for ext in fid_score.IMAGE_EXTENSIONS:
paths.append(str(tmp_path / 'img.{}'.format(ext)))
in_image.save(paths[-1])
dataset = fid_score.ImagePathDataset(paths)
for img in dataset:
assert np.allclose(np.array(img), in_arr)
| 32.699029
| 77
| 0.540974
| 0
| 0
| 0
| 0
| 60
| 0.017815
| 0
| 0
| 176
| 0.052257
|
6728f13a7364357219192b47721a96d415fff8dc
| 873
|
py
|
Python
|
run/client.py
|
withcouragetol/codebee-10l
|
2636b8fc1b456a85201b868201cf9c147d739031
|
[
"Apache-2.0"
] | 6
|
2018-04-13T09:48:26.000Z
|
2020-06-22T13:42:10.000Z
|
run/client.py
|
withcouragetol/codebee-10l
|
2636b8fc1b456a85201b868201cf9c147d739031
|
[
"Apache-2.0"
] | null | null | null |
run/client.py
|
withcouragetol/codebee-10l
|
2636b8fc1b456a85201b868201cf9c147d739031
|
[
"Apache-2.0"
] | 2
|
2018-09-04T07:09:50.000Z
|
2019-08-18T15:11:00.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
import time
class emsc_client:
def __init__(self):
self.host = "10.10.83.174"
self.port = 5000
self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def run(self):
try:
self.conn.connect((self.host, self.port))
while True:
self.conn.send(("来自客户端发送的数据 : " + str(time.time())).encode())
data = self.conn.recv(1024).decode()
print("来自服务端数据 :" + data + "|" + str(time.time()))
time.sleep(100)
except:
print("服务器连接异常,尝试重新连接 (5s) ...")
self.conn.close()
time.sleep(5) # 断开连接后,每5s重新连接一次
emsc_client().run()
finally:
print("客户端已关闭 ...")
if __name__=="__main__":
emsc = emsc_client()
emsc.run()
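# Hedged companion sketch (illustration only, not part of the original client): a minimal
# one-shot echo server that the client above can be pointed at for local testing.
# The host/port values are placeholders mirroring the constants in emsc_client.
def _example_echo_server(host="127.0.0.1", port=5000):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind((host, port))
    srv.listen(1)
    conn, _ = srv.accept()
    data = conn.recv(1024)                      # one message from the client
    conn.send("Echo from server: ".encode() + data)
    conn.close()
    srv.close()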
| 24.942857
| 77
| 0.514318
| 828
| 0.854489
| 0
| 0
| 0
| 0
| 0
| 0
| 247
| 0.254902
|
672a7017194500a70a969cf6e26d3c8f610f807f
| 2,765
|
py
|
Python
|
src/sonic_ax_impl/main.py
|
stepanblyschak/sonic-snmpagent
|
45edd7e689922ecf90697d099285f7cce99742c8
|
[
"Apache-2.0"
] | 13
|
2016-03-09T20:38:16.000Z
|
2021-02-04T17:39:27.000Z
|
src/sonic_ax_impl/main.py
|
stepanblyschak/sonic-snmpagent
|
45edd7e689922ecf90697d099285f7cce99742c8
|
[
"Apache-2.0"
] | 167
|
2017-02-01T23:16:11.000Z
|
2022-03-31T02:22:08.000Z
|
src/sonic_ax_impl/main.py
|
xumia/sonic-snmpagent
|
4e063e4ade89943f2413a767f24564aecfa2cd1c
|
[
"Apache-2.0"
] | 89
|
2016-03-09T20:38:18.000Z
|
2022-03-09T09:16:13.000Z
|
"""
SNMP subagent entrypoint.
"""
import asyncio
import functools
import os
import signal
import sys
import ax_interface
from sonic_ax_impl.mibs import ieee802_1ab
from . import logger
from .mibs.ietf import rfc1213, rfc2737, rfc2863, rfc3433, rfc4292, rfc4363
from .mibs.vendor import dell, cisco
# Background task update frequency ( in seconds )
DEFAULT_UPDATE_FREQUENCY = 5
event_loop = asyncio.get_event_loop()
shutdown_task = None
class SonicMIB(
rfc1213.InterfacesMIB,
rfc1213.IpMib,
rfc1213.SysNameMIB,
rfc2737.PhysicalTableMIB,
rfc3433.PhysicalSensorTableMIB,
rfc2863.InterfaceMIBObjects,
rfc4363.QBridgeMIBObjects,
rfc4292.IpCidrRouteTable,
ieee802_1ab.LLDPLocalSystemData,
ieee802_1ab.LLDPLocalSystemData.LLDPLocPortTable,
ieee802_1ab.LLDPLocalSystemData.LLDPLocManAddrTable,
ieee802_1ab.LLDPRemTable,
ieee802_1ab.LLDPRemManAddrTable,
dell.force10.SSeriesMIB,
cisco.bgp4.CiscoBgp4MIB,
cisco.ciscoPfcExtMIB.cpfcIfTable,
cisco.ciscoPfcExtMIB.cpfcIfPriorityTable,
cisco.ciscoSwitchQosMIB.csqIfQosGroupStatsTable,
cisco.ciscoEntityFruControlMIB.cefcFruPowerStatusTable,
):
"""
    If SONiC needs to define custom MIB entries, they may be specified here.
"""
def shutdown(signame, agent):
# FIXME: If the Agent dies, the background tasks will zombie.
global event_loop, shutdown_task
logger.info("Recieved '{}' signal, shutting down...".format(signame))
shutdown_task = event_loop.create_task(agent.shutdown())
def main(update_frequency=None):
global event_loop
try:
# initialize handler and set update frequency (or use the default)
agent = ax_interface.Agent(SonicMIB, update_frequency or DEFAULT_UPDATE_FREQUENCY, event_loop)
# add "shutdown" signal handlers
# https://docs.python.org/3.5/library/asyncio-eventloop.html#set-signal-handlers-for-sigint-and-sigterm
for signame in ('SIGINT', 'SIGTERM'):
event_loop.add_signal_handler(getattr(signal, signame),
functools.partial(shutdown, signame, agent))
# start the agent, wait for it to come back.
logger.info("Starting agent with PID: {}".format(os.getpid()))
event_loop.run_until_complete(agent.run_in_event_loop())
except Exception:
logger.exception("Uncaught exception in {}".format(__name__))
sys.exit(1)
finally:
if shutdown_task is not None:
            # make sure shutdown has fully completed before closing the loop
event_loop.run_until_complete(shutdown_task)
# the agent runtime has exited, close the event loop and exit.
event_loop.close()
logger.info("Goodbye!")
sys.exit(0)
| 32.151163
| 111
| 0.718626
| 808
| 0.292224
| 0
| 0
| 0
| 0
| 0
| 0
| 726
| 0.262568
|
672a72c5fc5af6da05a603f68e577831d5bb4e8d
| 8,000
|
py
|
Python
|
btk_server.py
|
bedrin/keyboard_mouse_emulate_on_raspberry
|
2f1f0cff4b5c5b2e20159d0e91542ec8a5a48e3c
|
[
"MIT"
] | null | null | null |
btk_server.py
|
bedrin/keyboard_mouse_emulate_on_raspberry
|
2f1f0cff4b5c5b2e20159d0e91542ec8a5a48e3c
|
[
"MIT"
] | null | null | null |
btk_server.py
|
bedrin/keyboard_mouse_emulate_on_raspberry
|
2f1f0cff4b5c5b2e20159d0e91542ec8a5a48e3c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
from __future__ import absolute_import, print_function
from optparse import OptionParser, make_option
import os
import sys
import uuid
import dbus
import dbus.service
import dbus.mainloop.glib
import time
import socket
from gi.repository import GLib
from dbus.mainloop.glib import DBusGMainLoop
import logging
from logging import debug, info, warning, error
import keymap
logging.basicConfig(level=logging.DEBUG)
class BTKbDevice():
# change these constants
MY_ADDRESS = "B8:27:EB:87:15:DC"
MY_DEV_NAME = "Raspberry_Keyboard"
errorCount = 0
# define some constants
P_CTRL = 17 # Service port - must match port configured in SDP record
    P_INTR = 19  # Interrupt service port - must match port configured in SDP record
# dbus path of the bluez profile we will create
# file path of the sdp record to load
SDP_RECORD_PATH = sys.path[0] + "/sdp_record.xml"
UUID = "00001124-0000-1000-8000-00805f9b34fb"
def __init__(self):
logging.info("2. Setting up BT device")
self.init_bt_device()
self.init_bluez_profile()
self.set_bt_class()
# configure the bluetooth hardware device
def init_bt_device(self):
logging.info("3. Configuring Device name " + BTKbDevice.MY_DEV_NAME)
        # set the device class to a keyboard and set the name
os.system("hciconfig hci0 up")
os.system("hciconfig hci0 class 0x0025C0")
os.system("hciconfig hci0 name " + BTKbDevice.MY_DEV_NAME)
# make the device discoverable
os.system("hciconfig hci0 piscan")
def set_bt_class(self):
logging.info("workaround. Setting bluetooth class again")
os.system("hciconfig hci0 class 0x0025C0")
# set up a bluez profile to advertise device capabilities from a loaded service record
def init_bluez_profile(self):
logging.info("4. Configuring Bluez Profile")
# setup profile options
service_record = self.read_sdp_service_record()
opts = {
"AutoConnect": True,
"ServiceRecord": service_record
}
# retrieve a proxy for the bluez profile interface
bus = dbus.SystemBus()
manager = dbus.Interface(bus.get_object(
"org.bluez", "/org/bluez"), "org.bluez.ProfileManager1")
manager.RegisterProfile("/org/bluez/hci0", BTKbDevice.UUID, opts)
logging.info("6. Profile registered ")
os.system("hciconfig hci0 -a")
# read and return an sdp record from a file
def read_sdp_service_record(self):
logging.info("5. Reading service record")
try:
fh = open(BTKbDevice.SDP_RECORD_PATH, "r")
except:
sys.exit("Could not open the sdp record. Exiting...")
return fh.read()
# listen for incoming client connections
def listen(self):
logging.info("\033[0;33m7. Waiting for connections\033[0m")
self.scontrol = socket.socket(
socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_L2CAP) # BluetoothSocket(L2CAP)
self.sinterrupt = socket.socket(
socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_L2CAP) # BluetoothSocket(L2CAP)
self.scontrol.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sinterrupt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# bind these sockets to a port - port zero to select next available
self.scontrol.bind((socket.BDADDR_ANY, self.P_CTRL))
self.sinterrupt.bind((socket.BDADDR_ANY, self.P_INTR))
# Start listening on the server sockets
self.scontrol.listen(5)
self.sinterrupt.listen(5)
self.ccontrol, cinfo = self.scontrol.accept()
print (
"\033[0;32mGot a connection on the control channel from %s \033[0m" % cinfo[0])
self.cinterrupt, cinfo = self.sinterrupt.accept()
print (
"\033[0;32mGot a connection on the interrupt channel from %s \033[0m" % cinfo[0])
# send a string to the bluetooth host machine
    def send_string(self, message):
        # track consecutive send failures on the instance (the class attribute
        # errorCount supplies the initial value) and bail out after too many errors
        try:
            self.cinterrupt.send(bytes(message))
            self.errorCount = 0
        except OSError as err:
            error(err)
            self.errorCount += 1
            if self.errorCount > 50:
                sys.exit()
class BTKbService(dbus.service.Object):
def __init__(self):
logging.info("1. Setting up service")
# create and setup our device
self.device = BTKbDevice()
# start listening for connections
self.device.listen()
self.scancodes = {
" ": "KEY_SPACE",
"→": "KEY_RIGHT",
"↵": "KEY_ENTER"
}
# the structure for a bt keyboard input report (size is 10 bytes)
self.interimstate = [
0xA1, # this is an input report
0x01, # Usage report = Keyboard
# Bit array for Modifier keys
[0x01, # Right GUI - Windows Key
0, # Right ALT
0, # Right Shift
0, # Right Control
0, # Left GUI
0, # Left ALT
0, # Left Shift
0], # Left Control
0x00, # Vendor reserved
0x00, # rest is space for 6 keys
0x00,
0x00,
0x00,
0x00,
0x00]
# start infinite loop
while True:
for x in range(0,12):
logging.info("sending ENTER (↵)")
self.send_string(0, "↵")
logging.info("sent ENTER (↵)")
time.sleep(10)
logging.info("sending RIGHT (→)")
self.send_string(0, "→")
logging.info("sent RIGHT (→)")
time.sleep(1)
logging.info("sending ENTER (↵)")
self.send_string(0, "↵")
logging.info("sent ENTER (↵)")
time.sleep(10)
logging.info("sending CTRL+R")
self.send_string(0x01, "R")
logging.info("sent CTRL+R")
time.sleep(10)
def send_key_state(self):
"""sends a single frame of the current key state to the emulator server"""
bin_str = ""
element = self.interimstate[2]
for bit in element:
bin_str += str(bit)
self.send_keys(int(bin_str, 2), self.interimstate[4:10])
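        # Added note (illustration): the modifier bit list is packed MSB-first into a
        # single byte, e.g. [0, 0, 1, 0, 0, 0, 0, 0] -> int("00100000", 2) == 0x20.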
def send_key_down(self, modifier, scancode):
"""sends a key down event to the server"""
self.interimstate[2] = [modifier, 0, 0, 0, 0, 0, 0, 0]
self.interimstate[4] = scancode
self.send_key_state()
def send_key_up(self):
"""sends a key up event to the server"""
self.interimstate[2] = [0,0,0,0,0,0,0,0]
self.interimstate[4] = 0
self.send_key_state()
def send_string(self, modifier, string_to_send):
for c in string_to_send:
cu = c.upper()
if(cu in self.scancodes):
scantablekey = self.scancodes[cu]
else:
scantablekey = "KEY_"+c.upper()
logging.info(scantablekey)
scancode = keymap.keytable[scantablekey]
self.send_key_down(modifier, scancode)
time.sleep(0.01)
self.send_key_up()
time.sleep(0.01)
def send_keys(self, modifier_byte, keys):
logging.info("Get send_keys request through dbus")
logging.info("key msg: %s", keys)
state = [ 0xA1, 1, 0, 0, 0, 0, 0, 0, 0, 0 ]
state[2] = int(modifier_byte)
count = 4
for key_code in keys:
if(count < 10):
state[count] = int(key_code)
count += 1
self.device.send_string(state)
# main routine
if __name__ == "__main__":
try:
DBusGMainLoop(set_as_default=True)
myservice = BTKbService()
loop = GLib.MainLoop()
loop.run()
except KeyboardInterrupt:
sys.exit()
| 34.188034
| 103
| 0.59525
| 7,353
| 0.916604
| 0
| 0
| 0
| 0
| 0
| 0
| 2,432
| 0.303166
|
672b2fd274da4c3abef696a1ce2183fc11422e30
| 11,479
|
py
|
Python
|
ai2thor/util/visualize_3D_bbox.py
|
KuoHaoZeng/ai2thor-1
|
7cc3295f8ac7a272078159f44b74bf61d1d2bb56
|
[
"Apache-2.0"
] | null | null | null |
ai2thor/util/visualize_3D_bbox.py
|
KuoHaoZeng/ai2thor-1
|
7cc3295f8ac7a272078159f44b74bf61d1d2bb56
|
[
"Apache-2.0"
] | null | null | null |
ai2thor/util/visualize_3D_bbox.py
|
KuoHaoZeng/ai2thor-1
|
7cc3295f8ac7a272078159f44b74bf61d1d2bb56
|
[
"Apache-2.0"
] | null | null | null |
import ai2thor.controller
import numpy as np
from PIL import Image, ImageDraw
def get_rotation_matrix(agent_rot):
#######
# Construct the rotation matrix. Ref: https://en.wikipedia.org/wiki/Rotation_matrix
#######
r_y = np.array([[np.cos(np.radians(agent_rot["y"])), 0, np.sin(np.radians(agent_rot["y"]))],
[0, 1, 0],
[-np.sin(np.radians(agent_rot["y"])), 0, np.cos(np.radians(agent_rot["y"]))]])
r_x = np.array([[1, 0, 0],
[0, np.cos(np.radians(agent_rot["x"])), -np.sin(np.radians(agent_rot["x"]))],
[0, np.sin(np.radians(agent_rot["x"])), np.cos(np.radians(agent_rot["x"]))]])
r = r_x @ r_y
return r
def project_to_agent_coordinate(pos, agent_pos, r):
#######
# Project a position from the world coordinate to the agent coordinate.
#######
pos_diff = pos - agent_pos
# since AI2THOR is left-handed coordinate system, we need to turn it to the right-handed to use the rotation matrix
pos_diff[2] *= -1
new_pos = r @ pos_diff
# turn back to the left-handed coordinate system
new_pos[2] *= -1
return new_pos
def project_to_2d(pos, half_fov, w, h):
#######
# Project a given 3D position to 2D space.
#######
pos_2d = [pos[0] / (pos[2] * np.tan(np.radians(half_fov))),
pos[1] / (pos[2] * np.tan(np.radians(half_fov)))]
# x-axis
x = int(w * ((pos_2d[0] + 1.0) / 2.0))
# y-axis
y = int(h * (1 - ((pos_2d[1] + 1.0) / 2.0)))
return [x, y]
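# Hedged sanity-check sketch (added for illustration): with a 90-degree field of view
# (half_fov = 45) and a 900x900 image, a point straight ahead of the camera projects
# to the image centre.
def _example_projection_check():
    assert project_to_2d(np.array([0.0, 0.0, 2.0]), 45, 900, 900) == [450, 450]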
def draw_3d_bbox(event):
#######
# Draw the 3D bbox in 2D RGB image by first construct the rotation matrix and get agent position by the agent pose,
# then filter out the objects which are not visible to the agent.
# Finally, project the 3D bbox to 2D space and draw it on the 2D RGB image and return the event dict with image.
#######
# get the 2D image width and height
w, h = event.metadata["screenWidth"], event.metadata["screenHeight"]
# get the FOV
half_fov = event.metadata["fov"] / 2
# get the camera rotation matrix
agent_rot = event.metadata["agent"]["rotation"]
agent_rot["x"] = event.metadata["agent"]["cameraHorizon"]
rotation_matrix = get_rotation_matrix(agent_rot)
# get the camera 3D position
agent_pos = np.array([event.metadata["cameraPosition"]["x"],
event.metadata["cameraPosition"]["y"],
event.metadata["cameraPosition"]["z"]])
# get the 2D RGB image and allocate a drawer
img = Image.fromarray(event.frame, "RGB")
draw = ImageDraw.Draw(img)
# iterate over all objects in the scene
# first classify if the object is in the view by rotated z position and instance segmentation
# then draw the 3D bbox in the 2D RGB image
for obj in event.metadata["objects"]:
# get object 3D position and rotate it to the agent coordinate
pos = np.array([obj["position"]["x"], obj["position"]["y"], obj["position"]["z"]])
new_pos = project_to_agent_coordinate(pos, agent_pos, rotation_matrix)
        # check whether the object is in front of the agent
if new_pos[2] > 0:
# classify if the object is seen by the agent (not occluded by other objects)
if obj["objectId"] in event.instance_masks.keys():
# don't draw the floor and ceiling objects
if "Floor" in obj["objectId"] or "Ceiling" in obj["objectId"]:
if "Lamp" not in obj["objectId"]:
continue
# get the object color from the instance segmentation
color = event.object_id_to_color[obj["objectId"]]
# get the 3D bbox center and size
vertices, valid = [], []
if not isinstance(obj["objectOrientedBoundingBox"], type(None)):
# get the 3D bbox 8 vertices
corner_points = obj["objectOrientedBoundingBox"]["cornerPoints"]
# project vertices to 2D image coordinate
for point in corner_points:
new_point = project_to_agent_coordinate(point, agent_pos, rotation_matrix)
if new_point[2] > 0:
valid.append(True)
else:
valid.append(False)
new_point_2d = project_to_2d(new_point, half_fov, w, h)
vertices.append(new_point_2d)
# get the 3D bbox 12 lines
lines = [[vertices[0], vertices[1]],
[vertices[2], vertices[3]],
[vertices[0], vertices[3]],
[vertices[1], vertices[2]],
[vertices[4], vertices[5]],
[vertices[6], vertices[7]],
[vertices[4], vertices[7]],
[vertices[5], vertices[6]],
[vertices[2], vertices[6]],
[vertices[3], vertices[7]],
[vertices[1], vertices[5]],
[vertices[0], vertices[4]]]
valid_lines = [valid[0] * valid[1],
valid[2] * valid[3],
valid[0] * valid[3],
valid[1] * valid[2],
valid[4] * valid[5],
valid[6] * valid[7],
valid[4] * valid[7],
valid[5] * valid[6],
valid[2] * valid[6],
valid[3] * valid[7],
valid[1] * valid[5],
valid[0] * valid[4]]
else:
if "cornerPoints" in obj["axisAlignedBoundingBox"].keys():
# get the 3D bbox 8 vertices
corner_points = obj["axisAlignedBoundingBox"]["cornerPoints"]
else:
# get the 3D bbox 8 vertices from bbox center and size
center = np.array([obj["axisAlignedBoundingBox"]["center"]["x"],
obj["axisAlignedBoundingBox"]["center"]["y"],
obj["axisAlignedBoundingBox"]["center"]["z"]])
size = np.array([obj["axisAlignedBoundingBox"]["size"]["x"],
obj["axisAlignedBoundingBox"]["size"]["y"],
obj["axisAlignedBoundingBox"]["size"]["z"]])
corner_points = []
for i in range(2):
pos_x = np.array(center)
pos_x[0] = pos_x[0] - (size[0] / 2) + (i * size[0])
for j in range(2):
pos_y = np.array(pos_x)
pos_y[1] = pos_y[1] - (size[1] / 2) + (j * size[1])
for k in range(2):
pos_z = np.array(pos_y)
pos_z[2] = pos_z[2] - (size[2] / 2) + (k * size[2])
corner_points.append(pos_z)
# project vertices to 2D image coordinate
for point in corner_points:
new_point = project_to_agent_coordinate(point, agent_pos, rotation_matrix)
if new_point[2] > 0:
valid.append(True)
else:
valid.append(False)
new_point_2d = project_to_2d(new_point, half_fov, w, h)
vertices.append(new_point_2d)
# get the 3D bbox 12 lines
lines = [[vertices[0], vertices[1]],
[vertices[2], vertices[3]],
[vertices[0], vertices[2]],
[vertices[1], vertices[3]],
[vertices[4], vertices[5]],
[vertices[6], vertices[7]],
[vertices[4], vertices[6]],
[vertices[5], vertices[7]],
[vertices[2], vertices[6]],
[vertices[3], vertices[7]],
[vertices[1], vertices[5]],
[vertices[0], vertices[4]]]
valid_lines = [valid[0] * valid[1],
valid[2] * valid[3],
valid[0] * valid[2],
valid[1] * valid[3],
valid[4] * valid[5],
valid[6] * valid[7],
valid[4] * valid[6],
valid[5] * valid[7],
valid[2] * valid[6],
valid[3] * valid[7],
valid[1] * valid[5],
valid[0] * valid[4]]
lines = np.array(lines)
lines = np.reshape(lines, (-1, 4))
valid_lines = np.array(valid_lines)
valid_lines = np.reshape(valid_lines, (-1, 1))
# draw the 3D bbox 12 lines in the 2D RGB image
for iii, line in enumerate(lines):
if valid_lines[iii]:
draw.line((line[0], line[1], line[2], line[3]), fill=color, width=2)
# store the result back to the event
bbox_frame = np.array(img)
event.bbox_3d_frame = bbox_frame
return event
if __name__ == "__main__":
# give the height and width of the 2D image and scene id
w, h = 900, 900
scene = "FloorPlan2{:02d}_physics".format(1)
# allocate controller and initialize the scene and agent
# local_path = "src/ai2thor/unity/builds/thor-local-OSXIntel64.app/Contents/MacOS/AI2-Thor"
local_path = ""
controller = ai2thor.controller.Controller(local_path=local_path)
_ = controller.start(width=w, height=h)
_ = controller.reset(scene)
event = controller.step(dict(action='Initialize',
gridSize=0.25,
renderClassImage=True,
renderObjectImage=True,
renderDepthImage=True,
fieldOfView=90))
# do something then draw the 3D bbox in 2D image
event = controller.step(dict(action="MoveAhead"))
event = controller.step(dict(action="MoveAhead"))
event = controller.step(dict(action="Rotate", rotation=dict(x=0, y=30, z=0)))
event = draw_3d_bbox(event)
img = Image.fromarray(event.bbox_3d_frame, "RGB")
img.save("./output1.png")
event = controller.step(dict(action="LookDown"))
event = draw_3d_bbox(event)
img = Image.fromarray(event.bbox_3d_frame, "RGB")
img.save("./output2.png")
event = controller.step(dict(action="LookDown"))
event = draw_3d_bbox(event)
img = Image.fromarray(event.bbox_3d_frame, "RGB")
img.save("./output3.png")
| 46.100402
| 119
| 0.4787
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,732
| 0.238
|
672b4006ae24930b53edb66efd8fb73b92773911
| 3,754
|
py
|
Python
|
sa/profiles/ElectronR/KO01M/get_metrics.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 84
|
2017-10-22T11:01:39.000Z
|
2022-02-27T03:43:48.000Z
|
sa/profiles/ElectronR/KO01M/get_metrics.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 22
|
2017-12-11T07:21:56.000Z
|
2021-09-23T02:53:50.000Z
|
sa/profiles/ElectronR/KO01M/get_metrics.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 23
|
2017-12-06T06:59:52.000Z
|
2022-02-24T00:02:25.000Z
|
# ---------------------------------------------------------------------
# ElectronR.KO01M.get_metrics
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.sa.profiles.Generic.get_metrics import Script as GetMetricsScript, metrics
class Script(GetMetricsScript):
name = "ElectronR.KO01M.get_metrics"
@metrics(["Environment | Sensor Status"], volatile=False, access="S") # SNMP version
def get_sensor_status(self, metrics):
for metric in metrics:
value = 1
if metric.ifindex == 100:
continue
elif metric.ifindex == 140:
temp = self.snmp.get("1.3.6.1.4.1.35419.20.1.140.0", cached=True)
if -55 < temp < 600:
value = 0
elif metric.ifindex == 160:
impulse = self.snmp.get("1.3.6.1.4.1.35419.20.1.160.0", cached=True)
if impulse != 0:
value = 0
else:
res = self.snmp.get("1.3.6.1.4.1.35419.20.1.10%s.0" % metric.ifindex)
if res == 1:
value = 0
port = metric.labels[0].rsplit("::", 1)[-1]
self.set_metric(
id=("Environment | Sensor Status", metric.labels),
labels=[f"noc::sensor::{port}"],
value=value,
)
@metrics(["Environment | Temperature"], volatile=False, access="S") # SNMP version
def get_temperature(self, metrics):
for metric in metrics:
if metric.ifindex == 140:
value = self.snmp.get("1.3.6.1.4.1.35419.20.1.%s.0" % metric.ifindex, cached=True)
port = metric.labels[0].rsplit("::", 1)[-1]
self.set_metric(
id=("Environment | Temperature", metric.labels),
labels=[f"noc::module::{port}", f"noc::sensor::{port}"],
value=value,
multi=True,
)
@metrics(["Environment | Voltage"], volatile=False, access="S") # SNMP version
def get_voltage(self, metrics):
for metric in metrics:
value = self.snmp.get("1.3.6.1.4.1.35419.20.1.%s.0" % metric.ifindex)
port = metric.labels[0].rsplit("::", 1)[-1]
self.set_metric(
id=("Environment | Voltage", metric.labels),
labels=[f"noc::module::{port}", f"noc::sensor::{port}"],
value=value,
multi=True,
)
@metrics(["Environment | Pulse"], volatile=False, access="S") # SNMP version
def get_pulse(self, metrics):
for metric in metrics:
if metric.ifindex == 160:
value = self.snmp.get("1.3.6.1.4.1.35419.20.1.%s.0" % metric.ifindex, cached=True)
port = metric.labels[0].rsplit("::", 1)[-1]
self.set_metric(
id=("Environment | Pulse", metric.labels),
labels=[f"noc::sensor::{port}"],
value=value,
)
@metrics(["Environment | Power | Input | Status"], volatile=False, access="S") # SNMP version
def get_power_input_status(self, metrics):
for metric in metrics:
value = self.snmp.get("1.3.6.1.4.1.35419.20.1.10%s.0" % metric.ifindex, cached=True)
port = metric.labels[0].rsplit("::", 1)[-1]
self.set_metric(
id=("Environment | Power | Input | Status", metric.labels),
labels=[f"noc::sensor::{port}"],
value=0 if value == 1 else 1,
)
| 43.149425
| 98
| 0.485615
| 3,338
| 0.889185
| 0
| 0
| 3,236
| 0.862014
| 0
| 0
| 1,094
| 0.291422
|
672ca5a86d4634cb29b428fe498eec5d2e6591d7
| 17,041
|
py
|
Python
|
clustering.py
|
t20100/ccCluster
|
9645d80dcfe579c23b3d52e8d536a39d469b184a
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
clustering.py
|
t20100/ccCluster
|
9645d80dcfe579c23b3d52e8d536a39d469b184a
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
clustering.py
|
t20100/ccCluster
|
9645d80dcfe579c23b3d52e8d536a39d469b184a
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
from __future__ import print_function
__author__ = "Gianluca Santoni"
__copyright__ = "Copyright 20150-2019"
__credits__ = ["Gianluca Santoni, Alexander Popov"]
__license__ = ""
__version__ = "1.0"
__maintainer__ = "Gianluca Santoni"
__email__ = "gianluca.santoni@esrf.fr"
__status__ = "Beta"
from scipy.cluster import hierarchy
import scipy
import matplotlib.pyplot as plt
import os
import numpy as np
import subprocess
import collections
import operator
import stat
import json
import random
class Clustering():
"""
parse cc_calc output and perform HCA
at each call, it generates the distance matrix
You get the dendrogram through Clustering.tree()
"""
def __init__(self, ccCalcOutput):
self.ccFile= ccCalcOutput
self.CurrentDir = os.getcwd()
self.ccTable, self.Dimension = self.parseCCFile()
self.createLabels()
self.previousProcess()
def previousProcess(self):
"""
Lists all the clusters which have already been processed from a log file.
Updates the global variable alreadyDone
"""
self.alreadyDone= []
if os.path.isfile(os.getcwd()+'/.cc_cluster.log'):
with open(os.getcwd()+'/.cc_cluster.log') as log:
for line in log:
L = line.split(',')
self.alreadyDone.append([L[1], L[2].strip(), L[3].strip()])
def parseCCFile(self):
"""
        Gets data from the ccCalc output file and populates a numpy array with the distances
"""
with open(self.ccFile, 'r') as f:
dataArr = None
data=[]
Index = []
for line in f:
if line.strip() == 'Correlation coefficients':
break
for line in f:
dataline= line.rstrip().split()
data.append(dataline)
Index.append(int(dataline[0])+1)
Index.append(int(dataline[1])+1)
Dimension=max(Index)
dataArr = np.array(data,dtype=(float))
return dataArr, Dimension
def createLabels(self):
"""
Gets the labels from the ccCalc output with the input file names
"""
self.labelList= []
with open(self.ccFile) as f:
for line in f:
if line.strip() == 'Labels':
break
for line in f:
if line.strip() == 'Correlation coefficients':
break
goodLine = line.split()
self.labelList.append("%s"%(goodLine[2].strip('\n')))
return self.labelList
def inputType(self):
"""
return input file type. Either mtz or HLK
"""
element = self.labelList[0]
extension = element.split('.')[-1]
print(extension)
return extension
def tree(self):
"""
        Returns the HCA dendrogram, using the complete linkage method
        (a toy sketch of the underlying scipy calls follows this class).
"""
data = self.ccTable
Matrix=np.zeros((self.Dimension,self.Dimension))
reducedArray=[]
for line in data:
#print line
            if line is not None and len(line) != 0:
Matrix[int(line[0]),int(line[1])]= line[2]
Matrix[int(line[1]),int(line[0])]= line[2]
for x in range(0,self.Dimension):
for y in range(x+1,self.Dimension):
reducedArray.append(Matrix[x,y])
Distances = np.array(reducedArray, dtype=(float))
self.Tree =hierarchy.linkage(Distances, 'complete')
return self.Tree
def avgTree(self):
"""
        Returns the HCA dendrogram, using the average linkage method
"""
data = self.ccTable
Matrix=np.zeros((self.Dimension,self.Dimension))
reducedArray=[]
for line in data:
#print line
            if line is not None and len(line) != 0:
Matrix[int(line[0]),int(line[1])]= line[2]
Matrix[int(line[1]),int(line[0])]= line[2]
for x in range(0,self.Dimension):
for y in range(x+1,self.Dimension):
reducedArray.append(Matrix[x,y])
Distances = np.array(reducedArray, dtype=(float))
self.Tree =hierarchy.linkage(Distances, 'average')
return self.Tree
def flatClusterPrinter(self, thr, labelsList, anomFlag):
"""
Prints the flat cluster at a chosen threshold to a .json file
"""
FlatC=hierarchy.fcluster(self.Tree, thr, criterion='distance')
counter=collections.Counter(FlatC)
clusterToJson={}
clusterToJson['HKL']=[]
Best = max(counter.items(), key=operator.itemgetter(1))[0]
clusterFile = open(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/flatCluster.json'%(float(thr),Best, anomFlag), 'w')
for cluster, hkl in zip(FlatC, labelsList):
clusterToJson['HKL'].append({
'input_file':hkl,
'cluster':str(cluster)
})
print(clusterToJson)
j = json.dumps(clusterToJson, indent=4)
print(j, file=clusterFile)
def thrEstimation(self):
"""
Estimates the threshold for optimal clustering, based on the multiplicity of the biggest cluster
"""
x = 0.00
dx = 0.05
countsList = []
x_list = []
while x < 1:
FlatC = hierarchy.fcluster(self.Tree, x, criterion='distance')
counter=collections.Counter(FlatC)
Best = max(counter.items(), key=operator.itemgetter(1))[0]
countsList.append(counter[Best])
x+= dx
x_list.append(x)
dy = np.diff(countsList)
for a, b in zip (x_list, dy):
if b == max(dy):
return a
def checkMultiplicity(self, thr):
"""
Prints the multiplicity of the biggest cluster at a given threshold
"""
FlatC = hierarchy.fcluster(self.Tree, thr, criterion='distance')
counter=collections.Counter(FlatC)
Best = max(counter.items(), key=operator.itemgetter(1))[0]
print('You are clustering with a threshold of %s'%(thr))
print('The biggest cluster contains %s datasets from a total of %s'%(counter[Best], len(self.labelList)))
def completenessEstimation(self):
x = 0.00
dx = 0.05
while x > 1:
FlatC = hierarchy.fcluster(self.Tree, x, criterion='distance')
counter=collections.Counter(FlatC)
Best = max(counter.items(), key=operator.itemgetter(1))[0]
# the list self.ToProcess is needed by the scaling routines
# fix all this new mess!
    def whatToProcess(self, thr):
        FlatC = hierarchy.fcluster(self.Tree, thr, criterion='distance')
counter=collections.Counter(FlatC)
Best = max(counter.items(), key=operator.itemgetter(1))[0]
Process = True
#change checkboxes to standard variables
if Process:
self.ToProcess = [Best]
else:
self.ToProcess = set(Clusters)
for key in self.ToProcess:
if counter[key]==1:
self.ToProcess = [x for x in self.ToProcess if x != key]
return self.ToProcess
#Run XSCALE to merge the biggest cluster
#input files
#!!!! Will need to define the processes to run externally
#renaming function! Edit the calls in ccCluster accordingly
def prepareXSCALE(self, anomFlag, thr):
FlatC = hierarchy.fcluster(self.Tree, thr, criterion='distance')
counter=collections.Counter(FlatC)
Best = max(counter.items(), key=operator.itemgetter(1))[0]
Process = True
#change checkboxes to standard variables
if Process:
self.ToProcess = [Best]
else:
self.ToProcess = set(Clusters)
for key in self.ToProcess:
if counter[key]==1:
self.ToProcess = [x for x in self.ToProcess if x != key]
for x in self.ToProcess:
if [thr,x, anomFlag] not in self.alreadyDone:
os.mkdir(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s'%(float(thr),x, anomFlag))
Xscale=open(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/XSCALE.INP'%(float(thr),x, anomFlag), 'a')
Pointless=open(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/launch_pointless.sh'%(float(thr),x,anomFlag ), 'a')
print('OUTPUT_FILE=scaled.hkl',file=Xscale)
print('MERGE= TRUE', file=Xscale)
print('pointless hklout clustered.mtz << eof', file=Pointless)
if anomFlag=='ano':
print('FRIEDEL\'S_LAW= FALSE', file=Xscale)
elif anomFlag=='no_ano':
print('FRIEDEL\'S_LAW= TRUE', file=Xscale)
Xscale.close()
Pointless.close()
for cluster, filename in zip(FlatC,self.labelList):
if cluster in self.ToProcess:
OUT = open(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/XSCALE.INP'%(float(thr),cluster,anomFlag), 'a')
Pointless=open(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/launch_pointless.sh'%(float(thr),cluster,anomFlag), 'a')
print ('INPUT_FILE= ../%s'%(filename), file=OUT)
#print ('INCLUDE_RESOLUTION_RANGE=20, 1.8', file=OUT)
print ('MINIMUM_I/SIGMA= 0', file=OUT)
print ('XDSIN ../%s'%(filename), file= Pointless)
OUT.close()
Pointless.close()
def preparePointless(self, anomFlag, thr):
FlatC = hierarchy.fcluster(self.Tree, thr, criterion='distance')
counter=collections.Counter(FlatC)
Best = max(counter.items(), key=operator.itemgetter(1))[0]
Process = True
#change checkboxes to standard variables
if Process:
self.ToProcess = [Best]
else:
self.ToProcess = set(Clusters)
for key in self.ToProcess:
if counter[key]==1:
self.ToProcess = [x for x in self.ToProcess if x != key]
for x in self.ToProcess:
if [thr,x, anomFlag] not in self.alreadyDone:
os.mkdir(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s'%(float(thr),x, anomFlag))
Pointless=open(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/launch_pointless.sh'%(float(thr),x,anomFlag ), 'a')
print('pointless hklout clustered.mtz << eof', file=Pointless)
print('XMLOUT pointlessLog.xml', file=Pointless)
Pointless.close()
for cluster, filename in zip(FlatC,self.labelList):
if cluster in self.ToProcess:
Pointless=open(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/launch_pointless.sh'%(float(thr),cluster,anomFlag), 'a')
print ('HKLIN ../%s'%(filename), file= Pointless)
Pointless.close()
#Run XSCALE in the pre-determined folders.
def scaleAndMerge(self, anomFlag, thr):
newProcesses=[]
for x in self.ToProcess:
if [thr,x, anomFlag] not in self.alreadyDone:
#self.createDendrogram(thr)
#plt.savefig(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/Dendrogram.png'%(float(thr),x,anomFlag))
P= subprocess.Popen('xscale_par',cwd=self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/'%(float(thr), x, anomFlag))
P.wait()
# print('Cluster, %s , %s , %s'%(float(thr),x, anomFlag), file=Log)
newProcesses.append([thr,x, anomFlag])
#run Pointless in each folder from the processing List
def pointlessRun(self, anomFlag, thr):
newProcesses=[]
for x in self.ToProcess:
if [thr,x, anomFlag] not in self.alreadyDone:
Pointless=open(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/launch_pointless.sh'%(float(thr),x,anomFlag), 'a')
print('COPY \n bg\n TOLERANCE 4 \n eof', file= Pointless)
Pointless.close()
st = os.stat(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/launch_pointless.sh'%(float(thr),x,anomFlag ))
os.chmod(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/launch_pointless.sh'%(float(thr),x,anomFlag ), st.st_mode | 0o111)
P = subprocess.Popen(self.CurrentDir+'/cc_Cluster_%.2f_%s_%s/launch_pointless.sh > pointless.log')
P.wait()
#run aimless on the output from pointless
#will run in folders with clustered.mtz file available.
#TBD: fix directories paths into the aimless.inp file
#also set all the proper input values into the function call
#path to aimless executable to be verified.
def aimlessRun(self, anomFlag, thr):
for x in self.toProcess:
if [thr,x, anomFlag] not in self.alreadyDone:
f1= open("aimless.inp", 'w')
runScript='''#!/bin/bash
source /opt/pxsoft/ccp4/vdefault/linux-x86_64/ccp4-7.0/setup-scripts/ccp4.setup-sh
aimless HKLIN {infile} << EOF
HKLOUT {setname}_aimless.mtz'
RESOLUTION LOW {resLow} HIGH {resHigh}
OUTPUT MERGED
anomalous {anomflag}
EOF
#truncate: generate Fs
truncate hklin {setname}_aimless.mtz hklout {setname}_tr.mtz <<EOF-trunc
truncate yes
EOF-trunc
#unique: generate unique reflection set for rfree
unique HKLOUT x_unq.mtz << EOF
CELL {cell}
SYMMETRY '{SpaceGroup}'
LABOUT F=FUNI SIGF=SIGFUNI
RESOLUTION {resHigh}
EOF
#freerflag: generate free reflections
freerflag HKLIN x_unq.mtz HKLOUT x_FreeR_unq.mtz <<EOF
FREERFRAC 0.05
END
EOF
#cad: combine free reflections with data
cad HKLIN1 x_FreeR_unq.mtz HKLIN2 {setname}_tr.mtz HKLOUT {setname}_cad.mtz<<EOF
LABI FILE 1 E1=FreeR_flag
LABI FILE 2 ALLIN
END
EOF
freerflag HKLIN {setname}_cad.mtz HKLOUT {setname}_scaled.mtz <<EOF
COMPLETE FREE=FreeR_flag
END
EOF
'''.format(infile = 'clustered.mtz', setname = 'clustered', resHigh = '1.0', resLow = '60', anomflag = 'ON', cell = cell, SpaceGroup = SpaceGroup)
f1.write(runScript)
f1.close()
os.chmod(CurrentDir + '/aimless.inp', st.st_mode | 0o111)
subprocess.call('./aimless.inp > aimless.log', cwd=CurrentDir, shell=True)
# A function to investigate the influence of reference file in merging results
def shuffleXscale(self, anomFlag, thr):
FlatC = hierarchy.fcluster(self.Tree, thr, criterion='distance')
Log = open(self.CurrentDir+'/.cc_cluster.log', 'a')
counter=collections.Counter(FlatC)
Best = max(counter.items(), key=operator.itemgetter(1))[0]
print(Best)
Process = True
xscaleInputFiles=[]
#change checkboxes to standard variables
if Process:
self.ToProcess = [Best]
else:
self.ToProcess = set(Clusters)
for key in self.ToProcess:
if counter[key]==1:
self.ToProcess = [x for x in self.ToProcess if x != key]
#Prepare list of filenames to shuffle over
for cluster, filename in zip(FlatC, self.labelList):
if cluster in self.ToProcess:
xscaleInputFiles.append(filename)
print(xscaleInputFiles)
#run XSCALE with random ordered files 20 times
for x in range(0,20):
os.mkdir(self.CurrentDir+'/thr_%.2f_run_%s'%(float(thr),x))
Xscale=open(self.CurrentDir+'/thr_%.2f_run_%s/XSCALE.INP'%(float(thr),x), 'a')
print('OUTPUT_FILE=scaled.hkl',file=Xscale)
print('MERGE= TRUE', file=Xscale)
print('FRIEDEL\'S_LAW=TRUE', file=Xscale )
random.shuffle(xscaleInputFiles)
for hkl in xscaleInputFiles:
print ('INPUT_FILE= ../%s'%(hkl), file=Xscale)
print ('MINIMUM_I/SIGMA= 0', file=Xscale)
P= subprocess.Popen('xscale_par',cwd=self.CurrentDir+'/thr_%.2f_run_%s'%(float(thr),x))
P.wait()
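# Hedged toy sketch (illustration only, independent of the class above): the same scipy
# calls used by Clustering.tree() and the fcluster-based methods, applied to a hand-made
# condensed distance vector for four datasets.
def _example_toy_clustering():
    toy_distances = np.array([0.1, 0.8, 0.9, 0.7, 0.85, 0.2])   # condensed upper triangle, 4 items
    linkage = hierarchy.linkage(toy_distances, 'complete')
    flat = hierarchy.fcluster(linkage, 0.4, criterion='distance')
    return flat  # items 0,1 and 2,3 end up in the same flat clusters, e.g. array([1, 1, 2, 2])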
def main():
from optparse import OptionParser
    parser = OptionParser(usage="%prog --XSCALEfile=<LP filename> --outname=<output dendrogram>")
    parser.add_option("-o", "--outname", dest="outname", default='Dendrogram', help="output dendrogram file name")
parser.add_option("-t", "--threshold", dest="threshold", default='0.4', help="Distance threshold for clustering")
parser.add_option("-c", "--count",action="store_true", dest="count", default=False, help="Counts datasets in the biggest cluster and exit")
(options, args) = parser.parse_args()
thr = float(options.threshold)
CC = Clustering('Cluster_log.txt')
link = CC.tree()
if options.count:
CC.checkMultiplicity(thr)
print(CC.thrEstimation())
else:
CC.checkMultiplicity(thr)
        CC.prepareXSCALE('ano', thr)
        CC.scaleAndMerge('ano', thr)
if __name__== '__main__':
main()
| 38.123043
| 146
| 0.589813
| 15,621
| 0.916672
| 0
| 0
| 0
| 0
| 0
| 0
| 5,134
| 0.301273
|
672fde99dcb82eabf8b0425ec9a63d4e04194da7
| 9,992
|
py
|
Python
|
wrappaconda.py
|
nckz/wrappaconda
|
43203be36f2de17fdf8fe77c151c5628bd98321f
|
[
"BSD-2-Clause"
] | null | null | null |
wrappaconda.py
|
nckz/wrappaconda
|
43203be36f2de17fdf8fe77c151c5628bd98321f
|
[
"BSD-2-Clause"
] | null | null | null |
wrappaconda.py
|
nckz/wrappaconda
|
43203be36f2de17fdf8fe77c151c5628bd98321f
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# Author: Nick Zwart
# Date: 2015oct31
from __future__ import print_function
import os
import sys
import stat
import errno
import shutil
import optparse
import traceback
import subprocess
wrappaconda_name_string = 'Wr[App]-A-Conda'
class AppAtizer(object):
def __init__(self):
# tmp paths
self._downloads_prefix = os.path.expanduser('~/Downloads')
if not os.path.isdir(self._downloads_prefix):
self._downloads_prefix = './' # use cwd
# try for wget or curl
self._get = self._getDownloaderCommand()
# cli input
self._parseUserInput()
# .app paths
self._apppath = '/Applications/'+self._name+'.app'
self._contents_prefix = self._apppath + "/Contents"
self._resource_prefix = self._contents_prefix + "/Resources"
self._info_plist_path = self._contents_prefix + "/Info.plist"
self._pkg_info_path = self._contents_prefix + "/PkgInfo"
self._macos_prefix = self._contents_prefix + "/MacOS"
self._cfbundle_icon_filename = 'app.icns'
# Wr[App]-A-Conda paths
self._id_file_path = self._resource_prefix + "/wrappaconda"
# miniconda paths
self._miniconda_prefix = self._resource_prefix + "/miniconda"
self._python_path = self._miniconda_prefix + "/bin/python"
self._conda_path = self._miniconda_prefix + "/bin/conda"
def _parseUserInput(self):
# get user input
parser = optparse.OptionParser()
parser.add_option("-n", "--name", dest='name', help="[REQUIRED] The name of this app.")
parser.add_option("-t", "--target", dest='target', help="[REQUIRED] The binary or script found in Anaconda\'s $PREFIX/bin.")
parser.add_option("-v", "--version", dest='version', help="The version of this app.", default='0.1')
parser.add_option("-i", "--icon", dest='icon_file', help="Icon file to be used in the bundle.")
parser.add_option("-c", "--channel", dest='channel', help="The Anaconda.org package channel(s), or url(s) separated by commas (e.g. nckz,https://conda.anaconda.org/gpi/channel/rc) (defaults to \'defaults\')", default='defaults')
parser.add_option("-p", "--package", dest='package', help="The package name(s) separated by commas (e.g. scipy=0.15.0,curl=7.26.0,pip).")
parser.add_option("-r", "--rootenv", dest='rootenv', help="A root environment file (created using: \'conda list --export\').")
parser.add_option("--py", dest='py_ver', help="Choose the distro python version using the major and minor version numbers (defaults to 3.5).", default='3.5')
parser.add_option("-o", "--overwrite", action="store_true", dest='overwrite', help="Overwrite an existing app with the same \'name\'. Use caution!!!")
options, args = parser.parse_args()
try:
# check for input errors
assert options.name is not None
assert options.target is not None
if options.icon_file is not None:
assert os.path.isfile(options.icon_file)
assert options.icon_file.endswith(".icns")
if options.rootenv is not None:
assert os.path.isfile(options.rootenv)
except:
parser.print_help()
raise
self._name = options.name
self._version = options.version
self._target = options.target
self._icon_file = options.icon_file
self._channel = options.channel
self._package = options.package
self._root_env = options.rootenv
self._py_ver = options.py_ver
self._overwrite = options.overwrite
def _getDownloaderCommand(self):
# check for installed utilities
try:
subprocess.check_output('command -v wget >/dev/null 2>&1;', shell=True)
return 'wget --directory-prefix ' + self._downloads_prefix + ' -c {}'
except:
try:
subprocess.check_output('command -v curl >/dev/null 2>&1;', shell=True)
return 'cd '+self._downloads_prefix+' && curl --fail -O -C - {} '
except:
print("This script requires \'wget\' or \'curl\' and neither were found.")
raise
def appPath(self):
return self._apppath
def deleteExistingApp(self):
if os.path.exists(self._apppath):
if self._overwrite:
print("Removing existing path: "+self._apppath)
try:
with open(self._id_file_path, 'r') as f:
assert f.read().count(wrappaconda_name_string) > 0
shutil.rmtree(self._apppath)
except:
print("The app \'"+self._apppath+"\' cannot be verified for deletion. You may have to remove it manually. Skipping...")
else:
print("The app \'"+self._apppath+"\' already exists, exiting...")
def buildAppSkeleton(self):
# build the .app directory and supporting files
try:
os.mkdir(self._apppath)
os.mkdir(self._contents_prefix)
os.mkdir(self._macos_prefix)
os.mkdir(self._resource_prefix)
except OSError as e:
if e.errno == errno.EPERM:
print("You must have root permissions to write to /Applications.")
raise
def copyIconFile(self):
if self._icon_file is not None:
shutil.copy(self._icon_file, self._resource_prefix + '/' + self._cfbundle_icon_filename)
def writeInfoPList(self):
# http://stackoverflow.com/questions/7404792/how-to-create-mac-application-bundle-for-python-script-via-python
CFBundleName = self._name
CFBundleVersion = self._version
CFBundleIconFile = self._cfbundle_icon_filename
CFBundleGetInfoString = CFBundleName + " " + CFBundleVersion
CFBundleShortVersionString = CFBundleGetInfoString
CFBundleIdentifier = "com.gpilab."+CFBundleName
CFBundleExecutable = self._target
info_plist = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleExecutable</key>
<string>%s</string>
<key>CFBundleGetInfoString</key>
<string>%s</string>
<key>CFBundleIconFile</key>
<string>%s</string>
<key>CFBundleIdentifier</key>
<string>%s</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleName</key>
<string>%s</string>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleShortVersionString</key>
<string>%s</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
<string>%s</string>
<key>NSAppleScriptEnabled</key>
<string>YES</string>
<key>NSMainNibFile</key>
<string>MainMenu</string>
<key>NSPrincipalClass</key>
<string>NSApplication</string>
</dict>
</plist>
"""
with open(self._info_plist_path, "w") as f:
f.write(info_plist % (CFBundleExecutable, CFBundleGetInfoString, CFBundleIconFile, CFBundleIdentifier, CFBundleName, CFBundleShortVersionString, CFBundleVersion))
def writePkgInfo(self):
with open(self._pkg_info_path, "w") as f:
f.write("APPL????")
def writeWrappacondaIDFile(self):
with open(self._id_file_path, "w") as f:
f.write("This app was generated by " + wrappaconda_name_string)
def setupMiniconda(self):
# anaconda website and miniconda package info
# -python 3 is the default miniconda
MINICONDA_NAME='Miniconda3'
if float(self._py_ver) < 3:
MINICONDA_NAME='Miniconda'
MINICONDA_WEB='https://repo.continuum.io/miniconda/'
MINICONDA_OSX=MINICONDA_NAME+'-latest-MacOSX-x86_64.sh'
# download miniconda
try:
cmd = self._get.format(MINICONDA_WEB+MINICONDA_OSX)
print(cmd)
subprocess.check_output(cmd, shell=True)
except:
print("Failed to download miniconda.")
# install miniconda
try:
os.chmod(self._downloads_prefix+'/'+MINICONDA_OSX, 0o777)
cmd = self._downloads_prefix+'/'+MINICONDA_OSX+' -b -p '+self._miniconda_prefix
print(cmd)
subprocess.check_output(cmd, shell=True)
except:
print("Failed to run miniconda.")
# install central conda package
if self._package:
try:
python = ' python=='+self._py_ver+' '
conda_cmd = self._conda_path+' install -y -c '+' -c '.join(self._channel.split(','))+' '+' '.join(self._package.split(',')) + python
if self._root_env:
conda_cmd += ' --file '+self._root_env
print(conda_cmd)
subprocess.check_output(conda_cmd, shell=True)
subprocess.check_output(self._conda_path+' clean -t -i -p -l -y', shell=True)
except:
print("Failed to run conda.")
raise
def linkTarget(self):
# check for the existence of the target
try:
assert os.path.isfile(self._miniconda_prefix + '/bin/' + self._target)
os.link(self._miniconda_prefix + '/bin/' + self._target, self._macos_prefix + '/' + self._target)
except:
print(self._target, ' doesn\'t exist in Miniconda bin.')
raise
def main():
make = AppAtizer()
make.deleteExistingApp()
make.buildAppSkeleton()
make.writeWrappacondaIDFile()
make.copyIconFile()
make.setupMiniconda()
make.linkTarget()
make.writeInfoPList()
make.writePkgInfo()
print(make.appPath() + " has been created.")
if __name__ == '__main__':
main()
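For context, a hedged example of how this builder script might be invoked from the command line; the script filename, app name, target and package list below are illustrative placeholders, and sudo is shown only because writing to /Applications typically requires it:
# Hypothetical invocation (all values are placeholders):
#   sudo python wrappaconda.py --name MyTool --target mytool \
#       --package mytool,pip --channel defaults --py 3.5 --overwrite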
| 39.650794
| 236
| 0.618995
| 9,394
| 0.940152
| 0
| 0
| 0
| 0
| 0
| 0
| 3,503
| 0.35058
|
673174539407b646c8c0d2d08573c676c84a2fa0
| 557
|
py
|
Python
|
watchtower/wallet/wallet.py
|
paytaca/watchtower-py
|
a9a4fb83ba4a9a15379efdd41bb91546821b4be8
|
[
"MIT"
] | null | null | null |
watchtower/wallet/wallet.py
|
paytaca/watchtower-py
|
a9a4fb83ba4a9a15379efdd41bb91546821b4be8
|
[
"MIT"
] | null | null | null |
watchtower/wallet/wallet.py
|
paytaca/watchtower-py
|
a9a4fb83ba4a9a15379efdd41bb91546821b4be8
|
[
"MIT"
] | null | null | null |
import requests
class Wallet(object):
def __init__(self, testnet=False):
if testnet:
self.base_url = 'https://testnet.watchtower.cash/api/'
else:
self.base_url = 'https://watchtower.cash/api/'
def _get_utxos(self, wallet_hash, amount):
url = self.base_url + f'utxo/wallet/{wallet_hash}'
resp = requests.get(url)
print(resp.status_code)
print(resp.json())
def send(self, amount):
self._get_utxos('abcd0123456', amount)
print(f"Sending {amount} BCH...")
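A minimal usage sketch of the class above; the import path and the amount are assumptions, and send() is still a stub that only prints the UTXO response:
# assuming the module above is importable, e.g. from watchtower.wallet.wallet
from watchtower.wallet.wallet import Wallet

w = Wallet(testnet=True)     # point at testnet.watchtower.cash
w.send(0.001)                # queries the UTXO endpoint with the placeholder hash, then prints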
| 26.52381
| 66
| 0.606822
| 538
| 0.965889
| 0
| 0
| 0
| 0
| 0
| 0
| 135
| 0.24237
|
6733155cbc1b3ee12cbd1d7e111f38daa85f1326
| 858
|
py
|
Python
|
test/unit/test_finalize.py
|
phated/binaryen
|
50e66800dc28d67ea1cc88172f459df1ca96507d
|
[
"Apache-2.0"
] | 5,871
|
2015-11-13T19:06:43.000Z
|
2022-03-31T17:40:21.000Z
|
test/unit/test_finalize.py
|
sthagen/binaryen
|
ce592cbdc8e58f36e7f39a3bd24b403f43adae34
|
[
"Apache-2.0"
] | 2,743
|
2015-11-13T03:46:49.000Z
|
2022-03-31T20:27:05.000Z
|
test/unit/test_finalize.py
|
sthagen/binaryen
|
ce592cbdc8e58f36e7f39a3bd24b403f43adae34
|
[
"Apache-2.0"
] | 626
|
2015-11-23T08:00:11.000Z
|
2022-03-17T01:58:18.000Z
|
from scripts.test import shared
from . import utils
class EmscriptenFinalizeTest(utils.BinaryenTestCase):
def do_output_test(self, args):
# without any output file specified, don't error, don't write the wasm,
# but do emit metadata
p = shared.run_process(shared.WASM_EMSCRIPTEN_FINALIZE + [
self.input_path('empty_lld.wat'), '--global-base=1024'
] + args, capture_output=True)
# metadata is always present
self.assertIn('{', p.stdout)
self.assertIn('}', p.stdout)
return p.stdout
def test_no_output(self):
stdout = self.do_output_test([])
# module is not present
self.assertNotIn('(module', stdout)
def test_text_output(self):
stdout = self.do_output_test(['-S'])
# module is present
self.assertIn('(module', stdout)
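A hedged note on invocation: this is a standard unittest.TestCase subclass, so from a binaryen checkout it could in principle be collected with Python's built-in runner, assuming test/unit and the scripts package are importable from the working directory:
# python -m unittest test.unit.test_finalize -v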
| 33
| 79
| 0.637529
| 803
| 0.935897
| 0
| 0
| 0
| 0
| 0
| 0
| 226
| 0.263403
|
67351c5ed22ca30713ae796c8d4fe75b64c848ee
| 6,206
|
py
|
Python
|
mc/tools/TreeWidget.py
|
zy-sunshine/falkon-pyqt5
|
bc2b60aa21c9b136439bd57a11f391d68c736f99
|
[
"MIT"
] | 1
|
2021-04-29T05:36:44.000Z
|
2021-04-29T05:36:44.000Z
|
mc/tools/TreeWidget.py
|
zy-sunshine/falkon-pyqt5
|
bc2b60aa21c9b136439bd57a11f391d68c736f99
|
[
"MIT"
] | 1
|
2020-03-28T17:43:18.000Z
|
2020-03-28T17:43:18.000Z
|
mc/tools/TreeWidget.py
|
zy-sunshine/falkon-pyqt5
|
bc2b60aa21c9b136439bd57a11f391d68c736f99
|
[
"MIT"
] | 1
|
2021-01-15T20:09:24.000Z
|
2021-01-15T20:09:24.000Z
|
from PyQt5.QtWidgets import QTreeWidget
from PyQt5.Qt import pyqtSignal
from PyQt5.QtWidgets import QTreeWidgetItem
from PyQt5.Qt import Qt
class TreeWidget(QTreeWidget):
# enum ItemShowMode
ItemsCollapsed = 0
ItemsExpanded = 1
def __init__(self, parent=None):
super().__init__(parent)
self._refreshAllItemsNeeded = True
self._allTreeItems = [] # QList<QTreeWidgetItem>
        self._showMode = self.ItemsCollapsed  # ItemShowMode
self.itemChanged.connect(self._scheduleRefresh)
def defaultItemShowMode(self):
'''
@return: ItemShowMode
'''
return self._showMode
def setDefaultItemShowMode(self, mode):
'''
@param: item ItemShowMode
'''
self._showMode = mode
def allItems(self):
'''
@return: QList<QTreeWidgetItem>
'''
if self._refreshAllItemsNeeded:
self._allTreeItems.clear()
self.iterateAllItems(None)
self._refreshAllItemsNeeded = False
return self._allTreeItems
def appendToParentItemByText(self, parentText, item):
'''
@param: parentText QString
@param: item QTreeWidgetItem
'''
list_ = self.findItems(parentText, Qt.MatchExactly)
if len(list_) == 0:
return False
# QTreeWidgetItem
parentItem = list_[0]
if not parentItem:
return False
self._allTreeItems.append(item)
parentItem.addChild(item)
return True
def appendToParentItemByItem(self, parent, item):
if not parent or parent.treeWidget() != self:
return False
self._allTreeItems.append(item)
        parent.addChild(item)
return True
def prependToParentItemByText(self, parentText, item):
list_ = self.findItems(parentText, Qt.MatchExactly)
if len(list_) == 0:
return False
# QTreeWidgetItem
parentItem = list_[0]
if not parentItem:
return False
self._allTreeItems.append(item)
parentItem.insertChild(0, item)
return True
def prependToParentItemByItem(self, parent, item):
if not parent or parent.treeWidget() != self:
return False
self._allTreeItems.append(item)
parent.insertChild(0, item)
return True
def addTopLevelItem(self, item):
'''
@param: item QTreeWidgetItem
'''
self._allTreeItems.append(item)
super().addTopLevelItem(item)
def addTopLevelItems(self, items):
'''
@param: items QList<QTreeWidgetItem>
'''
self._allTreeItems.extend(items)
super().addTopLevelItems(items)
def insertTopLevelItem(self, index, item):
'''
@param: index int
@param: item QTreeWidgetItem
'''
self._allTreeItems.append(item)
super().insertTopLevelItem(index, item)
def insertTopLevelItems(self, index, items):
'''
@param: index int
@param: items QList<QTreeWidgetItem>
'''
self._allTreeItems.extend(items)
super().insertTopLevelItems(index, items)
def deleteItem(self, item):
'''
@param: item QTreeWidgetItem
'''
if item in self._allTreeItems:
self._allTreeItems.remove(item)
self._refreshAllItemsNeeded = True
def deleteItems(self, items):
'''
@param: items QList<QTreeWidgetItem>
'''
for item in items:
if item in self._allTreeItems:
self._allTreeItems.remove(item)
self._refreshAllItemsNeeded = True
# Q_SIGNALS:
itemControlClicked = pyqtSignal(QTreeWidgetItem) # item
itemMiddleButtonClicked = pyqtSignal(QTreeWidgetItem) # item
# public Q_SLOTS:
def filterString(self, string):
# QList<QTreeWidgetItem>
_allItems = self.allItems()
# QList<QTreeWidgetItem>
parents = []
stringIsEmpty = not string
strLower = string.lower()
for item in _allItems:
if stringIsEmpty:
containsString = True
else:
text = item.text(0).lower()
containsString = strLower in text
if containsString:
item.setHidden(False)
itemParent = item.parent()
if itemParent and itemParent not in parents:
parents.append(itemParent)
else:
item.setHidden(True)
itemParent = item.parent()
if itemParent:
itemParent.setHidden(True)
for parentItem in parents:
parentItem.setHidden(False)
if stringIsEmpty:
                parentItem.setExpanded(self._showMode == self.ItemsExpanded)
else:
parentItem.setExpanded(True)
parentOfParentItem = parentItem.parent()
if parentOfParentItem and parentOfParentItem not in parents:
parents.append(parentOfParentItem)
def clear(self):
super().clear()
self._allTreeItems.clear()
# private Q_SLOTS:
def _scheduleRefresh(self):
self._refreshAllItemsNeeded = True
# private:
def mousePressEvent(self, event):
'''
@param: event QMouseEvent
'''
if event.modifiers() == Qt.ControlModifier:
self.itemControlClicked.emit(self.itemAt(event.pos()))
if event.buttons() == Qt.MiddleButton:
self.itemMiddleButtonClicked.emit(self.itemAt(event.pos()))
super().mousePressEvent(event)
def iterateAllItems(self, parent):
'''
@param: parent QTreeWidgetItem
'''
if parent:
count = parent.childCount()
else:
count = self.topLevelItemCount()
for idx in range(count):
if parent:
item = parent.child(idx)
else:
item = self.topLevelItem(idx)
if item.childCount() == 0:
self._allTreeItems.append(item)
self.iterateAllItems(item)
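A hedged usage sketch for the widget above; the item texts are illustrative and the snippet assumes a normal PyQt5 environment:
import sys
from PyQt5.QtWidgets import QApplication, QTreeWidgetItem

app = QApplication(sys.argv)
tree = TreeWidget()
parent = QTreeWidgetItem(["Bookmarks"])
tree.addTopLevelItem(parent)
tree.appendToParentItemByItem(parent, QTreeWidgetItem(["Python"]))
tree.filterString("py")   # hides leaf items whose first column does not contain "py"
tree.show()
# app.exec_()             # start the event loop in a real application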
| 28.731481
| 75
| 0.586046
| 6,064
| 0.977119
| 0
| 0
| 0
| 0
| 0
| 0
| 935
| 0.150661
|
673572261f6221c9f0594203352cc527924c075f
| 1,400
|
py
|
Python
|
app/api/v2/models/sales.py
|
danuluma/dannstore
|
e5b59f08542c1cacdac60e380b5c2945195ba64a
|
[
"MIT"
] | null | null | null |
app/api/v2/models/sales.py
|
danuluma/dannstore
|
e5b59f08542c1cacdac60e380b5c2945195ba64a
|
[
"MIT"
] | 21
|
2018-10-16T09:29:03.000Z
|
2022-03-11T23:31:35.000Z
|
app/api/v2/models/sales.py
|
danuluma/dannstore
|
e5b59f08542c1cacdac60e380b5c2945195ba64a
|
[
"MIT"
] | null | null | null |
import os
import sys
LOCALPATH = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, LOCALPATH + '/../../../../')
from app.api.v2.db import Db
def format_sale(sale):
"""Formats the results to a dictionary"""
sale = {
"id": sale[0],
"books": sale[1],
"total": sale[2],
"created_by": sale[3],
"attendant_name": sale[5],
"created_at": str(sale[4])
}
return sale
class SalesModel(Db):
"""Sales Model. Sales Records stuff here"""
def get_all_sales(self):
"""Gets all sales records from the db"""
records = []
for sale in Db().get_query('sales'):
details = format_sale(sale)
records.append(details)
return records
def get_single_sale(self, param, this_col):
"""Gets a single sale record"""
records = [row for row in Db().get_query(
'sales') if row[this_col] == param]
if records:
sale = records[0]
return format_sale(sale)
def add_new_record(self, new_sale):
"""Adds a new sale record to the db"""
try:
Db().db_query(f"""
INSERT INTO sales (book_id, total, created_by, attendant)
VALUES (ARRAY{new_sale[0]}, {new_sale[1]}, {new_sale[2]}, '{new_sale[3]}');
""")
except:
return "Failed to add", 500
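A hedged usage sketch, assuming the Db connection above is configured; the ids, totals and attendant name are illustrative:
model = SalesModel()
all_sales = model.get_all_sales()
one_sale = model.get_single_sale(1, 0)            # look up by the id column (index 0)
model.add_new_record(([3, 7], 1200, 1, 'dann'))   # (book_ids, total, created_by, attendant)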
| 25.454545
| 89
| 0.547143
| 959
| 0.685
| 0
| 0
| 0
| 0
| 0
| 0
| 479
| 0.342143
|
67366bf1792d0f436d2ce6181f326bfb3e3aea15
| 4,035
|
py
|
Python
|
ubirch/linux/bleManager.py
|
ubirch/ubirch-ble-tool
|
1399d018957e9a8424071296a71431c8ffa27e6f
|
[
"Apache-2.0"
] | 4
|
2018-07-20T16:35:52.000Z
|
2020-11-12T13:38:58.000Z
|
ubirch/linux/bleManager.py
|
ubirch/ubirch-ble-tool
|
1399d018957e9a8424071296a71431c8ffa27e6f
|
[
"Apache-2.0"
] | 1
|
2021-04-03T13:37:40.000Z
|
2021-04-03T13:37:40.000Z
|
ubirch/linux/bleManager.py
|
ubirch/ubirch-ble-tool
|
1399d018957e9a8424071296a71431c8ffa27e6f
|
[
"Apache-2.0"
] | null | null | null |
from bleSuite import bleConnectionManager, bleServiceManager
from bluepy.btle import Scanner
from ubirch.linux.bleServiceManager import BLEServiceManager
class BLEManager(object):
""" BLE network manager """
def __init__(self, address, adapter, addressType, securityLevel, createRequester, psm=0, mtu=0):
""" Create an instance of BLE Manager """
self.address = address
self.cm = bleConnectionManager.BLEConnectionManager(address, adapter, addressType, securityLevel)
self.sm = BLEServiceManager(self.cm, self.address)
    def connectDevice(self):
        """ connect to the BLE device
        takes a manager object or a MAC address as an input variable
"""
self.cm.connect()
def disconnectDevice(self):
self.cm.disconnect()
def discoverDevice(self, name, timeout=5):
sm = BLEScanDevices()
deviceList = sm.scan(timeout)
for device in deviceList:
for values in device.getScanData():
if values[2].rstrip('\x00') == name:
return str(device.addr)
raise Exception("NO DEVICE FOUND")
def discoverServices(self):
return bleServiceManager.bleServiceDiscovery(self.address, self.cm)
def discoverCharacteristics(self):
ignoreUUID = ["00001800-0000-1000-8000-00805f9b34fb", "00001801-0000-1000-8000-00805f9b34fb"]
devServices = self.discoverServices()
devCharList = []
for service in devServices.services:
if not (service.uuid in ignoreUUID):
for characteristics in service.characteristics:
devCharList.append(characteristics.uuid)
return devCharList
raise Exception("No Services Found")
def write(self, handle, data):
bleServiceManager.bleServiceWriteToHandle(self.cm, handle, data)
def read(self, handle):
        # TODO: add a function getHandleByUUID to get the handle using a uuid;
        # this helps to read data on both macOS and Linux with ease
return bleServiceManager.bleServiceReadByHandle(self.cm, handle)
def isConnected(self):
return self.cm.isConnected()
# Services and Characteristics
def bleServiceWriteToHandle(self, handle, data):
return bleServiceManager.bleServiceWriteToHandle(self.cm, handle, data)
def bleServiceReadByHandle(self, handle):
return bleServiceManager.bleServiceReadByHandle(self.cm, handle)
def bleServiceReadByUUID(self, uuid):
return bleServiceManager.bleServiceReadByUUID(self.cm, uuid)
def bleDiscoverServices(self):
return bleServiceManager.bleServiceDiscovery(self.address, self.cm)
def showServices(self):
bledevice = bleServiceManager.bleServiceDiscovery(self.address, self.cm)
bledevice.printDeviceStructure()
def bleGetHandlefromUUID(self, uuid):
bledevice = bleServiceManager.bleServiceDiscovery(self.address, self.cm)
for service in bledevice.services:
# print service.uuid
for characteristic in service.characteristics:
# print characteristic.uuid
if uuid == characteristic.uuid:
return characteristic.valueHandle
return -1
def bleServiceWriteByUUID(self, uuid, data):
handle = self.bleGetHandlefromUUID(uuid)
return self.bleServiceWriteToHandle(handle, data)
class BLEScanDevices(object):
def __init__(self):
self.sm = Scanner()
def scan(self, timeOut=10):
return self.sm.scan(timeOut)
def stopScan(self):
pass
def isScanning(self):
pass
    def getDeviceAddress(self, deviceName, timeOut=10):
        """return the device address (str) for the given device name"""
deviceList = self.scan(timeOut)
for device in deviceList:
for values in device.getScanData():
if values[2].rstrip('\x00') == deviceName:
return str(device.addr)
raise Exception("NO DEVICE FOUND")
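A hedged usage sketch; the device name, adapter, address type, security level and UUID are illustrative placeholders whose accepted values depend on the bleSuite backend, and scanning usually needs elevated privileges:
scanner = BLEScanDevices()
address = scanner.getDeviceAddress("my-sensor", timeOut=5)

ble = BLEManager(address, "hci0", "public", "low", createRequester=True)
ble.connectDevice()
value = ble.bleServiceReadByUUID("0000aaaa-0000-1000-8000-00805f9b34fb")
ble.disconnectDevice()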
| 34.487179
| 105
| 0.666419
| 3,875
| 0.960347
| 0
| 0
| 0
| 0
| 0
| 0
| 563
| 0.139529
|
67366ca8b5a32e45010c5e5c8a95158feb06f5b0
| 1,952
|
py
|
Python
|
sysinv/cgts-client/cgts-client/cgtsclient/v1/load.py
|
SidneyAn/config
|
d694cc5d79436ea7d6170881c23cbfc8441efc0f
|
[
"Apache-2.0"
] | null | null | null |
sysinv/cgts-client/cgts-client/cgtsclient/v1/load.py
|
SidneyAn/config
|
d694cc5d79436ea7d6170881c23cbfc8441efc0f
|
[
"Apache-2.0"
] | null | null | null |
sysinv/cgts-client/cgts-client/cgtsclient/v1/load.py
|
SidneyAn/config
|
d694cc5d79436ea7d6170881c23cbfc8441efc0f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2015-2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from cgtsclient.common import base
from cgtsclient import exc
CREATION_ATTRIBUTES = ['software_version', 'compatible_version',
'required_patches']
IMPORT_ATTRIBUTES = ['path_to_iso', 'path_to_sig', 'active']
class Load(base.Resource):
def __repr__(self):
return "<loads %s>" % self._info
class LoadManager(base.Manager):
resource_class = Load
def list(self):
return self._list('/v1/loads/', "loads")
def get(self, load_id):
path = '/v1/loads/%s' % load_id
try:
return self._list(path)[0]
except IndexError:
return None
def _create_load(self, load, path):
if set(load.keys()) != set(CREATION_ATTRIBUTES):
raise exc.InvalidAttribute()
return self._create(path, load)
def create(self, load):
path = '/v1/loads/'
self._create_load(load, path)
def import_load_metadata(self, load):
path = '/v1/loads/import_load_metadata'
return self._create_load(load, path)
def import_load(self, **kwargs):
path = '/v1/loads/import_load'
active = None
load_info = {}
for (key, value) in kwargs.items():
if key in IMPORT_ATTRIBUTES:
if key == 'active':
active = value
else:
load_info[key] = value
else:
raise exc.InvalidAttribute(key)
json_data = self._upload_multipart(
path, body=load_info, data={'active': active}, check_exceptions=True)
return self.resource_class(self, json_data)
def delete(self, load_id):
path = '/v1/loads/%s' % load_id
return self._delete(path)
def update(self, load_id, patch):
path = '/v1/loads/%s' % load_id
return self._update(path, patch)
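A hedged usage sketch; it assumes an authenticated cgtsclient instance, and the attribute name `client.load` and the file paths are assumptions:
loads = client.load.list()
load = client.load.get(load_id=1)
client.load.import_load(path_to_iso='/tmp/load.iso',
                        path_to_sig='/tmp/load.sig',
                        active='true')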
| 26.378378
| 81
| 0.589139
| 1,617
| 0.828381
| 0
| 0
| 0
| 0
| 0
| 0
| 335
| 0.171619
|
6736c3bf19a38443467bf3214084087a92e23009
| 10,984
|
py
|
Python
|
tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py
|
haihabi/model_optimization
|
97372a9596378bb2287c59f1180b5059f741b2d6
|
[
"Apache-2.0"
] | null | null | null |
tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py
|
haihabi/model_optimization
|
97372a9596378bb2287c59f1180b5059f741b2d6
|
[
"Apache-2.0"
] | null | null | null |
tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py
|
haihabi/model_optimization
|
97372a9596378bb2287c59f1180b5059f741b2d6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
from model_compression_toolkit.tpc_models.default_tp_model import get_op_quantization_configs
from model_compression_toolkit.tpc_models.keras_tp_models.keras_default import generate_keras_default_tpc
from tests.common_tests.helpers.generate_test_tp_model import generate_mixed_precision_test_tp_model
from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
import model_compression_toolkit as mct
from model_compression_toolkit.common.mixed_precision.kpi import KPI
from model_compression_toolkit.common.mixed_precision.mixed_precision_quantization_config import \
MixedPrecisionQuantizationConfig
from model_compression_toolkit.common.user_info import UserInformation
from tests.common_tests.base_feature_test import BaseFeatureNetworkTest
from tests.common_tests.helpers.tensors_compare import cosine_similarity
keras = tf.keras
layers = keras.layers
tp = mct.target_platform
class MixedPercisionBaseTest(BaseKerasFeatureNetworkTest):
def __init__(self, unit_test):
super().__init__(unit_test)
def get_quantization_config(self):
qc = mct.QuantizationConfig(mct.QuantizationErrorMethod.MSE,
mct.QuantizationErrorMethod.MSE,
relu_bound_to_power_of_2=True,
weights_bias_correction=True,
weights_per_channel_threshold=True,
input_scaling=True,
activation_channel_equalization=True)
return MixedPrecisionQuantizationConfig(qc, num_of_images=1)
def get_input_shapes(self):
return [[self.val_batch_size, 224, 244, 3]]
def create_networks(self):
inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
x = layers.Conv2D(30, 40)(inputs)
x = layers.BatchNormalization()(x)
x = layers.Conv2D(50, 40)(x)
outputs = layers.ReLU()(x)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
def compare(self, quantized_model, float_model, input_x=None, quantization_info: UserInformation = None):
        # This is a base test, so it does not check anything itself; only the concrete
        # mixed precision tests implement comparisons.
raise NotImplementedError
class MixedPercisionManuallyConfiguredTest(MixedPercisionBaseTest):
def get_tpc(self):
base_config, _ = get_op_quantization_configs()
mp_tp_model = generate_mixed_precision_test_tp_model(base_cfg=base_config,
mp_bitwidth_candidates_list=[(8, 8), (2, 8), (3, 8)])
return generate_keras_default_tpc(name="mp_test", tp_model=mp_tp_model)
def get_quantization_config(self):
qc = mct.QuantizationConfig(mct.QuantizationErrorMethod.MSE, mct.QuantizationErrorMethod.MSE,
relu_bound_to_power_of_2=True, weights_bias_correction=True,
weights_per_channel_threshold=False, input_scaling=True,
activation_channel_equalization=True)
return MixedPrecisionQuantizationConfig(qc)
def get_kpi(self):
        # Return some KPI (the exact value does not matter here, since the search is not
        # performed and the configuration is set manually)
return KPI(1)
def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
assert quantization_info.mixed_precision_cfg == [2, 1]
self.unit_test.assertTrue(np.unique(quantized_model.layers[2].weights[0]).flatten().shape[0] <= 4)
self.unit_test.assertTrue(np.unique(quantized_model.layers[4].weights[0]).flatten().shape[0] <= 8)
class MixedPercisionSearchTest(MixedPercisionBaseTest):
def __init__(self, unit_test):
super().__init__(unit_test)
def get_kpi(self):
# kpi is infinity -> should give best model - 8bits
return KPI(np.inf)
def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
assert (quantization_info.mixed_precision_cfg == [0,
0]).all() # kpi is infinity -> should give best model - 8bits
for i in range(30): # quantized per channel
self.unit_test.assertTrue(
np.unique(quantized_model.layers[2].weights[0][:, :, :, i]).flatten().shape[0] <= 256)
for i in range(50): # quantized per channel
self.unit_test.assertTrue(
np.unique(quantized_model.layers[4].weights[0][:, :, :, i]).flatten().shape[0] <= 256)
class MixedPercisionSearchKPI4BitsAvgTest(MixedPercisionBaseTest):
def __init__(self, unit_test):
super().__init__(unit_test)
def get_kpi(self):
# kpi is for 4 bits on average
return KPI(2544140 * 4 / 8)
def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
assert (quantization_info.mixed_precision_cfg == [1, 1]).all()
for i in range(30): # quantized per channel
self.unit_test.assertTrue(
np.unique(quantized_model.layers[2].weights[0][:, :, :, i]).flatten().shape[0] <= 16)
for i in range(50): # quantized per channel
self.unit_test.assertTrue(
np.unique(quantized_model.layers[4].weights[0][:, :, :, i]).flatten().shape[0] <= 16)
class MixedPercisionSearchKPI2BitsAvgTest(MixedPercisionBaseTest):
def __init__(self, unit_test):
super().__init__(unit_test)
def get_kpi(self):
# kpi is for 2 bits on average
return KPI(2544200 * 2 / 8)
def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
assert (quantization_info.mixed_precision_cfg == [2, 2]).all()
for i in range(30): # quantized per channel
self.unit_test.assertTrue(
np.unique(quantized_model.layers[2].weights[0][:, :, :, i]).flatten().shape[0] <= 4)
for i in range(50): # quantized per channel
self.unit_test.assertTrue(
np.unique(quantized_model.layers[4].weights[0][:, :, :, i]).flatten().shape[0] <= 4)
class MixedPercisionDepthwiseTest(MixedPercisionBaseTest):
def __init__(self, unit_test):
super().__init__(unit_test)
def get_kpi(self):
return KPI(np.inf)
def create_networks(self):
inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
x = layers.DepthwiseConv2D(30)(inputs)
x = layers.BatchNormalization()(x)
x = layers.ReLU()(x)
model = keras.Model(inputs=inputs, outputs=x)
return model
def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
y = float_model.predict(input_x)
y_hat = quantized_model.predict(input_x)
cs = cosine_similarity(y, y_hat)
self.unit_test.assertTrue(np.isclose(cs, 1), msg=f'fail cosine similarity check:{cs}')
def get_tpc(self):
base_config, _ = get_op_quantization_configs()
base_config = base_config.clone_and_edit(weights_n_bits=16,
activation_n_bits=16)
mp_tp_model = generate_mixed_precision_test_tp_model(base_cfg=base_config,
mp_bitwidth_candidates_list=[(8, 16), (2, 16), (4, 16),
(16, 16)])
return generate_keras_default_tpc(name="mp_dw_test", tp_model=mp_tp_model)
def get_quantization_config(self):
qc = mct.QuantizationConfig(mct.QuantizationErrorMethod.MSE,
mct.QuantizationErrorMethod.MSE,
relu_bound_to_power_of_2=False,
weights_bias_correction=False,
weights_per_channel_threshold=True,
input_scaling=False,
activation_channel_equalization=False)
return MixedPrecisionQuantizationConfig(qc)
class MixedPrecisionActivationDisabled(MixedPercisionBaseTest):
def __init__(self, unit_test):
super().__init__(unit_test)
def get_quantization_config(self):
qc = mct.QuantizationConfig(mct.QuantizationErrorMethod.MSE,
mct.QuantizationErrorMethod.MSE,
relu_bound_to_power_of_2=True,
weights_bias_correction=True,
weights_per_channel_threshold=True,
input_scaling=False,
activation_channel_equalization=False)
return MixedPrecisionQuantizationConfig(qc, num_of_images=1)
def get_tpc(self):
base_config, _ = get_op_quantization_configs()
activation_disabled_config = base_config.clone_and_edit(enable_activation_quantization=False)
mp_tp_model = generate_mixed_precision_test_tp_model(base_cfg=activation_disabled_config,
mp_bitwidth_candidates_list=[(8, 8), (4, 8), (2, 8)])
return generate_keras_default_tpc(name="mp_weights_only_test", tp_model=mp_tp_model)
def get_kpi(self):
# kpi is infinity -> should give best model - 8bits
return KPI(np.inf)
def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
assert (quantization_info.mixed_precision_cfg == [0,
0]).all() # kpi is infinity -> should give best model - 8bits
for i in range(30): # quantized per channel
self.unit_test.assertTrue(
np.unique(quantized_model.layers[1].weights[0][:, :, :, i]).flatten().shape[0] <= 256)
for i in range(50): # quantized per channel
self.unit_test.assertTrue(
np.unique(quantized_model.layers[2].weights[0][:, :, :, i]).flatten().shape[0] <= 256)
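A short worked example of the KPI budget arithmetic used above; the constants are taken verbatim from the tests, and the interpretation (bytes of weight memory at the stated average bit-width) follows the in-code comments:
baseline_bytes = 2544140                    # 8-bit baseline from MixedPercisionSearchKPI4BitsAvgTest
four_bit_budget = baseline_bytes * 4 / 8    # == 1272070.0
two_bit_budget = 2544200 * 2 / 8            # == 636050.0, from MixedPercisionSearchKPI2BitsAvgTest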
| 47.549784
| 120
| 0.638929
| 9,287
| 0.845503
| 0
| 0
| 0
| 0
| 0
| 0
| 1,456
| 0.132556
|
67379ede0d1ebb11453ed5424da8aed4d1402f30
| 33,494
|
py
|
Python
|
src/ansible_navigator/actions/run.py
|
NaincyKumariKnoldus/ansible-navigator
|
2ac043aea4ce897f30df3c47c1444a5747c9446c
|
[
"Apache-2.0"
] | null | null | null |
src/ansible_navigator/actions/run.py
|
NaincyKumariKnoldus/ansible-navigator
|
2ac043aea4ce897f30df3c47c1444a5747c9446c
|
[
"Apache-2.0"
] | null | null | null |
src/ansible_navigator/actions/run.py
|
NaincyKumariKnoldus/ansible-navigator
|
2ac043aea4ce897f30df3c47c1444a5747c9446c
|
[
"Apache-2.0"
] | null | null | null |
""":run
"""
import curses
import datetime
import json
import logging
import os
import re
import shlex
import shutil
import time
import uuid
from math import floor
from queue import Queue
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from ..action_base import ActionBase
from ..action_defs import RunStdoutReturn
from ..app_public import AppPublic
from ..configuration_subsystem import ApplicationConfiguration
from ..runner import CommandAsync
from ..steps import Step
from ..ui_framework import CursesLine
from ..ui_framework import CursesLinePart
from ..ui_framework import CursesLines
from ..ui_framework import Interaction
from ..ui_framework import dict_to_form
from ..ui_framework import form_to_dict
from ..ui_framework import nonblocking_notification
from ..ui_framework import warning_notification
from ..utils.functions import abs_user_path
from ..utils.functions import human_time
from ..utils.functions import remove_ansi
from ..utils.functions import round_half_up
from ..utils.serialize import json_dump
from . import _actions as actions
from . import run_action
RESULT_TO_COLOR = [
("(?i)^failed$", 9),
("(?i)^ok$", 10),
("(?i)^ignored$", 13),
("(?i)^skipped$", 14),
("(?i)^in_progress$", 8),
]
get_color = lambda word: next( # noqa: E731
(x[1] for x in RESULT_TO_COLOR if re.match(x[0], word)),
0,
)
def color_menu(_colno: int, colname: str, entry: Dict[str, Any]) -> Tuple[int, int]:
# pylint: disable=too-many-branches
"""Find matching color for word
:param colname: A word to match
"""
colval = entry[colname]
color = 0
decoration = 0
if "__play_name" in entry:
if not colval:
color = 8
elif colname in ["__task_count", "__play_name", "__progress"]:
failures = entry["__failed"] + entry["__unreachable"]
if failures:
color = 9
elif entry["__ok"]:
color = 10
else:
color = 8
elif colname == "__changed":
color = 11
else:
color = get_color(colname[2:])
if colname == "__progress" and entry["__progress"].strip().lower() == "complete":
decoration = curses.A_BOLD
elif "task" in entry:
if entry["__result"].lower() == "__in_progress":
color = get_color(entry["__result"])
elif colname in ["__result", "__host", "__number", "__task", "__task_action"]:
color = get_color(entry["__result"])
elif colname == "__changed":
if colval is True:
color = 11
else:
color = get_color(entry["__result"])
elif colname == "__duration":
color = 12
return color, decoration
def content_heading(obj: Any, screen_w: int) -> Union[CursesLines, None]:
    """create a heading for a piece of content being shown
:param obj: The content going to be shown
:param screen_w: The current screen width
:return: The heading
"""
if isinstance(obj, dict) and "task" in obj:
detail = f"PLAY [{obj['play']}:{obj['__number']}] "
stars = "*" * (screen_w - len(detail))
line_1 = CursesLine(
(CursesLinePart(column=0, string=detail + stars, color=0, decoration=0),),
)
detail = f"TASK [{obj['task']}] "
stars = "*" * (screen_w - len(detail))
line_2 = CursesLine(
(CursesLinePart(column=0, string=detail + stars, color=0, decoration=0),),
)
if obj["__changed"] is True:
color = 11
res = "CHANGED"
else:
color = next((x[1] for x in RESULT_TO_COLOR if re.match(x[0], obj["__result"])), 0)
res = obj["__result"]
if "res" in obj and "msg" in obj["res"]:
msg = str(obj["res"]["msg"]).replace("\n", " ").replace("\r", "")
else:
msg = ""
string = f"{res}: [{obj['__host']}] {msg}"
string = string + (" " * (screen_w - len(string) + 1))
line_3 = CursesLine(
(CursesLinePart(column=0, string=string, color=color, decoration=curses.A_UNDERLINE),),
)
return CursesLines((line_1, line_2, line_3))
return None
def filter_content_keys(obj: Dict[Any, Any]) -> Dict[Any, Any]:
"""when showing content, filter out some keys"""
return {k: v for k, v in obj.items() if not (k.startswith("_") or k.endswith("uuid"))}
PLAY_COLUMNS = [
"__play_name",
"__ok",
"__changed",
"__unreachable",
"__failed",
"__skipped",
"__ignored",
"__in_progress",
"__task_count",
"__progress",
]
TASK_LIST_COLUMNS = [
"__result",
"__host",
"__number",
"__changed",
"__task",
"__task_action",
"__duration",
]
@actions.register
class Action(ActionBase):
# pylint: disable=too-many-instance-attributes
""":run"""
KEGEX = r"""(?x)
^
(?P<run>r(?:un)?
(\s(?P<params_run>.*))?)
$"""
def __init__(self, args: ApplicationConfiguration):
"""Initialize the ``:run`` action.
:param args: The current settings for the application
"""
super().__init__(args=args, logger_name=__name__, name="run")
self._subaction_type: str
self._msg_from_plays: Tuple[Optional[str], Optional[int]] = (None, None)
self._queue: Queue = Queue()
self.runner: CommandAsync
self._runner_finished: bool
self._auto_scroll = False
#: Flag when the first message is received from runner
self._first_message_received: bool = False
self._plays = Step(
name="plays",
step_type="menu",
columns=PLAY_COLUMNS,
value=[],
show_func=self._play_stats,
select_func=self._task_list_for_play,
)
self._task_list_columns: List[str] = TASK_LIST_COLUMNS
self._content_key_filter: Callable = filter_content_keys
@property
def mode(self):
"""if mode == stdout and playbook artifact creation is enabled
run in interactive mode, but print stdout"""
if all(
(
self._args.mode == "stdout",
self._args.playbook_artifact_enable,
self._args.app != "replay",
),
):
return "stdout_w_artifact"
return self._args.mode
    def run_stdout(self) -> RunStdoutReturn:
        """Execute the ``run`` request for mode stdout.
:returns: The return code from the runner invocation, along with a message to review the
logs if not 0.
"""
if self._args.app == "replay":
successful: bool = self._init_replay()
if successful:
return RunStdoutReturn(message="", return_code=0)
return RunStdoutReturn(message="Please review the log for errors.", return_code=1)
self._logger.debug("playbook requested in interactive mode")
self._subaction_type = "playbook"
self._logger = logging.getLogger(f"{__name__}_{self._subaction_type}")
self._run_runner()
while True:
self._dequeue()
if self.runner.finished:
if self._args.playbook_artifact_enable:
self.write_artifact()
self._logger.debug("runner finished")
break
# Sleep briefly to prevent 100% CPU utilization
# in mode stdout, the delay introduced by the curses key read is not present
time.sleep(0.01)
return_code = self.runner.ansible_runner_instance.rc
if return_code != 0:
return RunStdoutReturn(
message="Please review the log for errors.",
return_code=return_code,
)
return RunStdoutReturn(message="", return_code=return_code)
def run(self, interaction: Interaction, app: AppPublic) -> Union[Interaction, None]:
"""run :run or :replay
:param interaction: The interaction from the user
:param app: The app instance
:return: The pending interaction or none
"""
self._prepare_to_run(app, interaction)
if interaction.action.match.groupdict().get("run"):
self._logger.debug("run requested in interactive mode")
self._subaction_type = "run"
str_uuid = str(uuid.uuid4())
self._logger = logging.getLogger(f"{__name__}_{str_uuid[-4:]}")
self._name = f"run_{str_uuid[-4:]}"
initialized = self._init_run()
elif interaction.action.match.groupdict().get("replay"):
self._logger.debug("replay requested in interactive mode")
self._subaction_type = "replay"
self._name = "replay"
self._logger = logging.getLogger(f"{__name__}_{self._subaction_type}")
initialized = self._init_replay()
if not initialized:
self._prepare_to_exit(interaction)
return None
self.steps.append(self._plays)
        # Show a notification until the first message from the queue is processed
if self._subaction_type == "run":
messages = ["Preparing for automation, please wait..."]
notification = nonblocking_notification(messages=messages)
interaction.ui.show(notification)
while not self._first_message_received:
self.update()
while True:
self.update()
self._take_step()
if not self.steps:
if not self._runner_finished:
self._logger.error("Can not step back while playbook in progress, :q! to exit")
self.steps.append(self._plays)
else:
self._logger.debug(
"No steps remaining for '%s' returning to calling app",
self._name,
)
break
if self.steps.current.name == "quit":
if self._args.app == "replay":
self._prepare_to_exit(interaction)
return self.steps.current
done = self._prepare_to_quit(self.steps.current)
if done:
self._prepare_to_exit(interaction)
return self.steps.current
self.steps.back_one()
self._prepare_to_exit(interaction)
return None
# pylint: disable=too-many-branches
def _init_run(self) -> bool:
"""in the case of :run, check the user input"""
# Ensure the playbook and inventory are valid
self._update_args(
["run"] + shlex.split(self._interaction.action.match.groupdict()["params_run"] or ""),
)
if isinstance(self._args.playbook, str):
playbook_valid = os.path.exists(self._args.playbook)
else:
playbook_valid = False
if isinstance(self._args.inventory, list):
inventory_valid = all((os.path.exists(inv) for inv in self._args.inventory))
else:
# Permit running without an inventory
inventory_valid = True
if not all((playbook_valid, inventory_valid)):
populated_form = self._prompt_for_playbook()
if populated_form["cancelled"]:
return False
new_cmd = ["run"]
new_cmd.append(populated_form["fields"]["playbook"]["value"])
for field in populated_form["fields"].values():
if field["name"].startswith("inv_") and field["value"] != "":
new_cmd.extend(["-i", field["value"]])
if populated_form["fields"]["cmdline"]["value"]:
new_cmd.extend(shlex.split(populated_form["fields"]["cmdline"]["value"]))
# Parse as if provided from the cmdline
self._update_args(new_cmd)
self._run_runner()
self._logger.info("Run initialized and playbook started.")
return True
def _init_replay(self) -> bool:
"""in the case of :replay, replay the artifact
check for a version, to be safe
        copy the calling app args as our own so they can be updated safely
with a uuid attached to the name
"""
self._logger.debug("Starting replay artifact request with mode %s", self.mode)
if self.mode == "interactive":
self._update_args(
["replay"]
+ shlex.split(self._interaction.action.match.groupdict()["params_replay"] or ""),
)
artifact_file = self._args.playbook_artifact_replay
if isinstance(self._args.playbook_artifact_replay, str):
artifact_valid = os.path.exists(self._args.playbook_artifact_replay)
else:
artifact_valid = False
if not artifact_valid and self.mode == "interactive":
populated_form = self._prompt_for_artifact(artifact_file=artifact_file)
if populated_form["cancelled"]:
return False
artifact_file = populated_form["fields"]["artifact_file"]["value"]
try:
with open(artifact_file, encoding="utf-8") as fh:
data = json.load(fh)
except json.JSONDecodeError as exc:
self._logger.debug("json decode error: %s", str(exc))
self._logger.error("Unable to parse artifact file")
return False
version = data.get("version", "")
if version.startswith("1."):
try:
stdout = data["stdout"]
if self.mode == "interactive":
self._plays.value = data["plays"]
self._interaction.ui.update_status(data["status"], data["status_color"])
self.stdout = stdout
else:
for line in data["stdout"]:
if self._args.display_color is True:
print(line)
else:
print(remove_ansi(line))
except KeyError as exc:
self._logger.debug("missing keys from artifact file")
self._logger.debug("error was: %s", str(exc))
return False
else:
self._logger.error(
"Incompatible artifact version, got '%s', compatible = '1.y.z'",
version,
)
return False
self._runner_finished = True
self._logger.debug("Completed replay artifact request with mode %s", self.mode)
return True
def _prompt_for_artifact(self, artifact_file: str) -> Dict[Any, Any]:
"""prompt for a valid artifact file"""
if not isinstance(artifact_file, str):
artifact_file = ""
FType = Dict[str, Any]
form_dict: FType = {
"title": "Artifact file not found, please confirm the following",
"fields": [],
}
form_field = {
"name": "artifact_file",
"prompt": "Path to artifact file",
"type": "text_input",
"validator": {"name": "valid_file_path"},
"pre_populate": artifact_file,
}
form_dict["fields"].append(form_field)
form = dict_to_form(form_dict)
self._interaction.ui.show(form)
populated_form = form_to_dict(form, key_on_name=True)
return populated_form
def _prompt_for_playbook(self) -> Dict[Any, Any]:
"""prepopulate a form to confirm the playbook details"""
self._logger.debug("Inventory/Playbook not set, provided, or valid, prompting")
if isinstance(self._args.playbook, str):
playbook = self._args.playbook
else:
playbook = ""
if isinstance(self._args.inventory, list):
inventory = self._args.inventory
else:
inventory = ["", "", ""]
if isinstance(self._args.cmdline, list):
cmdline = " ".join(self._args.cmdline)
else:
cmdline = ""
FType = Dict[str, Any]
form_dict: FType = {
"title": "Inventory and/or playbook not found, please confirm the following",
"fields": [],
}
form_field = {
"name": "playbook",
"pre_populate": playbook,
"prompt": "Path to playbook",
"type": "text_input",
"validator": {"name": "valid_file_path"},
}
form_dict["fields"].append(form_field)
for idx, inv in enumerate(inventory):
form_field = {
"name": f"inv_{idx}",
"pre_populate": inv,
"prompt": "Inventory source",
"type": "text_input",
"validator": {"name": "none"},
}
form_dict["fields"].append(form_field)
form_field = {
"name": "cmdline",
"pre_populate": cmdline,
"prompt": "Additional command line parameters",
"type": "text_input",
"validator": {"name": "none"},
}
form_dict["fields"].append(form_field)
form = dict_to_form(form_dict)
self._interaction.ui.show(form)
populated_form = form_to_dict(form, key_on_name=True)
return populated_form
def _take_step(self) -> None:
"""run the current step on the stack"""
result = None
if isinstance(self.steps.current, Interaction):
result = run_action(self.steps.current.name, self.app, self.steps.current)
elif isinstance(self.steps.current, Step):
if self.steps.current.show_func:
self.steps.current.show_func()
if self.steps.current.type == "menu":
new_scroll = len(self.steps.current.value)
if self._auto_scroll:
self._interaction.ui.scroll(new_scroll)
result = self._interaction.ui.show(
obj=self.steps.current.value,
columns=self.steps.current.columns,
color_menu_item=color_menu,
)
if self._interaction.ui.scroll() < new_scroll and self._auto_scroll:
self._logger.debug("autoscroll disabled")
self._auto_scroll = False
elif self._interaction.ui.scroll() >= new_scroll and not self._auto_scroll:
self._logger.debug("autoscroll enabled")
self._auto_scroll = True
elif self.steps.current.type == "content":
result = self._interaction.ui.show(
obj=self.steps.current.value,
index=self.steps.current.index,
content_heading=content_heading,
filter_content_keys=self._content_key_filter,
)
if result is None:
self.steps.back_one()
else:
self.steps.append(result)
def _run_runner(self) -> None:
"""spin up runner"""
executable_cmd: Optional[str]
if self.mode == "stdout_w_artifact":
mode = "interactive"
else:
mode = self.mode
if isinstance(self._args.set_environment_variable, dict):
set_env_vars = {**self._args.set_environment_variable}
else:
set_env_vars = {}
if self._args.display_color is False:
set_env_vars["ANSIBLE_NOCOLOR"] = "1"
kwargs = {
"container_engine": self._args.container_engine,
"host_cwd": os.getcwd(),
"execution_environment_image": self._args.execution_environment_image,
"execution_environment": self._args.execution_environment,
"inventory": self._args.inventory,
"navigator_mode": mode,
"pass_environment_variable": self._args.pass_environment_variable,
"set_environment_variable": set_env_vars,
"private_data_dir": self._args.ansible_runner_artifact_dir,
"rotate_artifacts": self._args.ansible_runner_rotate_artifacts_count,
"timeout": self._args.ansible_runner_timeout,
}
if isinstance(self._args.playbook, str):
kwargs.update({"playbook": self._args.playbook})
if isinstance(self._args.execution_environment_volume_mounts, list):
kwargs.update(
{"container_volume_mounts": self._args.execution_environment_volume_mounts},
)
if isinstance(self._args.container_options, list):
kwargs.update({"container_options": self._args.container_options})
if self._args.execution_environment:
executable_cmd = "ansible-playbook"
else:
executable_cmd = shutil.which("ansible-playbook")
if not executable_cmd:
msg = "'ansible-playbook' executable not found"
self._logger.error(msg)
raise RuntimeError(msg)
pass_through_arg = []
if self._args.help_playbook is True:
pass_through_arg.append("--help")
if isinstance(self._args.cmdline, list):
pass_through_arg.extend(self._args.cmdline)
kwargs.update({"cmdline": pass_through_arg})
self.runner = CommandAsync(executable_cmd=executable_cmd, queue=self._queue, **kwargs)
self.runner.run()
self._runner_finished = False
self._logger.debug("runner requested to start")
def _dequeue(self) -> None:
"""Drain the runner queue"""
drain_count = 0
while not self._queue.empty():
if not self._first_message_received:
self._first_message_received = True
message = self._queue.get()
self._handle_message(message)
drain_count += 1
if drain_count:
self._logger.debug("Drained %s events", drain_count)
def _handle_message(self, message: dict) -> None:
# pylint: disable=too-many-branches
# pylint: disable=too-many-nested-blocks
# pylint: disable=too-many-statements
"""Handle a runner message
:param message: The message from runner
:type message: dict
"""
try:
event = message["event"]
except KeyError:
error = f"Unhandled message from runner queue, discarded: {message}"
self._logger.critical(error)
else:
if "stdout" in message and message["stdout"]:
self.stdout.extend(message["stdout"].splitlines())
if self.mode == "stdout_w_artifact":
print(message["stdout"])
if event in ["verbose", "error"]:
if "ERROR!" in message["stdout"]:
self._msg_from_plays = ("ERROR", 9)
if self.mode == "interactive":
self._notify_error(message["stdout"])
elif "WARNING" in message["stdout"]:
self._msg_from_plays = ("WARNINGS", 13)
if event == "playbook_on_play_start":
play = message["event_data"]
play["__play_name"] = play["name"]
play["tasks"] = []
self._plays.value.append(play)
if event.startswith("runner_on_"):
runner_event = event.split("_")[2]
task = message["event_data"]
play_id = next(
idx for idx, p in enumerate(self._plays.value) if p["uuid"] == task["play_uuid"]
)
if runner_event in ["ok", "skipped", "unreachable", "failed"]:
if runner_event == "failed" and task["ignore_errors"]:
result = "ignored"
else:
result = runner_event
task["__task"] = task["task"]
task["__result"] = result.upper()
task["__changed"] = task.get("res", {}).get("changed", False)
if isinstance(task["duration"], (int, float)):
task["__duration"] = human_time(seconds=round_half_up(task["duration"]))
else:
msg = (
f"Task duration for '{task['task']}' was type {type(task['duration'])},"
" set to 0"
)
self._logger.debug(msg)
task["__duration"] = 0
task_id = None
for idx, play_task in enumerate(self._plays.value[play_id]["tasks"]):
if task["task_uuid"] == play_task["task_uuid"]:
if task["host"] == play_task["host"]:
task_id = idx
break
if task_id is not None:
self._plays.value[play_id]["tasks"][task_id].update(task)
elif runner_event == "start":
task["__host"] = task["host"]
task["__result"] = "IN_PROGRESS"
task["__changed"] = "unknown"
task["__duration"] = None
task["__number"] = len(self._plays.value[play_id]["tasks"])
task["__task"] = task["task"]
task["__task_action"] = task["task_action"]
self._plays.value[play_id]["tasks"].append(task)
def _play_stats(self) -> None:
"""Calculate the play's stats based
        on its tasks
"""
for idx, play in enumerate(self._plays.value):
total = ["__ok", "__skipped", "__failed", "__unreachable", "__ignored", "__in_progress"]
self._plays.value[idx].update(
{
tot: len([t for t in play["tasks"] if t["__result"].lower() == tot[2:]])
for tot in total
},
)
self._plays.value[idx]["__changed"] = len(
[t for t in play["tasks"] if t["__changed"] is True],
)
task_count = len(play["tasks"])
self._plays.value[idx]["__task_count"] = task_count
completed = task_count - self._plays.value[idx]["__in_progress"]
if completed:
new = floor((completed / task_count * 100))
current = self._plays.value[idx].get("__percent_complete", 0)
self._plays.value[idx]["__percent_complete"] = max(new, current)
self._plays.value[idx]["__progress"] = str(max(new, current)) + "%"
else:
self._plays.value[idx]["__progress"] = "0%"
def _prepare_to_quit(self, interaction: Interaction) -> bool:
"""Looks like we're headed out of here
:param interaction: the quit interaction
        :return: a bool indicating whether or not it's safe to exit
"""
self.update()
if self.runner is not None and not self.runner.finished:
if interaction.action.match.groupdict()["exclamation"]:
self._logger.debug("shutting down runner")
self.runner.cancelled = True
while not self.runner.finished:
pass
self.write_artifact()
return True
self._logger.warning("Quit requested but playbook running, try q! or quit!")
return False
self._logger.debug("runner not running")
return True
    def _task_list_for_play(self) -> Step:
        """generate a menu of tasks for the currently selected play
:return: The menu step
"""
value = self.steps.current.selected["tasks"]
step = Step(
name="task_list",
step_type="menu",
columns=self._task_list_columns,
select_func=self._task_from_task_list,
value=value,
)
return step
def _task_from_task_list(self) -> Step:
"""generate task content for the selected task
        :return: content which shows a task
"""
value = self.steps.current.value
index = self.steps.current.index
step = Step(name="task", step_type="content", index=index, value=value)
return step
def update(self) -> None:
"""Drain the queue, set the status and write the artifact if needed"""
# let the calling app update as well
self._calling_app.update()
if hasattr(self, "runner"):
self._dequeue()
self._set_status()
if self.runner.finished and not self._runner_finished:
self._logger.debug("runner finished")
self._logger.info("Playbook complete")
self.write_artifact()
self._runner_finished = True
def _get_status(self) -> Tuple[str, int]:
"""Get the status and color
:return: status string, status color
"""
status = ""
status_color = 0
if self.runner.status:
if self.runner and self.runner.finished and self.runner.status:
status = self.runner.status
if self.runner.status == "failed":
status_color = 9
else:
status_color = self._msg_from_plays[1] or 10
else:
if self._msg_from_plays[0] is not None and self._msg_from_plays[1] is not None:
status = self._msg_from_plays[0]
status_color = self._msg_from_plays[1]
else:
status = self.runner.status
status_color = 10
return status, status_color
def _set_status(self) -> None:
"""Set the UI status"""
status, status_color = self._get_status()
self._interaction.ui.update_status(status, status_color)
def write_artifact(self, filename: Optional[str] = None) -> None:
"""Write the artifact
:param filename: The file to write to
:type filename: str
"""
if (
filename
or self._args.playbook_artifact_enable is True
and self._args.help_playbook is not True
):
filename = filename or self._args.playbook_artifact_save_as
filename = filename.format(
playbook_dir=os.path.dirname(self._args.playbook),
playbook_name=os.path.splitext(os.path.basename(self._args.playbook))[0],
ts_utc=datetime.datetime.now(tz=datetime.timezone.utc).isoformat(),
)
self._logger.debug("Formatted artifact file name set to %s", filename)
filename = abs_user_path(filename)
self._logger.debug("Resolved artifact file name set to %s", filename)
status, status_color = self._get_status()
try:
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "w", encoding="utf-8") as fh:
artifact = {
"version": "1.0.0",
"plays": self._plays.value,
"stdout": self.stdout,
"status": status,
"status_color": status_color,
}
json_dump(artifact, fh)
self._logger.info("Saved artifact as %s", filename)
except (IOError, OSError) as exc:
                error = (
                    f"Saving the artifact file failed, resulted in the following error: {str(exc)}"
                )
self._logger.error(error)
def rerun(self) -> None:
"""rerun the current playbook
        since we're not re-instantiating :run,
drain the queue, clear the steps, reset the index, etc
"""
if self._subaction_type == "run":
if self.runner.finished:
self._plays.value = []
self._plays.index = None
self._msg_from_plays = (None, None)
self._queue.queue.clear()
self.stdout = []
self._run_runner()
self.steps.clear()
self.steps.append(self._plays)
self._logger.debug("Playbook rerun triggered")
else:
self._logger.warning("Playbook rerun ignored, current playbook not complete")
elif self._subaction_type == "replay":
self._logger.error("No rerun available when artifact is loaded")
else:
self._logger.error("sub-action type '%s' is invalid", self._subaction_type)
def _notify_error(self, message: str):
"""show a blocking warning"""
warn_msg = ["Errors were encountered while running the playbook:"]
messages = remove_ansi(message).splitlines()
messages[-1] += "..."
warn_msg.extend(messages)
warn_msg += ["[HINT] To see the full error message try ':stdout'"]
warn_msg += ["[HINT] After it's fixed, try to ':rerun' the playbook"]
warning = warning_notification(warn_msg)
self._interaction.ui.show(warning)
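For reference, a sketch of the artifact structure that write_artifact() assembles above; the field values are illustrative:
# shape of the saved playbook artifact (values illustrative)
artifact_example = {
    "version": "1.0.0",
    "plays": [],            # the accumulated play/task entries shown in the menus
    "stdout": [],           # raw ansible-playbook output lines
    "status": "successful",
    "status_color": 10,
}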
| 37.091916
| 100
| 0.559921
| 28,553
| 0.852481
| 0
| 0
| 28,571
| 0.853018
| 0
| 0
| 8,042
| 0.240103
|
6738c6913f593e8f3489b3d849753c160556f231
| 480
|
py
|
Python
|
storagetest/pkgs/pts/__init__.py
|
liufeng-elva/storage-test2
|
5364cc00dbe71b106f1bb740bf391e6124788bf4
|
[
"MIT"
] | null | null | null |
storagetest/pkgs/pts/__init__.py
|
liufeng-elva/storage-test2
|
5364cc00dbe71b106f1bb740bf391e6124788bf4
|
[
"MIT"
] | null | null | null |
storagetest/pkgs/pts/__init__.py
|
liufeng-elva/storage-test2
|
5364cc00dbe71b106f1bb740bf391e6124788bf4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
@file : __init__.py.py
@Time : 2020/11/12 13:37
@Author: Tao.Xu
@Email : tao.xu2008@outlook.com
"""
"""
phoronix-test-suite: Main for Performance Test
===================
https://github.com/phoronix-test-suite/phoronix-test-suite
The Phoronix Test Suite is the most comprehensive testing and
benchmarking platform available for Linux, Solaris, macOS, Windows,
and BSD operating systems.
"""
if __name__ == '__main__':
pass
| 22.857143
| 68
| 0.683333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 448
| 0.933333
|
673a1a8a7022fbc7e3838045a6969aad19ff37aa
| 8,279
|
py
|
Python
|
owlbot.py
|
rahul2393/python-spanner
|
86d33905269accabfc6d68dae0f2b78bec96026a
|
[
"Apache-2.0"
] | null | null | null |
owlbot.py
|
rahul2393/python-spanner
|
86d33905269accabfc6d68dae0f2b78bec96026a
|
[
"Apache-2.0"
] | null | null | null |
owlbot.py
|
rahul2393/python-spanner
|
86d33905269accabfc6d68dae0f2b78bec96026a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
from pathlib import Path
from typing import List, Optional
import synthtool as s
from synthtool import gcp
from synthtool.languages import python
common = gcp.CommonTemplates()
def get_staging_dirs(
# This is a customized version of the s.get_staging_dirs() function
# from synthtool to cater for copying 3 different folders from
# googleapis-gen:
# spanner, spanner/admin/instance and spanner/admin/database.
# Source:
# https://github.com/googleapis/synthtool/blob/master/synthtool/transforms.py#L280
default_version: Optional[str] = None,
sub_directory: Optional[str] = None,
) -> List[Path]:
"""Returns the list of directories, one per version, copied from
https://github.com/googleapis/googleapis-gen. Will return in lexical sorting
order with the exception of the default_version which will be last (if specified).
Args:
default_version (str): the default version of the API. The directory for this version
will be the last item in the returned list if specified.
sub_directory (str): if a `sub_directory` is provided, only the directories within the
specified `sub_directory` will be returned.
Returns: the empty list if no file were copied.
"""
staging = Path("owl-bot-staging")
if sub_directory:
staging /= sub_directory
if staging.is_dir():
# Collect the subdirectories of the staging directory.
versions = [v.name for v in staging.iterdir() if v.is_dir()]
# Reorder the versions so the default version always comes last.
versions = [v for v in versions if v != default_version]
versions.sort()
if default_version is not None:
versions += [default_version]
dirs = [staging / v for v in versions]
for dir in dirs:
s._tracked_paths.add(dir)
return dirs
else:
return []
spanner_default_version = "v1"
spanner_admin_instance_default_version = "v1"
spanner_admin_database_default_version = "v1"
for library in get_staging_dirs(spanner_default_version, "spanner"):
# Work around gapic generator bug https://github.com/googleapis/gapic-generator-python/issues/902
s.replace(
library / f"google/cloud/spanner_{library.name}/types/transaction.py",
r""".
Attributes:""",
r""".\n
Attributes:""",
)
# Work around gapic generator bug https://github.com/googleapis/gapic-generator-python/issues/902
s.replace(
library / f"google/cloud/spanner_{library.name}/types/transaction.py",
r""".
Attributes:""",
r""".\n
Attributes:""",
)
# Remove headings from docstring. Requested change upstream in cl/377290854 due to https://google.aip.dev/192#formatting.
s.replace(
library / f"google/cloud/spanner_{library.name}/types/transaction.py",
"""\n ==.*?==\n""",
":",
)
# Remove headings from docstring. Requested change upstream in cl/377290854 due to https://google.aip.dev/192#formatting.
s.replace(
library / f"google/cloud/spanner_{library.name}/types/transaction.py",
"""\n --.*?--\n""",
":",
)
s.move(
library,
excludes=[
"google/cloud/spanner/**",
"*.*",
"docs/index.rst",
"google/cloud/spanner_v1/__init__.py",
],
)
for library in get_staging_dirs(
spanner_admin_instance_default_version, "spanner_admin_instance"
):
s.move(
library,
excludes=["google/cloud/spanner_admin_instance/**", "*.*", "docs/index.rst"],
)
for library in get_staging_dirs(
spanner_admin_database_default_version, "spanner_admin_database"
):
s.move(
library,
excludes=["google/cloud/spanner_admin_database/**", "*.*", "docs/index.rst"],
)
s.remove_staging_dirs()
# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
templated_files = common.py_library(
microgenerator=True, samples=True, cov_level=99, split_system_tests=True,
)
s.move(templated_files,
excludes=[
".coveragerc",
".github/workflows", # exclude gh actions as credentials are needed for tests
]
)
# Ensure CI runs on a new instance each time
s.replace(
".kokoro/build.sh",
"# Remove old nox",
"""\
# Set up creating a new instance for each system test run
export GOOGLE_CLOUD_TESTS_CREATE_SPANNER_INSTANCE=true
# Remove old nox""",
)
# Update samples folder in CONTRIBUTING.rst
s.replace("CONTRIBUTING.rst", "samples/snippets", "samples/samples")
# ----------------------------------------------------------------------------
# Samples templates
# ----------------------------------------------------------------------------
python.py_samples()
# ----------------------------------------------------------------------------
# Customize noxfile.py
# ----------------------------------------------------------------------------
def place_before(path, text, *before_text, escape=None):
replacement = "\n".join(before_text) + "\n" + text
if escape:
for c in escape:
text = text.replace(c, "\\" + c)
s.replace([path], text, replacement)
open_telemetry_test = """
# XXX Work around Kokoro image's older pip, which borks the OT install.
session.run("pip", "install", "--upgrade", "pip")
session.install("-e", ".[tracing]", "-c", constraints_path)
# XXX: Dump installed versions to debug OT issue
session.run("pip", "list")
# Run py.test against the unit tests with OpenTelemetry.
session.run(
"py.test",
"--quiet",
"--cov=google.cloud.spanner",
"--cov=google.cloud",
"--cov=tests.unit",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
"--cov-fail-under=0",
os.path.join("tests", "unit"),
*session.posargs,
)
"""
place_before(
"noxfile.py",
"@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)",
open_telemetry_test,
escape="()",
)
skip_tests_if_env_var_not_set = """# Sanity check: Only run tests if the environment variable is set.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", "") and not os.environ.get(
"SPANNER_EMULATOR_HOST", ""
):
session.skip(
"Credentials or emulator host must be set via environment variable"
)
"""
place_before(
"noxfile.py",
"# Install pyopenssl for mTLS testing.",
skip_tests_if_env_var_not_set,
escape="()",
)
s.replace(
"noxfile.py",
"""f"--junitxml=unit_{session.python}_sponge_log.xml",
"--cov=google",
"--cov=tests/unit",""",
"""\"--cov=google.cloud.spanner",
"--cov=google.cloud",
"--cov=tests.unit",""",
)
s.replace(
"noxfile.py",
r"""session.install\("-e", "."\)""",
"""session.install("-e", ".[tracing]")""",
)
s.replace(
"noxfile.py",
r"""# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install\("mock", "pytest", "google-cloud-testutils", "-c", constraints_path\)
session.install\("-e", ".", "-c", constraints_path\)""",
"""# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path)
session.install("-e", ".[tracing]", "-c", constraints_path)""",
)
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
| 32.214008
| 125
| 0.613721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,686
| 0.686798
|
673a564ceef3de9745d7d4bb80242204d7ba623d
| 1,843
|
py
|
Python
|
k_means.py
|
sokrutu/imagemean
|
680bab26a1841cd8d4e03beba020709a5cb434a2
|
[
"MIT"
] | null | null | null |
k_means.py
|
sokrutu/imagemean
|
680bab26a1841cd8d4e03beba020709a5cb434a2
|
[
"MIT"
] | null | null | null |
k_means.py
|
sokrutu/imagemean
|
680bab26a1841cd8d4e03beba020709a5cb434a2
|
[
"MIT"
] | null | null | null |
from random import randint
def k_means(data, K):
"""
k-Means clustering
TODO: Assumes values from 0-255
:param data: NxD array of numbers
:param K: The number of clusters
:return: Tuple of cluster means (KxD array) and cluster assignments (Nx1 with values from 0 to K-1)
"""
N = len(data)
D = len(data[0])
means = [None]*K
for i in range(0,K):
means[i] = [randint(0, 255), randint(0, 255), randint(0, 255)]
assignments = [None]*N
changed = True
while(changed):
old_means = list(means)  # copy the list so the convergence check compares the old means against the updated ones
# Find closest centroid
for n in range(0, N):
"max distance in RGB"
min = 442.0
index = -1
for k in range(0,K):
temp = __distance(data[n], means[k], D)
if temp <= min:
min = temp
index = k
assignments[n] = index
# Calculate the new centers
for k in range(0,K):
# Find the indices in assignments whose entry equals k
indices = [i for i,x in enumerate(assignments) if x == k]
# ... then use those indices to look up the corresponding values in data
temp_data = [x for i,x in enumerate(data) if i in indices]
# ... and average them (keep the previous mean if the cluster is empty,
# which would otherwise cause a division by zero in __mean)
means[k] = __mean(temp_data, D) if temp_data else means[k]
# Check if something changed
changed = False
for k in range(0,K):
if old_means[k] != means[k]:
changed = True
break
return (means, assignments)
def __distance(a, b, dim):
sum = 0.0
for i in range(0,dim):
sum += (a[i]-b[i])**2
return sum**(1/2.0)
def __mean(a, dim):
N = len(a)
sum = [0.0]*dim
for e in a:
for d in range(0,dim):
sum[d] += e[d]
avg = [a/N for a in sum]
return avg
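# Illustrative usage sketch (added for clarity; the sample data below is made up):
# cluster four RGB-like points into K=2 groups and inspect the result.
if __name__ == '__main__':
data = [[250, 5, 5], [245, 10, 0], [5, 5, 250], [0, 10, 245]]
means, assignments = k_means(data, 2)
print(means)        # two centroids, one near red and one near blue
print(assignments)  # 0-based cluster labels, e.g. [0, 0, 1, 1]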
| 25.597222
| 101
| 0.511666
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 468
| 0.253934
|
673ab82d9ec7dbd59a48086985188478a17a2fc5
| 756
|
py
|
Python
|
contrib/analysis_server/src/analysis_server/__init__.py
|
Kenneth-T-Moore/OpenMDAO-Framework
|
76e0ebbd6f424a03b547ff7b6039dea73d8d44dc
|
[
"Apache-2.0"
] | 3
|
2015-06-02T00:36:28.000Z
|
2018-11-03T00:35:21.000Z
|
contrib/analysis_server/src/analysis_server/__init__.py
|
JustinSGray/OpenMDAO-Framework
|
7ebd7fda0b10fbe8a86ae938dc4f135396dd9759
|
[
"Apache-2.0"
] | null | null | null |
contrib/analysis_server/src/analysis_server/__init__.py
|
JustinSGray/OpenMDAO-Framework
|
7ebd7fda0b10fbe8a86ae938dc4f135396dd9759
|
[
"Apache-2.0"
] | 1
|
2020-07-15T02:45:54.000Z
|
2020-07-15T02:45:54.000Z
|
"""
Support for interacting with ModelCenter via the AnalysisServer protocol.
Client-mode access to an AnalysisServer is provided by the 'client', 'factory',
and 'proxy' modules. Server-mode access by ModelCenter is provided by the
'server' and 'wrapper' modules.
An extension to the protocol allows 'eggs' to be 'published': the egg is sent
to the server and made part of the server's set of supported components.
"""
from __future__ import absolute_import
from .client import Client
from .factory import ASFactory
from .server import Server, start_server, stop_server, DEFAULT_PORT
from .stream import Stream
from .units import have_translation, get_translation, set_translation
from .publish import publish_class, publish_object, publish_egg
| 36
| 79
| 0.797619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 420
| 0.555556
|
673b17b5d8b3ab21d7358bca547447f1eb5fad33
| 24,476
|
py
|
Python
|
3rd party/YOLO_network.py
|
isaiasfsilva/ROLO
|
6612007e35edb73dac734e7a4dac2cd4c1dca6c1
|
[
"Apache-2.0"
] | 962
|
2016-07-22T01:36:20.000Z
|
2022-03-30T01:34:35.000Z
|
3rd party/YOLO_network.py
|
isaiasfsilva/ROLO
|
6612007e35edb73dac734e7a4dac2cd4c1dca6c1
|
[
"Apache-2.0"
] | 57
|
2016-08-12T15:33:31.000Z
|
2022-01-29T19:16:01.000Z
|
3rd party/YOLO_network.py
|
isaiasfsilva/ROLO
|
6612007e35edb73dac734e7a4dac2cd4c1dca6c1
|
[
"Apache-2.0"
] | 342
|
2016-07-22T01:36:26.000Z
|
2022-02-26T23:00:25.000Z
|
import os
import numpy as np
import tensorflow as tf
import cv2
import time
import sys
import pickle
import ROLO_utils as util
class YOLO_TF:
fromfile = None
tofile_img = 'test/output.jpg'
tofile_txt = 'test/output.txt'
imshow = True
filewrite_img = False
filewrite_txt = False
disp_console = True
weights_file = 'weights/YOLO_small.ckpt'
alpha = 0.1
threshold = 0.08
iou_threshold = 0.5
num_class = 20
num_box = 2
grid_size = 7
classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train","tvmonitor"]
w_img, h_img = [352, 240]
num_feat = 4096
num_predict = 6 # final output of LSTM 6 loc parameters
num_heatmap = 1024
def __init__(self,argvs = []):
self.argv_parser(argvs)
self.build_networks()
if self.fromfile is not None: self.detect_from_file(self.fromfile)
def argv_parser(self,argvs):
for i in range(1,len(argvs),2):
if argvs[i] == '-fromfile' : self.fromfile = argvs[i+1]
if argvs[i] == '-tofile_img' : self.tofile_img = argvs[i+1] ; self.filewrite_img = True
if argvs[i] == '-tofile_txt' : self.tofile_txt = argvs[i+1] ; self.filewrite_txt = True
if argvs[i] == '-imshow' :
if argvs[i+1] == '1' :self.imshow = True
else : self.imshow = False
if argvs[i] == '-disp_console' :
if argvs[i+1] == '1' :self.disp_console = True
else : self.disp_console = False
def build_networks(self):
if self.disp_console : print "Building YOLO_small graph..."
self.x = tf.placeholder('float32',[None,448,448,3])
self.conv_1 = self.conv_layer(1,self.x,64,7,2)
self.pool_2 = self.pooling_layer(2,self.conv_1,2,2)
self.conv_3 = self.conv_layer(3,self.pool_2,192,3,1)
self.pool_4 = self.pooling_layer(4,self.conv_3,2,2)
self.conv_5 = self.conv_layer(5,self.pool_4,128,1,1)
self.conv_6 = self.conv_layer(6,self.conv_5,256,3,1)
self.conv_7 = self.conv_layer(7,self.conv_6,256,1,1)
self.conv_8 = self.conv_layer(8,self.conv_7,512,3,1)
self.pool_9 = self.pooling_layer(9,self.conv_8,2,2)
self.conv_10 = self.conv_layer(10,self.pool_9,256,1,1)
self.conv_11 = self.conv_layer(11,self.conv_10,512,3,1)
self.conv_12 = self.conv_layer(12,self.conv_11,256,1,1)
self.conv_13 = self.conv_layer(13,self.conv_12,512,3,1)
self.conv_14 = self.conv_layer(14,self.conv_13,256,1,1)
self.conv_15 = self.conv_layer(15,self.conv_14,512,3,1)
self.conv_16 = self.conv_layer(16,self.conv_15,256,1,1)
self.conv_17 = self.conv_layer(17,self.conv_16,512,3,1)
self.conv_18 = self.conv_layer(18,self.conv_17,512,1,1)
self.conv_19 = self.conv_layer(19,self.conv_18,1024,3,1)
self.pool_20 = self.pooling_layer(20,self.conv_19,2,2)
self.conv_21 = self.conv_layer(21,self.pool_20,512,1,1)
self.conv_22 = self.conv_layer(22,self.conv_21,1024,3,1)
self.conv_23 = self.conv_layer(23,self.conv_22,512,1,1)
self.conv_24 = self.conv_layer(24,self.conv_23,1024,3,1)
self.conv_25 = self.conv_layer(25,self.conv_24,1024,3,1)
self.conv_26 = self.conv_layer(26,self.conv_25,1024,3,2)
self.conv_27 = self.conv_layer(27,self.conv_26,1024,3,1)
self.conv_28 = self.conv_layer(28,self.conv_27,1024,3,1)
self.fc_29 = self.fc_layer(29,self.conv_28,512,flat=True,linear=False)
self.fc_30 = self.fc_layer(30,self.fc_29,4096,flat=False,linear=False)
#skip dropout_31
self.fc_32 = self.fc_layer(32,self.fc_30,1470,flat=False,linear=True)
self.sess = tf.Session()
self.sess.run(tf.initialize_all_variables())
self.saver = tf.train.Saver()
self.saver.restore(self.sess,self.weights_file)
if self.disp_console : print "Loading complete!" + '\n'
def conv_layer(self,idx,inputs,filters,size,stride):
channels = inputs.get_shape()[3]
weight = tf.Variable(tf.truncated_normal([size,size,int(channels),filters], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[filters]))
pad_size = size//2
pad_mat = np.array([[0,0],[pad_size,pad_size],[pad_size,pad_size],[0,0]])
inputs_pad = tf.pad(inputs,pad_mat)
conv = tf.nn.conv2d(inputs_pad, weight, strides=[1, stride, stride, 1], padding='VALID',name=str(idx)+'_conv')
conv_biased = tf.add(conv,biases,name=str(idx)+'_conv_biased')
if self.disp_console : print ' Layer %d : Type = Conv, Size = %d * %d, Stride = %d, Filters = %d, Input channels = %d' % (idx,size,size,stride,filters,int(channels))
return tf.maximum(self.alpha*conv_biased,conv_biased,name=str(idx)+'_leaky_relu')
def pooling_layer(self,idx,inputs,size,stride):
if self.disp_console : print ' Layer %d : Type = Pool, Size = %d * %d, Stride = %d' % (idx,size,size,stride)
return tf.nn.max_pool(inputs, ksize=[1, size, size, 1],strides=[1, stride, stride, 1], padding='SAME',name=str(idx)+'_pool')
def fc_layer(self,idx,inputs,hiddens,flat = False,linear = False):
input_shape = inputs.get_shape().as_list()
if flat:
dim = input_shape[1]*input_shape[2]*input_shape[3]
inputs_transposed = tf.transpose(inputs,(0,3,1,2))
inputs_processed = tf.reshape(inputs_transposed, [-1,dim])
else:
dim = input_shape[1]
inputs_processed = inputs
weight = tf.Variable(tf.truncated_normal([dim,hiddens], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[hiddens]))
if self.disp_console : print ' Layer %d : Type = Full, Hidden = %d, Input dimension = %d, Flat = %d, Activation = %d' % (idx,hiddens,int(dim),int(flat),1-int(linear))
if linear : return tf.add(tf.matmul(inputs_processed,weight),biases,name=str(idx)+'_fc')
ip = tf.add(tf.matmul(inputs_processed,weight),biases)
return tf.maximum(self.alpha*ip,ip,name=str(idx)+'_fc')
def detect_from_cvmat(self,img):
s = time.time()
self.h_img,self.w_img,_ = img.shape
img_resized = cv2.resize(img, (448, 448))
img_RGB = cv2.cvtColor(img_resized,cv2.COLOR_BGR2RGB)
img_resized_np = np.asarray( img_RGB )
inputs = np.zeros((1,448,448,3),dtype='float32')
inputs[0] = (img_resized_np/255.0)*2.0-1.0
in_dict = {self.x: inputs}
net_output = self.sess.run(self.fc_32,feed_dict=in_dict)
self.result = self.interpret_output(net_output[0])
self.show_results(img,self.result)
strtime = str(time.time()-s)
if self.disp_console : print 'Elapsed time : ' + strtime + ' secs' + '\n'
def detect_from_file(self,filename):
if self.disp_console : print 'Detect from ' + filename
img = cv2.imread(filename)
#img = misc.imread(filename)
self.detect_from_cvmat(img)
def detect_from_crop_sample(self):
self.w_img = 640
self.h_img = 420
f = np.array(open('person_crop.txt','r').readlines(),dtype='float32')
inputs = np.zeros((1,448,448,3),dtype='float32')
for c in range(3):
for y in range(448):
for x in range(448):
inputs[0,y,x,c] = f[c*448*448+y*448+x]
in_dict = {self.x: inputs}
net_output = self.sess.run(self.fc_32,feed_dict=in_dict)
self.result = self.interpret_output(net_output[0])
img = cv2.imread('person.jpg')
self.show_results(img, self.result)
def interpret_output(self,output):
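# The flat 1470-dim network output splits into 7*7*20 = 980 class probabilities,
# then 7*7*2 = 98 box confidence scales, then 7*7*2*4 = 392 box coordinates.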
probs = np.zeros((7,7,2,20))
class_probs = np.reshape(output[0:980],(7,7,20))
scales = np.reshape(output[980:1078],(7,7,2))
boxes = np.reshape(output[1078:],(7,7,2,4))
offset = np.transpose(np.reshape(np.array([np.arange(7)]*14),(2,7,7)),(1,2,0))
boxes[:,:,:,0] += offset
boxes[:,:,:,1] += np.transpose(offset,(1,0,2))
boxes[:,:,:,0:2] = boxes[:,:,:,0:2] / 7.0
boxes[:,:,:,2] = np.multiply(boxes[:,:,:,2],boxes[:,:,:,2])
boxes[:,:,:,3] = np.multiply(boxes[:,:,:,3],boxes[:,:,:,3])
boxes[:,:,:,0] *= self.w_img
boxes[:,:,:,1] *= self.h_img
boxes[:,:,:,2] *= self.w_img
boxes[:,:,:,3] *= self.h_img
for i in range(2):
for j in range(20):
probs[:,:,i,j] = np.multiply(class_probs[:,:,j],scales[:,:,i])
filter_mat_probs = np.array(probs>=self.threshold,dtype='bool')
filter_mat_boxes = np.nonzero(filter_mat_probs)
boxes_filtered = boxes[filter_mat_boxes[0],filter_mat_boxes[1],filter_mat_boxes[2]]
probs_filtered = probs[filter_mat_probs]
classes_num_filtered = np.argmax(filter_mat_probs,axis=3)[filter_mat_boxes[0],filter_mat_boxes[1],filter_mat_boxes[2]]
argsort = np.array(np.argsort(probs_filtered))[::-1]
boxes_filtered = boxes_filtered[argsort]
probs_filtered = probs_filtered[argsort]
classes_num_filtered = classes_num_filtered[argsort]
for i in range(len(boxes_filtered)):
if probs_filtered[i] == 0 : continue
for j in range(i+1,len(boxes_filtered)):
if self.iou(boxes_filtered[i],boxes_filtered[j]) > self.iou_threshold :
probs_filtered[j] = 0.0
filter_iou = np.array(probs_filtered>0.0,dtype='bool')
boxes_filtered = boxes_filtered[filter_iou]
probs_filtered = probs_filtered[filter_iou]
classes_num_filtered = classes_num_filtered[filter_iou]
result = []
for i in range(len(boxes_filtered)):
result.append([self.classes[classes_num_filtered[i]],boxes_filtered[i][0],boxes_filtered[i][1],boxes_filtered[i][2],boxes_filtered[i][3],probs_filtered[i]])
return result
def show_results(self,img,results):
img_cp = img.copy()
if self.filewrite_txt :
ftxt = open(self.tofile_txt,'w')
for i in range(len(results)):
x = int(results[i][1])
y = int(results[i][2])
w = int(results[i][3])//2
h = int(results[i][4])//2
if self.disp_console : print ' class : ' + results[i][0] + ' , [x,y,w,h]=[' + str(x) + ',' + str(y) + ',' + str(int(results[i][3])) + ',' + str(int(results[i][4]))+'], Confidence = ' + str(results[i][5])
if self.filewrite_img or self.imshow:
cv2.rectangle(img_cp,(x-w,y-h),(x+w,y+h),(0,255,0),2)
cv2.rectangle(img_cp,(x-w,y-h-20),(x+w,y-h),(125,125,125),-1)
cv2.putText(img_cp,results[i][0] + ' : %.2f' % results[i][5],(x-w+5,y-h-7),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
if self.filewrite_txt :
ftxt.write(results[i][0] + ',' + str(x) + ',' + str(y) + ',' + str(w) + ',' + str(h)+',' + str(results[i][5]) + '\n')
if self.filewrite_img :
if self.disp_console : print ' image file written : ' + self.tofile_img
cv2.imwrite(self.tofile_img,img_cp)
if self.imshow :
cv2.imshow('YOLO_small detection',img_cp)
cv2.waitKey(0)
if self.filewrite_txt :
if self.disp_console : print ' txt file written : ' + self.tofile_txt
ftxt.close()
def iou(self,box1,box2):
tb = min(box1[0]+0.5*box1[2],box2[0]+0.5*box2[2])-max(box1[0]-0.5*box1[2],box2[0]-0.5*box2[2])
lr = min(box1[1]+0.5*box1[3],box2[1]+0.5*box2[3])-max(box1[1]-0.5*box1[3],box2[1]-0.5*box2[3])
if tb < 0 or lr < 0 : intersection = 0
else : intersection = tb*lr
return intersection / (box1[2]*box1[3] + box2[2]*box2[3] - intersection)
# my addition
def createFolder(self, path):
if not os.path.exists(path):
os.makedirs(path)
def debug_location(self, img, location):
img_cp = img.copy()
x = int(location[1])
y = int(location[2])
w = int(location[3])//2
h = int(location[4])//2
cv2.rectangle(img_cp,(x-w,y-h),(x+w,y+h),(0,255,0),2)
cv2.rectangle(img_cp,(x-w,y-h-20),(x+w,y-h),(125,125,125),-1)
cv2.putText(img_cp, str(location[0]) + ' : %.2f' % location[5],(x-w+5,y-h-7),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
cv2.imshow('YOLO_small detection',img_cp)
cv2.waitKey(1)
def debug_locations(self, img, locations):
img_cp = img.copy()
for location in locations:
x = int(location[1])
y = int(location[2])
w = int(location[3])//2
h = int(location[4])//2
cv2.rectangle(img_cp,(x-w,y-h),(x+w,y+h),(0,255,0),2)
cv2.rectangle(img_cp,(x-w,y-h-20),(x+w,y-h),(125,125,125),-1)
cv2.putText(img_cp, str(location[0]) + ' : %.2f' % location[5],(x-w+5,y-h-7),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
cv2.imshow('YOLO_small detection',img_cp)
cv2.waitKey(1)
def debug_gt_location(self, img, location):
img_cp = img.copy()
x = int(location[0])
y = int(location[1])
w = int(location[2])
h = int(location[3])
cv2.rectangle(img_cp,(x,y),(x+w,y+h),(0,255,0),2)
cv2.imshow('gt',img_cp)
cv2.waitKey(1)
def file_to_img(self, filepath):
img = cv2.imread(filepath)
return img
def file_to_video(self, filepath):
try:
video = cv2.VideoCapture(filepath)
except IOError:
print 'cannot open video file: ' + filepath
video = None  # could not open the file
return video
def iou(self,box1,box2):
tb = min(box1[0]+0.5*box1[2],box2[0]+0.5*box2[2])-max(box1[0]-0.5*box1[2],box2[0]-0.5*box2[2])
lr = min(box1[1]+0.5*box1[3],box2[1]+0.5*box2[3])-max(box1[1]-0.5*box1[3],box2[1]-0.5*box2[3])
if tb < 0 or lr < 0 : intersection = 0
else : intersection = tb*lr
return intersection / (box1[2]*box1[3] + box2[2]*box2[3] - intersection)
def find_iou_cost(self, pred_locs, gts):
# for each element in the batch, find its iou. output a list of ious.
cost = 0
batch_size= len(pred_locs)
assert (len(gts)== batch_size)
print("batch_size: ")
ious = []
for i in range(batch_size):
pred_loc = pred_locs[i]
gt = gts[i]
iou_ = self.iou(pred_loc, gt)
ious.append(iou_)
return ious
def load_folder(self, path):
paths = [os.path.join(path,fn) for fn in next(os.walk(path))[2]]
#return paths
return sorted(paths)
def load_dataset_gt(self, gt_file):
txtfile = open(gt_file, "r")
lines = txtfile.read().split('\n') #'\r\n'
return lines
def find_gt_location(self, lines, id):
line = lines[id]
elems = line.split('\t') # for gt type 2
if len(elems) < 4:
elems = line.split(',') #for gt type 1
x1 = elems[0]
y1 = elems[1]
w = elems[2]
h = elems[3]
gt_location = [int(x1), int(y1), int(w), int(h)]
return gt_location
def find_best_location(self, locations, gt_location):
# locations (class, x, y, w, h, prob); (x, y) is the middle pt of the rect
# gt_location (x1, y1, w, h)
x1 = gt_location[0]
y1 = gt_location[1]
w = gt_location[2]
h = gt_location[3]
gt_location_revised= [x1 + w/2, y1 + h/2, w, h]
max_ious= 0
for id, location in enumerate(locations):
location_revised = location[1:5]
print("location: ", location_revised)
print("gt_location: ", gt_location_revised)
ious = self.iou(location_revised, gt_location_revised)
if ious >= max_ious:
max_ious = ious
index = id
print("Max IOU: " + str(max_ious))
if max_ious != 0:
best_location = locations[index]
class_index = self.classes.index(best_location[0])
best_location[0]= class_index
return best_location
else: # it means the detection failed, no intersection with the ground truth
return [0, 0, 0, 0, 0, 0]
def save_yolo_output(self, out_fold, yolo_output, filename):
name_no_ext= os.path.splitext(filename)[0]
output_name= name_no_ext
path = os.path.join(out_fold, output_name)
np.save(path, yolo_output)
def location_from_0_to_1(self, wid, ht, location):
location[1] /= wid
location[2] /= ht
location[3] /= wid
location[4] /= ht
return location
def gt_location_from_0_to_1(self, wid, ht, location):
wid *= 1.0
ht *= 1.0
location[0] /= wid
location[1] /= ht
location[2] /= wid
location[3] /= ht
return location
def locations_normal(self, wid, ht, locations):
wid *= 1.0
ht *= 1.0
locations[1] *= wid
locations[2] *= ht
locations[3] *= wid
locations[4] *= ht
return locations
def cal_yolo_loss(self, location, gt_location):
# Translate yolo's box mid-point (x0, y0) to top-left point (x1, y1), in order to compare with gt
location[0] = location[0] - location[2]/2
location[1] = location[1] - location[3]/2
loss= sum([(location[i] - gt_location[i])**2 for i in range(4)]) * 100 / 4
return loss
def cal_yolo_IOU(self, location, gt_location):
# Translate yolo's box mid-point (x0, y0) to top-left point (x1, y1), in order to compare with gt
location[0] = location[0] - location[2]/2
location[1] = location[1] - location[3]/2
loss = self.iou(location, gt_location)
return loss
def prepare_training_data(self, img_fold, gt_file, out_fold): #[or]prepare_training_data(self, list_file, gt_file, out_fold):
''' Pass the data through YOLO, and get the fc_17 layer as features, and get the fc_19 layer as locations
Save the features and locations into file for training LSTM'''
# Reshape the input image
paths= self.load_folder(img_fold)
gt_locations= self.load_dataset_gt(gt_file)
avg_loss = 0
total= 0
total_time= 0
for id, path in enumerate(paths):
filename= os.path.basename(path)
print("processing: ", id, ": ", filename)
img = self.file_to_img(path)
# Pass through YOLO layers
self.h_img,self.w_img,_ = img.shape
img_resized = cv2.resize(img, (448, 448))
img_RGB = cv2.cvtColor(img_resized,cv2.COLOR_BGR2RGB)
img_resized_np = np.asarray( img_RGB )
inputs = np.zeros((1,448,448,3),dtype='float32')
inputs[0] = (img_resized_np/255.0)*2.0-1.0
in_dict = {self.x : inputs}
start_time = time.time()
feature= self.sess.run(self.fc_30,feed_dict=in_dict)
cycle_time = time.time() - start_time
print('cycle time= ', cycle_time)
total_time += cycle_time
output = self.sess.run(self.fc_32,feed_dict=in_dict) # make sure it does not run conv layers twice
locations = self.interpret_output(output[0])
gt_location = self.find_gt_location(gt_locations, id)
location = self.find_best_location(locations, gt_location) # find the ROI that has the maximum IOU with the ground truth
self.debug_location(img, location)
self.debug_gt_location(img, gt_location)
# change location into [0, 1]
loss= self.cal_yolo_IOU(location[1:5], gt_location)
location = self.location_from_0_to_1(self.w_img, self.h_img, location)
avg_loss += loss
total += 1
print("loss: ", loss)
yolo_output= np.concatenate(
( np.reshape(feature, [-1, self.num_feat]),
np.reshape(location, [-1, self.num_predict]) ),
axis = 1)
self.save_yolo_output(out_fold, yolo_output, filename)
avg_loss = avg_loss/total
print("YOLO avg_loss: ", avg_loss)
print "Time Spent on Tracking: " + str(total_time)
print "fps: " + str(id/total_time)
return
def loc_to_coordinates(self, loc):
loc = [i * 32 for i in loc]
x1= int(loc[0]- loc[2]/2)
y1= int(loc[1]- loc[3]/2)
x2= int(loc[0]+ loc[2]/2)
y2= int(loc[1]+ loc[3]/2)
return [x1, y1, x2, y2]
def coordinates_to_heatmap_vec(self, coord):
heatmap_vec = np.zeros(1024)
print(coord)
[classnum, x1, y1, x2, y2, prob] = coord
[x1, y1, x2, y2]= self.loc_to_coordinates([x1, y1, x2, y2])
for y in range(y1, y2):
for x in range(x1, x2):
index = y*32 + x
heatmap_vec[index] = 1.0
return heatmap_vec
def prepare_training_data_heatmap(self, img_fold, gt_file, out_fold): #[or]prepare_training_data(self, list_file, gt_file, out_fold):
''' Pass the data through YOLO, and get the fc_17 layer as features, and get the fc_19 layer as locations
Save the features and locations into file for training LSTM'''
# Reshape the input image
paths= self.load_folder(img_fold)
gt_locations= self.load_dataset_gt(gt_file)
avg_loss = 0
total= 0
for id, path in enumerate(paths):
filename= os.path.basename(path)
print("processing: ", id, ": ", filename)
img = self.file_to_img(path)
# Pass through YOLO layers
self.h_img,self.w_img,_ = img.shape
img_resized = cv2.resize(img, (448, 448))
img_RGB = cv2.cvtColor(img_resized,cv2.COLOR_BGR2RGB)
img_resized_np = np.asarray( img_RGB )
inputs = np.zeros((1,448,448,3),dtype='float32')
inputs[0] = (img_resized_np/255.0)*2.0-1.0
in_dict = {self.x : inputs}
feature= self.sess.run(self.fc_30,feed_dict=in_dict)
output = self.sess.run(self.fc_32,feed_dict=in_dict) # make sure it does not run conv layers twice
locations = self.interpret_output(output[0])
gt_location = self.find_gt_location(gt_locations, id)
location = self.find_best_location(locations, gt_location) # find the ROI that has the maximum IOU with the ground truth
self.debug_location(img, location)
self.debug_gt_location(img, gt_location)
# change location into [0, 1]
loss= self.cal_yolo_IOU(location[1:5], gt_location)
location = self.location_from_0_to_1(self.w_img, self.h_img, location)
heatmap_vec= self.coordinates_to_heatmap_vec(location)
avg_loss += loss
total += 1
print("loss: ", loss)
yolo_output= np.concatenate(
( np.reshape(feature, [-1, self.num_feat]),
np.reshape(heatmap_vec, [-1, self.num_heatmap]) ),
axis = 1)
self.save_yolo_output(out_fold, yolo_output, filename)
avg_loss = avg_loss/total
print("YOLO avg_loss: ", avg_loss)
return
def prepare_training_data_multiTarget(self, img_fold, out_fold):
''' Pass the data through YOLO, and get the fc_17 layer as features, and get the fc_19 layer as locations
Save the features and locations into file for training LSTM'''
# Reshape the input image
print(img_fold)
paths= self.load_folder(img_fold)
avg_loss = 0
total= 0
for id, path in enumerate(paths):
filename= os.path.basename(path)
print("processing: ", id, ": ", filename)
img = self.file_to_img(path)
# Pass through YOLO layers
self.h_img,self.w_img,_ = img.shape
img_resized = cv2.resize(img, (448, 448))
img_RGB = cv2.cvtColor(img_resized,cv2.COLOR_BGR2RGB)
img_resized_np = np.asarray( img_RGB )
inputs = np.zeros((1,448,448,3),dtype='float32')
inputs[0] = (img_resized_np/255.0)*2.0-1.0
in_dict = {self.x : inputs}
feature= self.sess.run(self.fc_30,feed_dict=in_dict)
output = self.sess.run(self.fc_32,feed_dict=in_dict) # make sure it does not run conv layers twice
locations = self.interpret_output(output[0])
self.debug_locations(img, locations)
# change location into [0, 1]
for i in range(0, len(locations)):
class_index = self.classes.index(locations[i][0])
locations[i][0] = class_index
locations[i] = self.location_from_0_to_1(self.w_img, self.h_img, locations[i])
if len(locations)== 1:
print('len(locations)= 1\n')
yolo_output = [[np.reshape(feature, [-1, self.num_feat])], [np.reshape(locations, [-1, self.num_predict]), [0,0,0,0,0,0]]]
else:
yolo_output = [[np.reshape(feature, [-1, self.num_feat])], [np.reshape(locations, [-1, self.num_predict])]]
self.save_yolo_output(out_fold, yolo_output, filename)
return
'''----------------------------------------main-----------------------------------------------------'''
def main(argvs):
yolo = YOLO_TF(argvs)
test = 4
heatmap= False#True
'''
VOT30
0:'Human2'
1:'Human9'
2:'Gym'
3:'Human8'
4:'Skater'
5:'Suv'
6:'BlurBody'
7:'CarScale'
8:'Dancer2'
9:'BlurCar1'
10:'Dog'
11:'Jump'
12:'Singer2'
13:'Woman'
14:'David3'
15:'Dancer'
16:'Human7'
17:'Bird1'
18:'Car4'
19:'CarDark'
20:'Couple'
21:'Diving'
22:'Human3'
23:'Skating1'
24:'Human6'
25:'Singer1'
26:'Skater2'
27:'Walking2'
28:'BlurCar3'
29:'Girl2'
MOT2016
30:'MOT16-02'
31:'MOT16-04'
32:'MOT16-05'
33:'MOT16-09'
34:'MOT16-10'
35:'MOT16-11'
36:'MOT16-13'
37:'MOT16-01'
38:'MOT16-03'
39:'MOT16-06'
40:'MOT16-07'
41:'MOT16-08'
42:'MOT16-12'
43:'MOT16-14'
'''
[yolo.w_img, yolo.h_img, sequence_name, dummy_1, dummy_2]= util.choose_video_sequence(test)
if (test >= 0 and test <= 29) or (test >= 90):
root_folder = 'benchmark/DATA'
img_fold = os.path.join(root_folder, sequence_name, 'img/')
elif test<= 36:
root_folder = 'benchmark/MOT/MOT2016/train'
img_fold = os.path.join(root_folder, sequence_name, 'img1/')
elif test<= 43:
root_folder = 'benchmark/MOT/MOT2016/test'
img_fold = os.path.join(root_folder, sequence_name, 'img1/')
gt_file = os.path.join(root_folder, sequence_name, 'groundtruth_rect.txt')
out_fold = os.path.join(root_folder, sequence_name, 'yolo_out/')
heat_fold = os.path.join(root_folder, sequence_name, 'yolo_heat/')
yolo.createFolder(out_fold)
yolo.createFolder(heat_fold)
if heatmap is True:
yolo.prepare_training_data_heatmap(img_fold, gt_file, heat_fold)
else:
if (test >= 0 and test <= 29) or (test >= 90):
yolo.prepare_training_data(img_fold,gt_file,out_fold)
else:
yolo.prepare_training_data_multiTarget(img_fold,out_fold)
if __name__=='__main__':
main(sys.argv)
| 35.6793
| 209
| 0.664774
| 22,168
| 0.905704
| 0
| 0
| 0
| 0
| 0
| 0
| 4,213
| 0.172128
|
673cf80cda7d6f2ddfed4ffa2f717379b2c4aa55
| 3,146
|
py
|
Python
|
pipenv/cmdparse.py
|
sthagen/pipenv
|
0924f75fd1004c848ea67d4272315eda4210b352
|
[
"MIT"
] | 23
|
2017-01-20T01:18:31.000Z
|
2017-01-20T17:25:11.000Z
|
pipenv/cmdparse.py
|
sthagen/pipenv
|
0924f75fd1004c848ea67d4272315eda4210b352
|
[
"MIT"
] | 1
|
2017-01-20T05:13:58.000Z
|
2017-01-20T05:13:58.000Z
|
pipenv/cmdparse.py
|
sthagen/pipenv
|
0924f75fd1004c848ea67d4272315eda4210b352
|
[
"MIT"
] | null | null | null |
import itertools
import re
import shlex
class ScriptEmptyError(ValueError):
pass
def _quote_if_contains(value, pattern):
if next(iter(re.finditer(pattern, value)), None):
return '"{0}"'.format(re.sub(r'(\\*)"', r'\1\1\\"', value))
return value
class Script(object):
"""Parse a script line (in Pipfile's [scripts] section).
This always works in POSIX mode, even on Windows.
"""
def __init__(self, command, args=None):
self._parts = [command]
if args:
self._parts.extend(args)
@classmethod
def parse(cls, value):
if isinstance(value, str):
value = shlex.split(value)
if not value:
raise ScriptEmptyError(value)
return cls(value[0], value[1:])
def __repr__(self):
return "Script({0!r})".format(self._parts)
@property
def command(self):
return self._parts[0]
@property
def args(self):
return self._parts[1:]
@property
def cmd_args(self):
return self._parts
def extend(self, extra_args):
self._parts.extend(extra_args)
def cmdify(self):
"""Encode into a cmd-executable string.
This re-implements CreateProcess's quoting logic to turn a list of
arguments into one single string for the shell to interpret.
* All double quotes are escaped with a backslash.
* Existing backslashes before a quote are doubled, so they are all
escaped properly.
* Backslashes elsewhere are left as-is; cmd will interpret them
literally.
The result is then quoted into a pair of double quotes to be grouped.
An argument is intentionally not quoted if it does not contain
foul characters. This is done to be compatible with Windows built-in
commands that don't work well with quotes, e.g. everything with `echo`,
and DOS-style (forward slash) switches.
Foul characters include:
* Whitespaces.
* Carets (^). (pypa/pipenv#3307)
* Parentheses in the command. (pypa/pipenv#3168)
Carets introduce a difficult situation since they are essentially
"lossy" when parsed. Consider this in cmd.exe::
> echo "foo^bar"
"foo^bar"
> echo foo^^bar
foo^bar
The two commands produce different results, but are both parsed by the
shell as `foo^bar`, and there's essentially no sensible way to tell
what was actually passed in. This implementation assumes the quoted
variation (the first) since it is easier to implement, and arguably
the more common case.
The intended use of this function is to pre-process an argument list
before passing it into ``subprocess.Popen(..., shell=True)``.
See also: https://docs.python.org/3/library/subprocess.html#converting-argument-sequence
"""
return " ".join(
itertools.chain(
[_quote_if_contains(self.command, r"[\s^()]")],
(_quote_if_contains(arg, r"[\s^]") for arg in self.args),
)
)
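# Illustrative example (added for clarity) of the quoting described above:
#     Script.parse('echo "hello world" ^caret').cmdify()
#     -> 'echo "hello world" "^caret"'
# 'echo' stays unquoted, while the whitespace-containing and caret-containing
# arguments are each wrapped in double quotes.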
| 30.543689
| 96
| 0.62206
| 2,919
| 0.927845
| 0
| 0
| 399
| 0.126828
| 0
| 0
| 1,959
| 0.622695
|
673d6da7ddbe2f62dc10d702de83d4dd27b4df32
| 1,059
|
py
|
Python
|
msph/clients/ms_online.py
|
CultCornholio/solenya
|
583cb5f36825808c7cdc2de03f565723a32ae8d3
|
[
"MIT"
] | 11
|
2021-09-01T05:04:08.000Z
|
2022-02-17T01:09:58.000Z
|
msph/clients/ms_online.py
|
CultCornholio/solenya
|
583cb5f36825808c7cdc2de03f565723a32ae8d3
|
[
"MIT"
] | null | null | null |
msph/clients/ms_online.py
|
CultCornholio/solenya
|
583cb5f36825808c7cdc2de03f565723a32ae8d3
|
[
"MIT"
] | 2
|
2021-09-08T19:12:53.000Z
|
2021-10-05T17:52:11.000Z
|
from .framework import Client, Resource
from . import constants as const
client = Client(
base_url='https://login.microsoftonline.com',
base_headers={
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0',
'Content-Type': 'application/x-www-form-urlencoded',
}
)
@client.endpoint
def get_device_code(client_id:str) -> str:
return Resource(
uri='/organizations/oauth2/v2.0/devicecode',
data={"client_id": client_id, "scope": const.DEVICE_CODE_SCOPE},
)
@client.endpoint
def get_access_token(client_id:str, device_code:str) -> dict:
return Resource(
uri='/organizations/oauth2/v2.0/token',
data={"grant_type": const.ACCESS_TOKEN_GRANT, "client_id": client_id, "code": device_code},
)
@client.endpoint
def refresh_access_token(refresh_token:str, target_id:str) -> dict:
return Resource(
uri='/common/oauth2/v2.0/token',
data={'grant_type': 'refresh_token', 'refresh_token': refresh_token, 'scope': const.DEVICE_CODE_SCOPE}
)
| 32.090909
| 110
| 0.686497
| 0
| 0
| 0
| 0
| 728
| 0.687441
| 0
| 0
| 362
| 0.341832
|
673f2e75107755cce6965c485de6141329c56f72
| 1,868
|
py
|
Python
|
warn/platforms/job_center/cache.py
|
anikasikka/warn-scraper
|
13efac478ac06982bf68ce67e15db976ac07f101
|
[
"Apache-2.0"
] | 12
|
2022-01-18T20:04:41.000Z
|
2022-03-24T21:26:31.000Z
|
warn/platforms/job_center/cache.py
|
anikasikka/warn-scraper
|
13efac478ac06982bf68ce67e15db976ac07f101
|
[
"Apache-2.0"
] | 163
|
2022-01-14T19:30:23.000Z
|
2022-03-31T23:48:48.000Z
|
warn/platforms/job_center/cache.py
|
anikasikka/warn-scraper
|
13efac478ac06982bf68ce67e15db976ac07f101
|
[
"Apache-2.0"
] | 4
|
2022-01-19T20:40:13.000Z
|
2022-02-22T21:36:34.000Z
|
import logging
import re
from warn.cache import Cache as BaseCache
from .urls import urls
logger = logging.getLogger(__name__)
class Cache(BaseCache):
"""A custom cache for Job Center sites."""
def save(self, url, params, html):
"""Save file to the cache."""
cache_key = self.key_from_url(url, params)
self.write(cache_key, html)
logger.debug(f"Saved to cache: {cache_key}")
def fetch(self, url, params):
"""Fetch file from the cache."""
cache_key = self.key_from_url(url, params)
content = self.read(cache_key)
logger.debug(f"Fetched from cache: {cache_key}")
return content
def key_from_url(self, url, params=None):
"""Convert a URL to a cache key."""
page_type = (
"records" if re.search(r"warn_lookups/\d+$", url) else "search_results"
)
if page_type == "records":
record_number = url.rsplit("/")[-1]
cache_key = f"records/{record_number}.html"
# Otherwise this is an initial search with params or a downstream page URL
else:
start_key = "q[notice_on_gteq]"
end_key = "q[notice_on_lteq]"
# For downstream pages, extract start/end dates + page number
if "page" in url:
parsed_params = urls.parse_url_query(url)
page_num = urls.page_num_from_url(url)
start = parsed_params[start_key][0]
end = parsed_params[end_key][0]
# For initial search page, get metadata from params
else:
if not params:
params = {}
start = params[start_key]
end = params[end_key]
page_num = 1
cache_key = f"search_results/{start}_{end}_page{page_num}.html"
return cache_key
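# Illustrative keys derived by this method (added for clarity):
#     a record URL ending in "warn_lookups/123" -> "records/123.html"
#     a first search with start 2021-01-01 and end 2021-12-31
#         -> "search_results/2021-01-01_2021-12-31_page1.html"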
| 34.592593
| 83
| 0.579764
| 1,735
| 0.928801
| 0
| 0
| 0
| 0
| 0
| 0
| 571
| 0.305675
|
673f39d965787c5f1eaa35294c38eb2b5dda219c
| 7,312
|
py
|
Python
|
ebcli/core/abstractcontroller.py
|
senstb/aws-elastic-beanstalk-cli
|
ef27ae50e8be34ccbe29bc6dc421323bddc3f485
|
[
"Apache-2.0"
] | 110
|
2020-01-15T22:58:46.000Z
|
2022-03-27T20:47:33.000Z
|
ebcli/core/abstractcontroller.py
|
senstb/aws-elastic-beanstalk-cli
|
ef27ae50e8be34ccbe29bc6dc421323bddc3f485
|
[
"Apache-2.0"
] | 89
|
2020-01-15T23:18:34.000Z
|
2022-03-31T21:56:05.000Z
|
ebcli/core/abstractcontroller.py
|
senstb/aws-elastic-beanstalk-cli
|
ef27ae50e8be34ccbe29bc6dc421323bddc3f485
|
[
"Apache-2.0"
] | 50
|
2020-01-15T22:58:53.000Z
|
2022-02-11T17:39:28.000Z
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import textwrap
import json
import sys
import os
from cement.core import controller
from ebcli import __version__
from ebcli.core.ebglobals import Constants
from ebcli.lib import elasticbeanstalk, utils
from ebcli.core import io, fileoperations
from ebcli.objects.exceptions import (
NoEnvironmentForBranchError,
PlatformWorkspaceNotSupportedError,
ApplicationWorkspaceNotSupportedError,
EBCLIException,
NotInitializedError
)
from ebcli.resources.strings import strings, flag_text
from ebcli.objects import region
from ebcli.operations import commonops
class AbstractBaseController(controller.CementBaseController):
"""
This is an abstract base class that is useless on its own, but used
by other classes to sub-class from and to share common commands and
arguments.
"""
class Meta:
label = 'abstract'
stacked_on = 'base'
stacked_type = 'nested'
arguments = [
(['environment_name'], dict(action='store', nargs='?',
default=[],
help=flag_text['general.env'])),
]
epilog = ''
usage = 'eb {cmd} <environment_name> [options ...]'
def do_command(self):
pass
@classmethod
def validate_workspace(cls):
workspace_type = fileoperations.get_workspace_type(None)
is_platform_workspace_only_command = cls.Meta.__dict__.get(
'is_platform_workspace_only_command'
)
requires_directory_initialization = cls.Meta.__dict__.get(
'requires_directory_initialization'
)
if '--modules' in sys.argv:
pass
elif '--help' in sys.argv:
pass
elif cls.__name__ == 'PlatformListController' or cls.__name__ == 'EBPListController':
pass
elif requires_directory_initialization and not workspace_type:
raise NotInitializedError(strings['exit.notsetup'])
elif is_platform_workspace_only_command:
if Constants.WorkSpaceTypes.APPLICATION == workspace_type:
raise ApplicationWorkspaceNotSupportedError(
strings['exit.applicationworkspacenotsupported']
)
@controller.expose(hide=True)
def default(self):
"""
This command will be shared within all controllers that sub-class
from here. It can also be overridden in the sub-class
"""
self.validate_workspace()
self.do_command()
self.check_for_cli_update(__version__)
def check_workspace_type(self, expected_type):
workspace_type = fileoperations.get_workspace_type()
if workspace_type != expected_type:
if Constants.WorkSpaceTypes.PLATFORM == workspace_type:
raise PlatformWorkspaceNotSupportedError(
strings['exit.platformworkspacenotsupported']
)
if Constants.WorkSpaceTypes.APPLICATION == workspace_type:
raise ApplicationWorkspaceNotSupportedError(
strings['exit.applicationworkspacenotsupported']
)
def check_for_cli_update(self, version):
label = self.Meta.label
if label in ('create', 'deploy', 'status', 'clone', 'config'):
if cli_update_exists(version):
if self.check_install_script_used():
io.log_alert(strings['base.update_available_script_install'])
else:
io.log_alert(strings['base.update_available'])
def get_app_name(self):
app_name = fileoperations.get_application_name()
return app_name
def get_env_name(self, cmd_example=None, noerror=False, varname='environment_name'):
env_name = getattr(self.app.pargs, varname, None)
if not env_name:
env_name = commonops. \
get_current_branch_environment()
workspace_type = fileoperations.get_workspace_type(Constants.WorkSpaceTypes.APPLICATION)
if not env_name:
if Constants.WorkSpaceTypes.PLATFORM == workspace_type:
raise EBCLIException(strings['platform.nobuilderenv'])
if noerror:
return None
if not cmd_example:
message = strings['branch.noenv'].replace('{cmd}',
self.Meta.label)
else:
message = strings['branch.noenv'].replace('eb {cmd}',
cmd_example)
io.log_error(message)
raise NoEnvironmentForBranchError()
return env_name
def check_install_script_used(self):
return '.ebcli-virtual-env' in os.path.abspath(__file__)
@classmethod
def _add_to_handler(cls, handler):
handler.register(cls)
@property
def _help_text(self):
"""
Returns the help text displayed when for the commands of the type `eb <command> <subcommand>`
except where <command> is "platform".
"""
longest = 0
def pad(label):
padlength = longest - len(label) + 2
padding = ' '
if padlength < 0:
for x in range(0, longest):
padding += ' '
else:
for x in range(0, padlength):
padding += ' '
return padding
help_txt = ''
for label in self._visible_commands:
if len(label) > longest:
longest = len(label)
for label in self._visible_commands:
cmd = self._dispatch_map[label]
cmd_txt = ' '
cmd_name = label
cmd_aliases = cmd['aliases']
if len(cmd_aliases) > 0 and cmd['aliases_only']:
cmd_name = cmd_aliases.pop(0)
cmd_txt += '{}'.format(cmd_name)
if cmd['help']:
cmd_txt += '{}{}'.format(pad(cmd_txt), cmd['help'])
if len(cmd_aliases) > 0:
cmd_txt += '\n{}(alias: {})'.format(pad(''), ', '.join(cmd_aliases))
cmd_txt += '\n'
help_txt += cmd_txt
if len(help_txt) > 0:
txt = '''{}
commands:
{}
'''.format(self._meta.description, help_txt)
else:
txt = self._meta.description
return textwrap.dedent(txt)
def cli_update_exists(current_version):
try:
data = utils.get_data_from_url(
'https://pypi.python.org/pypi/awsebcli/json', timeout=5)
data = json.loads(data)
latest = data['info']['version']
return latest != current_version
except:
return False
| 33.085973
| 101
| 0.603665
| 5,865
| 0.802106
| 0
| 0
| 2,921
| 0.39948
| 0
| 0
| 1,787
| 0.244393
|
673f86c193b95f2ceb11fd09422584819f2d7221
| 346
|
py
|
Python
|
python/speaktest.py
|
kyle-cook/templates
|
f1047a8c31a42507acbd7a27e66db0825be811a6
|
[
"MIT"
] | null | null | null |
python/speaktest.py
|
kyle-cook/templates
|
f1047a8c31a42507acbd7a27e66db0825be811a6
|
[
"MIT"
] | null | null | null |
python/speaktest.py
|
kyle-cook/templates
|
f1047a8c31a42507acbd7a27e66db0825be811a6
|
[
"MIT"
] | null | null | null |
import unittest
import speak
class SpeakTests(unittest.TestCase):
"""
Unit test for the speak library
"""
def testHello(self):
self.assertEqual("Hello World!", speak.helloworld())
def testGoodbye(self):
self.assertEqual("Goodbye World!", speak.goodbyeworld())
if __name__ == "__main__":
unittest.main()
| 21.625
| 64
| 0.66474
| 267
| 0.771676
| 0
| 0
| 0
| 0
| 0
| 0
| 87
| 0.251445
|
674032fc8a912ba3dd53e6c5a60619d54e34cbd4
| 482
|
py
|
Python
|
c2f_loop.py
|
devopsprosiva/python
|
07311d7597c0895554efe8013b57f218a0f11bb5
|
[
"MIT"
] | null | null | null |
c2f_loop.py
|
devopsprosiva/python
|
07311d7597c0895554efe8013b57f218a0f11bb5
|
[
"MIT"
] | null | null | null |
c2f_loop.py
|
devopsprosiva/python
|
07311d7597c0895554efe8013b57f218a0f11bb5
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python
import sys
temperatures=[10,-20,-289,100]
def c2f(cel_temp):
if cel_temp < -273.15:
return "The lowest possible temperature that physical matter can reach is -273.15C"
else:
fah_temp=(cel_temp*1.8)+32
return fah_temp
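# Quick sanity check of the formula: c2f(100) -> (100 * 1.8) + 32 = 212.0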
# open the output file once and keep it open for the whole loop so a new
# handle is not leaked on every iteration
with open('temperatures.txt', 'a+') as outfile:
for temp in temperatures:
if temp > -273.15:
outfile.write(str(c2f(temp)))
outfile.write("\n")
| 22.952381
| 91
| 0.620332
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 125
| 0.259336
|
67409afcdfe55eae6e448c076e01c6ac7a7788be
| 2,285
|
py
|
Python
|
problems/eggs/services/confirm_min_throws_server.py
|
giuliagalvan/TAlight
|
3471ea9c7f13ade595ae579db0713135da849f13
|
[
"MIT"
] | null | null | null |
problems/eggs/services/confirm_min_throws_server.py
|
giuliagalvan/TAlight
|
3471ea9c7f13ade595ae579db0713135da849f13
|
[
"MIT"
] | null | null | null |
problems/eggs/services/confirm_min_throws_server.py
|
giuliagalvan/TAlight
|
3471ea9c7f13ade595ae579db0713135da849f13
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# METADATA OF THIS TAL_SERVICE:
problem="eggs"
service="confirm_min_throws"
args_list = [
('min',int),
('n_eggs',int),
('n_floors',int),
('lang',str),
('ISATTY',bool),
]
from sys import stderr, exit, argv
from random import randrange
from math import inf as IMPOSSIBLE
from multilanguage import Env, Lang, TALcolors
ENV =Env(problem, service, args_list)
TAc =TALcolors(ENV)
LANG=Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))
TAc.print(LANG.opening_msg, "green")
# START CODING YOUR SERVICE:
# INITIALIZATION: allocation, base cases, sentinels
table = [ [0] + [IMPOSSIBLE] * ENV['n_floors'] ]
for u in range(ENV['n_eggs']):
table.append([0] + [None] * ENV['n_floors'])
# INDUCTIVE STEP: the min-max recursion, with nature playing against us
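# Recurrence filled in below: with u eggs and f candidate floors,
#   table[u][f] = min over l in 1..f of 1 + max(table[u][f-l],   (egg survives: the f-l floors above l remain)
#                                               table[u-1][l-1]) (egg breaks: the l-1 floors below l remain)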
for u in range(1,1+ENV['n_eggs']):
for f in range(1,1+ENV['n_floors']):
table[u][f] = IMPOSSIBLE
for first_launch_floor in range(1,1+f):
table[u][f] = min(table[u][f],1+max(table[u][f-first_launch_floor],table[u-1][first_launch_floor-1]))
if table[ENV['n_eggs']][ENV['n_floors']] < ENV['min']:
print(f"No! When you are given {ENV['n_eggs']} eggs and the floors are {ENV['n_floors']} then there exists a policy that guarantees you to find out the truth in strictly less than {ENV['min']} launches, whatever will happen (worst case).")
#English: print("No! When you are given {ENV['n_eggs']} eggs and the floors are {ENV['n_floors']} then there exists a policy that guarantees you to find out the truth in strictly less than {ENV['min']} launches, whatever will happen (worst case).")
if table[ENV['n_eggs']][ENV['n_floors']] > ENV['min']:
print(f"No! When you are given {ENV['n_eggs']} eggs and the floors are {ENV['n_floors']} then no policy guarantees you to find out the truth within {ENV['min']} launches in every possible scenario (aka, whathever the truth is).")
#English:
if table[ENV['n_eggs']][ENV['n_floors']] == ENV['min']:
print(f"Yes! Indeed, {ENV['min']} is the smallest possible natural B such that, when you are given {ENV['n_eggs']} eggs and the floors are {ENV['n_floors']}, still there exists a policy that guarantees you to find out the truth within B launches in every possible scenario.")
#English:
exit(0)
| 46.632653
| 279
| 0.688403
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,389
| 0.607877
|
67416b98862ed94f8c8dd26ec4773d955430f943
| 460
|
py
|
Python
|
pylox/error_reporting.py
|
hculpan/pylox
|
a5bde624f289115575e9e01bd171b6271c2e899a
|
[
"MIT"
] | 1
|
2018-05-18T08:16:02.000Z
|
2018-05-18T08:16:02.000Z
|
pylox/error_reporting.py
|
hculpan/pylox
|
a5bde624f289115575e9e01bd171b6271c2e899a
|
[
"MIT"
] | null | null | null |
pylox/error_reporting.py
|
hculpan/pylox
|
a5bde624f289115575e9e01bd171b6271c2e899a
|
[
"MIT"
] | null | null | null |
errorFound = False
def hasError():
global errorFound
return errorFound
def clearError():
global errorFound
errorFound = False
def error(message, lineNo = 0):
report(lineNo, "", message)
def report(lineNo, where, message):
global errorFound
errorFound = True
if lineNo == 0:
print("Error {1}: {2}".format(lineNo, where, message))
else:
print("[Line {0}] Error {1}: {2}".format(lineNo, where, message))
| 19.166667
| 73
| 0.630435
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.097826
|
674628d16822f8d4efcc764dcb583fc1ae5fb351
| 86
|
py
|
Python
|
tests/syntax/scripts/annotated_comments.py
|
toddrme2178/pyccel
|
deec37503ab0c5d0bcca1a035f7909f7ce8ef653
|
[
"MIT"
] | null | null | null |
tests/syntax/scripts/annotated_comments.py
|
toddrme2178/pyccel
|
deec37503ab0c5d0bcca1a035f7909f7ce8ef653
|
[
"MIT"
] | null | null | null |
tests/syntax/scripts/annotated_comments.py
|
toddrme2178/pyccel
|
deec37503ab0c5d0bcca1a035f7909f7ce8ef653
|
[
"MIT"
] | null | null | null |
#$ header variable x :: int
#$ acc parallel private(idx)
#$ omp parallel private(idx)
| 21.5
| 28
| 0.697674
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 83
| 0.965116
|
6746ba919e9bbb1f397db2429492049488882aa8
| 1,361
|
py
|
Python
|
server/admin.py
|
allisto/allistic-server
|
848edb71b4709ad0734b83a43de4ac8c58e88fdf
|
[
"Apache-2.0"
] | 5
|
2019-03-04T08:28:08.000Z
|
2019-03-05T05:55:55.000Z
|
server/admin.py
|
allisto/allistic-server
|
848edb71b4709ad0734b83a43de4ac8c58e88fdf
|
[
"Apache-2.0"
] | 7
|
2019-03-03T19:45:02.000Z
|
2021-03-18T21:26:08.000Z
|
server/admin.py
|
allisto/allistic-server
|
848edb71b4709ad0734b83a43de4ac8c58e88fdf
|
[
"Apache-2.0"
] | 1
|
2019-03-01T11:15:07.000Z
|
2019-03-01T11:15:07.000Z
|
from django.contrib import admin
from .models import Doctor, ConsultationTime, Medicine, Allergy, Child, Parent
admin.site.site_header = "Allisto - We Do Good"
@admin.register(Doctor)
class DoctorAdmin(admin.ModelAdmin):
list_display = ('name', 'aadhar_number', 'specialization', 'email', 'phone_number')
list_filter = ('specialization', 'consultation_fee', 'working_hours')
search_fields = ('name', 'specialization', 'consultation_fee')
@admin.register(Parent)
class ParentAdmin(admin.ModelAdmin):
list_display = ('name', 'aadhar_number', 'email', 'phone_number', 'address')
list_filter = ('name', 'email', 'phone_number')
search_fields = ('name', 'aadhar_number', 'email', 'phone_number', 'address')
@admin.register(Child)
class ChildAdmin(admin.ModelAdmin):
list_display = ('name', 'autistic', 'birthday', 'gender')
list_filter = ('name', 'autistic', 'birthday')
search_fields = ('name', 'autistic', 'birthday')
@admin.register(Allergy)
class AllergyAdmin(admin.ModelAdmin):
list_display = ('name', 'description')
list_filter = ('name', 'description')
search_fields = ('name',)
@admin.register(Medicine)
class MedicineAdmin(admin.ModelAdmin):
list_display = ('name', 'description')
list_filter = ('name', 'description')
search_fields = ('name',)
admin.site.register(ConsultationTime)
| 30.931818
| 87
| 0.702425
| 1,022
| 0.750918
| 0
| 0
| 1,144
| 0.840558
| 0
| 0
| 472
| 0.346804
|
67470f3c7a77e0bc298ea17e0cb678c91fe2570a
| 4,067
|
py
|
Python
|
backend/ir/ir.py
|
zengljnwpu/yaspc
|
5e85efb5fb8bee02471814b10e950dfb5b04c5d5
|
[
"MIT"
] | null | null | null |
backend/ir/ir.py
|
zengljnwpu/yaspc
|
5e85efb5fb8bee02471814b10e950dfb5b04c5d5
|
[
"MIT"
] | null | null | null |
backend/ir/ir.py
|
zengljnwpu/yaspc
|
5e85efb5fb8bee02471814b10e950dfb5b04c5d5
|
[
"MIT"
] | null | null | null |
from backend.entity.entity import DefinedFuntion
from backend.ir.dumper import Dumper
from backend.ir.stmt import Assign
from backend.ir.stmt import Return
from backend.ir.expr import Bin
from backend.ir.expr import Call
from backend.entity.scope import *
def import_ir(data, asm_file):
def_vars = list()
def_funs = list()
for i in data["variablelist"]:
t = DefinedVariable(
name=i["name"], type=i["type"], priv=i["is_private"], init=i["value"])
def_vars.append(t)
for i in data["functionlist"]:
t = DefinedFuntion(priv=False, body=i["body"], name=i["name"],
params=i["parameterlist"], type=i["type"], scope=LocalScope(i["variablelist"]))
def_funs.append(t)
ir = IR(source=asm_file, defuns=def_funs, defvars=def_vars,
constant_table=None, funcdecls=None, scope=None)
return ir
def inst_factory(insn):
if insn["name"] == "store":
return Assign(loc=insn["line_number"], lhs=insn["address"], rhs=insn["value"])
elif insn["name"] == "return":
return Return(loc=insn["line_number"], expr=insn["expr"])
elif insn["name"] == "bin":
return Bin(left=insn["left"], right=insn["right"], op=insn["op"], type=insn["type"], value=insn["value"])
elif insn["name"] == "call":
return Call(args=insn["args"], expr=insn["expr"], type=insn["type"])
else:
raise Exception("Feature not implemented")
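# Illustrative mapping (added for clarity; addr and val stand for whatever
# operands the JSON carries): {"name": "store", "line_number": 3,
# "address": addr, "value": val} becomes Assign(loc=3, lhs=addr, rhs=val);
# unrecognised instruction names raise the exception above.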
# This class is used to import the IR from JSON text
class IR ():
def __init__(self,
source,
defvars,
defuns,
funcdecls,
constant_table,
scope):
self.source = source
self.defvars = defvars
self.defuns = defuns
self.funcdecls = funcdecls
self.scope = scope
self.constant_table = constant_table
self.gvars = []
self.comms = []
def file_name(self):
return self.source
def location(self):
return self.source
def defined_variables(self):
return self.defvars
def is_function_defined(self):
if self.defuns:
return True
else:
return False
def defined_funcitons(self):
return self.defuns
def scope(self):
return self.scope
def all_functions(self):
result = []
if self.defuns:
result.extend(self.defuns)
if self.funcdecls:
result.extend(self.funcdecls)
return result
def init_variables(self):
        self.gvars = []
        self.comms = []
        for var in self.scope.defined_glabal_scope_variables():
            if var.has_initializer:
self.gvars.append(var)
else:
self.comms.append(var)
#a list of all defined/declared global-scope variables
def all_global_variables(self):
#return self.scope.all_global_variables()
return self.defvars
def is_global_variable_defined(self):
        if self.defined_global_variables():
return True
else:
return False
#Returns the list of global variables.
def defined_global_variables(self):
'''
if not self.gvars:
self.init_variables()
else:
return self.gvars
'''
return self.defvars
def is_common_symbol_defined(self):
if self.defined_common_symbols():
return True
else:
return False
def defined_common_symbols(self):
        if not self.comms:
            self.init_variables()
        return self.comms
def is_string_literal_defined(self):
if self.constant_table:
return True
else:
return False
def const_table(self):
return self.constant_table
def dump(self):
d = Dumper()
d.print_class(self, self.source)
d.print_vars("variables", self.defvars)
d.print_funs("function", self.defuns)
| 27.856164
| 113
| 0.588149
| 2,560
| 0.629457
| 0
| 0
| 0
| 0
| 0
| 0
| 604
| 0.148512
|
67475ec9e070602cd855d1d0690b385ad1b9adb8
| 10,060
|
py
|
Python
|
forest/benchmarking/tests/test_superoperator_transformations.py
|
stjordanis/forest-benchmarking
|
f9ad9701c2d253de1a0c922d7220ed7de75ac685
|
[
"Apache-2.0"
] | 40
|
2019-01-25T18:35:24.000Z
|
2022-03-13T11:21:18.000Z
|
forest/benchmarking/tests/test_superoperator_transformations.py
|
stjordanis/forest-benchmarking
|
f9ad9701c2d253de1a0c922d7220ed7de75ac685
|
[
"Apache-2.0"
] | 140
|
2019-01-25T20:09:02.000Z
|
2022-03-12T01:08:01.000Z
|
forest/benchmarking/tests/test_superoperator_transformations.py
|
stjordanis/forest-benchmarking
|
f9ad9701c2d253de1a0c922d7220ed7de75ac685
|
[
"Apache-2.0"
] | 22
|
2019-02-01T13:18:35.000Z
|
2022-01-12T15:03:13.000Z
|
import numpy as np
from pyquil.gate_matrices import X, Y, Z, H
from forest.benchmarking.operator_tools.superoperator_transformations import *
# Test philosophy:
# Using the by hand calculations found in the docs we check conversion
# between one qubit channels with one Kraus operator (Hadamard) and two
# Kraus operators (the amplitude damping channel). Additionally we check
# a few two qubit channel conversions to get additional confidence.
def amplitude_damping_kraus(p):
Ad0 = np.asarray([[1, 0], [0, np.sqrt(1 - p)]])
Ad1 = np.asarray([[0, np.sqrt(p)], [0, 0]])
return [Ad0, Ad1]
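# Sanity note: for any p in [0, 1] these two operators satisfy the completeness
# relation Ad0^dag @ Ad0 + Ad1^dag @ Ad1 = I, which is checked explicitly in
# test_kraus_ops_sum_to_identity below.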
def amplitude_damping_chi(p):
poly1 = (1 + np.sqrt(1 - p)) ** 2
poly2 = (-1 + np.sqrt(1 - p)) ** 2
ad_pro = 0.25 * np.asarray([[poly1, 0, 0, p],
[0, p, -1j * p, 0],
[0, 1j * p, p, 0],
[p, 0, 0, poly2]])
return ad_pro
def amplitude_damping_pauli(p):
poly1 = np.sqrt(1 - p)
ad_pau = np.asarray([[1, 0, 0, 0],
[0, poly1, 0, 0],
[0, 0, poly1, 0],
[p, 0, 0, 1 - p]])
return ad_pau
def amplitude_damping_super(p):
poly1 = np.sqrt(1 - p)
ad_sup = np.asarray([[1, 0, 0, p],
[0, poly1, 0, 0],
[0, 0, poly1, 0],
[0, 0, 0, 1 - p]])
return ad_sup
def amplitude_damping_choi(p):
poly1 = np.sqrt(1 - p)
ad_choi = np.asarray([[1, 0, 0, poly1],
[0, 0, 0, 0],
[0, 0, p, 0],
[poly1, 0, 0, 1 - p]])
return ad_choi
HADChi = 0.5 * np.asarray([[0, 0, 0, 0],
[0, 1, 0, 1],
[0, 0, 0, 0],
[0, 1, 0, 1]])
HADPauli = 1.0 * np.asarray([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, -1, 0],
[0, 1, 0, 0]])
HADSuper = 0.5 * np.asarray([[1, 1, 1, 1],
[1, -1, 1, -1],
[1, 1, -1, -1],
[1, -1, -1, 1]])
HADChoi = 0.5 * np.asarray([[1, 1, 1, -1],
[1, 1, 1, -1],
[1, 1, 1, -1],
[-1, -1, -1, 1]])
# Single Qubit Pauli Channel
def one_q_pauli_channel_chi(px, py, pz):
p = (px + py + pz)
pp_chi = np.asarray([[1 - p, 0, 0, 0],
[0, px, 0, 0],
[0, 0, py, 0],
[0, 0, 0, pz]])
return pp_chi
# Pauli twirled Amplitude damping channel
def analytical_pauli_twirl_of_AD_chi(p):
# see equation 7 of https://arxiv.org/pdf/1701.03708.pdf
poly1 = (2 + 2 * np.sqrt(1 - p) - p) / 4
poly2 = p / 4
poly3 = (2 - 2 * np.sqrt(1 - p) - p) / 4
pp_chi = np.asarray([[poly1, 0, 0, 0],
[0, poly2, 0, 0],
[0, 0, poly2, 0],
[0, 0, 0, poly3]])
return pp_chi
# I \otimes Z channel or gate (two qubits)
two_qubit_paulis = n_qubit_pauli_basis(2)
IZKraus = two_qubit_paulis.ops_by_label['IZ']
IZSuper = np.diag([1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1])
# one and zero state as a density matrix
ONE_STATE = np.asarray([[0, 0], [0, 1]])
ZERO_STATE = np.asarray([[1, 0], [0, 0]])
# Amplitude damping Kraus operators with p = 0.1
AdKrausOps = amplitude_damping_kraus(.1)
# Use Kraus operators to find output of channel i.e.
# rho_out = A_0 rho A_0^\dag + A_1 rho A_1^\dag.
rho_out = np.matmul(np.matmul(AdKrausOps[0], ONE_STATE), AdKrausOps[0].transpose().conj()) + \
np.matmul(np.matmul(AdKrausOps[1], ONE_STATE), AdKrausOps[1].transpose().conj())
def test_vec():
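    # vec() stacks the columns of its argument into a single column vector
    # (column-major order), as the expected arrays below illustrate.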
A = np.asarray([[1, 2], [3, 4]])
B = np.asarray([[1, 2, 5], [3, 4, 6]])
np.testing.assert_array_equal(np.array([[1], [3], [2], [4]]), vec(A))
np.testing.assert_array_equal(np.array([[1], [3], [2], [4], [5], [6]]), vec(B))
def test_unvec():
A = np.asarray([[1, 2], [3, 4]])
C = np.asarray([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
np.testing.assert_array_equal(A, unvec(vec(A)))
np.testing.assert_array_equal(C, unvec(vec(C)))
def test_kraus_ops_sum_to_identity():
# Check kraus ops sum to identity
p = np.random.rand()
Ad0, Ad1 = amplitude_damping_kraus(p)
np.testing.assert_array_almost_equal_nulp(np.matmul(Ad0.transpose().conj(), Ad0)
+ np.matmul(Ad1.transpose().conj(), Ad1), np.eye(2))
def test_kraus2chi():
assert np.allclose(HADChi, kraus2chi(H))
p = np.random.rand()
AdKraus = amplitude_damping_kraus(p)
AdChi = amplitude_damping_chi(p)
assert np.allclose(AdChi, kraus2chi(AdKraus))
assert np.allclose(superop2chi(IZSuper), kraus2chi(IZKraus))
def test_kraus2pauli_liouville():
p = np.random.rand()
AdKraus = amplitude_damping_kraus(p)
AdPauli = amplitude_damping_pauli(p)
assert np.allclose(kraus2pauli_liouville(AdKraus), AdPauli)
assert np.allclose(kraus2pauli_liouville(H), HADPauli)
def test_kraus2superop():
p = np.random.rand()
AdKraus = amplitude_damping_kraus(p)
AdSuper = amplitude_damping_super(p)
np.testing.assert_array_almost_equal_nulp(kraus2superop(AdKraus), AdSuper)
# test application of super operator is the same as application of Kraus ops
ONE_STATE_VEC = vec(ONE_STATE)
np.testing.assert_array_almost_equal_nulp(unvec(np.matmul(kraus2superop(AdKrausOps),
ONE_STATE_VEC)), rho_out)
assert np.allclose(kraus2superop(H), HADSuper)
assert np.allclose(kraus2superop(IZKraus), IZSuper)
    # Below here tests non-square Kraus operators
    # In this example the Kraus operator is M_0 = I \otimes <0| where <0| = (1,0)
Idd = np.asarray([[1, 0], [0, 1]])
M0 = np.kron(Idd, np.asarray([[1, 0]]))
attempt = kraus2superop(M0)
answer = np.kron(M0.conj(), M0)
assert np.allclose(answer, attempt)
def test_kraus2choi():
p = np.random.rand()
AdKraus = amplitude_damping_kraus(p)
AdChoi = amplitude_damping_choi(p)
assert np.allclose(kraus2choi(AdKraus), AdChoi)
assert np.allclose(kraus2choi(H), HADChoi)
def test_chi2pauli_liouville():
p = np.random.rand()
AdChi = amplitude_damping_chi(p)
AdPauli = amplitude_damping_pauli(p)
assert np.allclose(AdPauli, chi2pauli_liouville(AdChi))
assert np.allclose(HADPauli, chi2pauli_liouville(HADChi))
def test_basis_transform_p_to_c():
xz_pauli_basis = np.zeros((16, 1))
xz_pauli_basis[7] = [1.]
assert np.allclose(unvec(pauli2computational_basis_matrix(4) @ xz_pauli_basis), np.kron(X, Z))
def test_basis_transform_c_to_p():
xz_pauli_basis = np.zeros((16, 1))
xz_pauli_basis[7] = [1.]
assert np.allclose(computational2pauli_basis_matrix(4) @ vec(np.kron(X, Z)), xz_pauli_basis)
def test_pl_to_choi():
for i, pauli in enumerate(n_qubit_pauli_basis(2)):
pl = kraus2pauli_liouville(pauli[1])
choi = kraus2choi(pauli[1])
assert np.allclose(choi, pauli_liouville2choi(pl))
pl = kraus2pauli_liouville(H)
choi = kraus2choi(H)
assert np.allclose(choi, pauli_liouville2choi(pl))
def test_superop_to_kraus():
assert np.allclose(superop2kraus(IZSuper), IZKraus)
p = np.random.rand()
AdSuper = amplitude_damping_super(p)
AdKraus = amplitude_damping_kraus(p)
kraus_ops = superop2kraus(AdSuper)
# the order of the Kraus ops matters
# TODO: fix the sign problem in Kraus operators
assert np.allclose([np.abs(kraus_ops[1]), np.abs(kraus_ops[0])], AdKraus)
def test_superop_to_choi():
for i, pauli in enumerate(n_qubit_pauli_basis(2)):
superop = kraus2superop(pauli[1])
choi = kraus2choi(pauli[1])
assert np.allclose(choi, superop2choi(superop))
p = np.random.rand()
AdSuper = amplitude_damping_super(p)
AdChoi = amplitude_damping_choi(p)
assert np.allclose(AdChoi, superop2choi(AdSuper))
superop = kraus2superop(H)
choi = kraus2choi(H)
assert np.allclose(choi, superop2choi(superop))
def test_superop_to_pl():
p = np.random.rand()
AdSuper = amplitude_damping_super(p)
AdPauli = amplitude_damping_pauli(p)
assert np.allclose(AdPauli, superop2pauli_liouville(AdSuper))
AdKraus = amplitude_damping_kraus(p)
superop = kraus2superop(AdKraus)
pauli = kraus2pauli_liouville(AdKraus)
assert np.allclose(pauli, superop2pauli_liouville(superop))
def test_pauli_liouville_to_superop():
p = np.random.rand()
AdSuper = amplitude_damping_super(p)
AdPauli = amplitude_damping_pauli(p)
assert np.allclose(AdSuper, pauli_liouville2superop(AdPauli))
AdKraus = amplitude_damping_kraus(p)
superop = kraus2superop(AdKraus)
pauli = kraus2pauli_liouville(AdKraus)
assert np.allclose(superop, pauli_liouville2superop(pauli))
def test_choi_to_kraus():
for i, pauli in enumerate(n_qubit_pauli_basis(2)):
choi = kraus2choi(pauli[1])
kraus = choi2kraus(choi)
assert np.allclose(choi, kraus2choi(kraus))
id_choi = np.array([[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]])
assert np.allclose(kraus2choi(choi2kraus(id_choi)), id_choi)
for kraus in choi2kraus(id_choi):
assert np.allclose(abs(kraus), np.eye(2)) or np.allclose(kraus, np.zeros((2, 2)))
def test_choi_to_super():
p = np.random.rand()
AdSuper = amplitude_damping_super(p)
AdChoi = amplitude_damping_choi(p)
assert np.allclose(AdSuper, choi2superop(AdChoi))
def test_choi_pl_bijectivity():
assert np.allclose(choi2superop(choi2superop(np.eye(4))), np.eye(4))
assert np.allclose(superop2choi(superop2choi(np.eye(4))), np.eye(4))
h_choi = kraus2choi(H)
h_superop = kraus2superop(H)
assert np.allclose(choi2superop(choi2superop(h_choi)), h_choi)
assert np.allclose(superop2choi(superop2choi(h_superop)), h_superop)
| 34.930556
| 98
| 0.602485
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 976
| 0.097018
|
6748094d5dca0ac93c047a1471d4c4dfa641d3ad
| 112
|
py
|
Python
|
0702 In-Place Move Zeros to End of List.py
|
ansabgillani/binarysearchcomproblems
|
12fe8632f8cbb5058c91a55bae53afa813a3247e
|
[
"MIT"
] | 1
|
2020-12-29T21:17:26.000Z
|
2020-12-29T21:17:26.000Z
|
0702 In-Place Move Zeros to End of List.py
|
ansabgillani/binarysearchcomproblems
|
12fe8632f8cbb5058c91a55bae53afa813a3247e
|
[
"MIT"
] | null | null | null |
0702 In-Place Move Zeros to End of List.py
|
ansabgillani/binarysearchcomproblems
|
12fe8632f8cbb5058c91a55bae53afa813a3247e
|
[
"MIT"
] | 4
|
2021-09-09T17:42:43.000Z
|
2022-03-18T04:54:03.000Z
|
class Solution:
def solve(self, nums):
return [num for num in nums if num != 0] + [0]*nums.count(0)
| 28
| 68
| 0.598214
| 111
| 0.991071
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
674979db2e403ec19a4fc12df3f2a373c9172b77
| 86
|
py
|
Python
|
OIL/__init__.py
|
vjdad4m/OIL
|
a664fe213723fe354796245632f58f31583bcba0
|
[
"MIT"
] | 1
|
2021-06-22T22:14:16.000Z
|
2021-06-22T22:14:16.000Z
|
OIL/__init__.py
|
vjdad4m/OIL
|
a664fe213723fe354796245632f58f31583bcba0
|
[
"MIT"
] | null | null | null |
OIL/__init__.py
|
vjdad4m/OIL
|
a664fe213723fe354796245632f58f31583bcba0
|
[
"MIT"
] | null | null | null |
import OIL.color
import OIL.label
import OIL.parser
import OIL.tools
import OIL.errors
| 17.2
| 17
| 0.837209
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
6749e169faceb4050a87041472715faed2d19901
| 2,866
|
py
|
Python
|
lib/spack/spack/cmd/load.py
|
padamson/spack
|
d3f67a48552691b4846ccc4a10f76740b154090c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2021-03-05T10:54:32.000Z
|
2021-03-05T14:14:52.000Z
|
lib/spack/spack/cmd/load.py
|
padamson/spack
|
d3f67a48552691b4846ccc4a10f76740b154090c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 32
|
2020-12-15T17:29:20.000Z
|
2022-03-21T15:08:31.000Z
|
lib/spack/spack/cmd/load.py
|
padamson/spack
|
d3f67a48552691b4846ccc4a10f76740b154090c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2021-07-19T20:31:27.000Z
|
2021-07-19T21:14:14.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.environment as ev
import spack.store
import spack.user_environment as uenv
import spack.util.environment
description = "add package to the user environment"
section = "user environment"
level = "short"
def setup_parser(subparser):
"""Parser is only constructed so that this prints a nice help
message with -h. """
arguments.add_common_arguments(
subparser, ['recurse_dependencies', 'installed_specs'])
shells = subparser.add_mutually_exclusive_group()
shells.add_argument(
'--sh', action='store_const', dest='shell', const='sh',
help="print sh commands to load the package")
shells.add_argument(
'--csh', action='store_const', dest='shell', const='csh',
help="print csh commands to load the package")
shells.add_argument(
'--fish', action='store_const', dest='shell', const='fish',
help="print fish commands to load the package")
subparser.add_argument(
'--first',
action='store_true',
default=False,
dest='load_first',
help="load the first match if multiple packages match the spec"
)
subparser.add_argument(
'--only',
default='package,dependencies',
dest='things_to_load',
choices=['package', 'dependencies'],
help="""select whether to load the package and its dependencies
the default is to load the package and all dependencies
alternatively one can decide to load only the package or only
the dependencies"""
)
def load(parser, args):
env = ev.get_env(args, 'load')
specs = [spack.cmd.disambiguate_spec(spec, env, first=args.load_first)
for spec in spack.cmd.parse_specs(args.specs)]
if not args.shell:
specs_str = ' '.join(args.specs) or "SPECS"
spack.cmd.common.shell_init_instructions(
"spack load",
" eval `spack load {sh_arg} %s`" % specs_str,
)
return 1
with spack.store.db.read_transaction():
if 'dependencies' in args.things_to_load:
include_roots = 'package' in args.things_to_load
specs = [dep for spec in specs
for dep in
spec.traverse(root=include_roots, order='post')]
env_mod = spack.util.environment.EnvironmentModifications()
for spec in specs:
env_mod.extend(uenv.environment_modifications_for_spec(spec))
env_mod.prepend_path(uenv.spack_loaded_hashes_var, spec.dag_hash())
cmds = env_mod.shell_modifications(args.shell)
sys.stdout.write(cmds)
| 34.119048
| 79
| 0.664689
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,047
| 0.365318
|
674ba1aa522d2bf108faa75b0291c6fcbe497e66
| 1,680
|
py
|
Python
|
poisson_image_editing.py
|
zishun/Poisson-EVA2019
|
de3dd88f4046f63575d02c9395b26a4b1d0b6258
|
[
"BSD-3-Clause"
] | null | null | null |
poisson_image_editing.py
|
zishun/Poisson-EVA2019
|
de3dd88f4046f63575d02c9395b26a4b1d0b6258
|
[
"BSD-3-Clause"
] | null | null | null |
poisson_image_editing.py
|
zishun/Poisson-EVA2019
|
de3dd88f4046f63575d02c9395b26a4b1d0b6258
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import imageio
from PoissonTemperature import FiniteDifferenceMatrixConstruction
def ind_sub_conversion(img, ind2sub_fn, sub2ind_fn):
rows, cols = img.shape[:2]
num = rows*cols
arange = np.arange(rows*cols, dtype=np.int32)
ind2sub = np.empty((num, 2), dtype=np.int32)
ind2sub[:, 0] = np.floor(arange/cols)
ind2sub[:, 1] = np.remainder(arange, cols)
sub2ind = arange.reshape((rows, cols))
np.save(ind2sub_fn, ind2sub)
np.save(sub2ind_fn, sub2ind)
def pie(FDMC, background, foreground):
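    # Poisson image editing (seamless cloning): keep the background outside the
    # masked region and solve for the masked ("unknown") pixels so that their
    # Laplacian matches the foreground's, with the background supplying the
    # boundary values. Note that `mask` is read from module scope (it is set in
    # the __main__ block below) rather than passed as an argument.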
Lap, Lap_Solver_Array, Rhs, is_unknown, _, _ = \
FDMC.laplacian_matrix_construction(mask.ravel())
bg = background.reshape((-1, 3))
fg = foreground.reshape((-1, 3))
result = bg.copy()
lap = Lap.dot(fg[is_unknown, :])
lap_rhs = Rhs.dot(fg)
lap_unknown = lap - lap_rhs
poisson_sol = Lap_Solver_Array[0](lap_unknown+Rhs.dot(bg))
result[is_unknown, :] = poisson_sol
result = result.reshape(background.shape)
result[result < 0] = 0.0
result[result > 1] = 1.0
return (result*255).astype(np.uint8)
if __name__ == '__main__':
folder = './data/pie/'
mask = imageio.imread(folder+'mask.png')[:, :, 0].astype(np.float32)
background = imageio.imread(folder+'mona.png')[:, :, :3]/255
foreground = imageio.imread(folder+'gine.png')[:, :, :3]/255
mask[mask > 0] = np.nan
ind2sub_fn = folder+'ind2sub.npy'
sub2ind_fn = folder+'sub2ind.npy'
ind_sub_conversion(mask, ind2sub_fn, sub2ind_fn)
FDMC = FiniteDifferenceMatrixConstruction(ind2sub_fn, sub2ind_fn)
result = pie(FDMC, background, foreground)
imageio.imwrite(folder+'result.png', result)
| 32.941176
| 72
| 0.671429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 91
| 0.054167
|
674c93e05bb72036422e17078331287c9f481a64
| 10,343
|
py
|
Python
|
mindsdb/api/http/initialize.py
|
mindsdb/main
|
2c7c09a756c17a47f2ff4a38bf45203d706240ee
|
[
"MIT"
] | 261
|
2018-09-28T02:32:17.000Z
|
2018-12-10T06:30:54.000Z
|
mindsdb/api/http/initialize.py
|
mindsdb/main
|
2c7c09a756c17a47f2ff4a38bf45203d706240ee
|
[
"MIT"
] | 27
|
2018-09-26T08:49:11.000Z
|
2018-12-10T14:42:52.000Z
|
mindsdb/api/http/initialize.py
|
mindsdb/main
|
2c7c09a756c17a47f2ff4a38bf45203d706240ee
|
[
"MIT"
] | 46
|
2018-10-06T10:11:18.000Z
|
2018-12-10T04:02:17.000Z
|
from distutils.version import LooseVersion
import requests
import os
import shutil
import threading
import webbrowser
from zipfile import ZipFile
from pathlib import Path
import traceback
import tempfile
# import concurrent.futures
from flask import Flask, url_for, make_response
from flask.json import dumps
from flask_restx import Api
from mindsdb.__about__ import __version__ as mindsdb_version
from mindsdb.interfaces.datastore.datastore import DataStore
from mindsdb.interfaces.model.model_interface import ModelInterface
from mindsdb.interfaces.database.integrations import IntegrationController
from mindsdb.utilities.ps import is_pid_listen_port, wait_func_is_true
from mindsdb.utilities.telemetry import inject_telemetry_to_static
from mindsdb.utilities.config import Config
from mindsdb.utilities.log import get_log
from mindsdb.interfaces.storage.db import session
from mindsdb.utilities.json_encoder import CustomJSONEncoder
class Swagger_Api(Api):
"""
This is a modification of the base Flask Restplus Api class due to the issue described here
https://github.com/noirbizarre/flask-restplus/issues/223
"""
@property
def specs_url(self):
return url_for(self.endpoint("specs"), _external=False)
def custom_output_json(data, code, headers=None):
resp = make_response(dumps(data), code)
resp.headers.extend(headers or {})
return resp
def get_last_compatible_gui_version() -> LooseVersion:
log = get_log('http')
try:
res = requests.get('https://mindsdb-web-builds.s3.amazonaws.com/compatible-config.json', timeout=5)
except (ConnectionError, requests.exceptions.ConnectionError) as e:
        print(f'No connection: {e}')
        return False
    except Exception as e:
        print(f'Something went wrong while getting compatible-config.json: {e}')
        return False
    if res.status_code != 200:
        print(f"Can't get compatible-config.json: returned status code = {res.status_code}")
        return False
    try:
        versions = res.json()
    except Exception as e:
        print(f"Can't decode compatible-config.json: {e}")
return False
current_mindsdb_lv = LooseVersion(mindsdb_version)
try:
gui_versions = {}
max_mindsdb_lv = None
max_gui_lv = None
for el in versions['mindsdb']:
if el['mindsdb_version'] is None:
gui_lv = LooseVersion(el['gui_version'])
else:
mindsdb_lv = LooseVersion(el['mindsdb_version'])
gui_lv = LooseVersion(el['gui_version'])
if mindsdb_lv.vstring not in gui_versions or gui_lv > gui_versions[mindsdb_lv.vstring]:
gui_versions[mindsdb_lv.vstring] = gui_lv
if max_mindsdb_lv is None or max_mindsdb_lv < mindsdb_lv:
max_mindsdb_lv = mindsdb_lv
if max_gui_lv is None or max_gui_lv < gui_lv:
max_gui_lv = gui_lv
all_mindsdb_lv = [LooseVersion(x) for x in gui_versions.keys()]
all_mindsdb_lv.sort()
if current_mindsdb_lv.vstring in gui_versions:
gui_version_lv = gui_versions[current_mindsdb_lv.vstring]
elif current_mindsdb_lv > all_mindsdb_lv[-1]:
gui_version_lv = max_gui_lv
else:
lower_versions = {key: value for key, value in gui_versions.items() if LooseVersion(key) < current_mindsdb_lv}
if len(lower_versions) == 0:
gui_version_lv = gui_versions[all_mindsdb_lv[0].vstring]
else:
all_lower_versions = [LooseVersion(x) for x in lower_versions.keys()]
gui_version_lv = gui_versions[all_lower_versions[-1].vstring]
except Exception as e:
log.error(f'Error in compatible-config.json structure: {e}')
return False
return gui_version_lv
def get_current_gui_version() -> LooseVersion:
config = Config()
static_path = Path(config['paths']['static'])
version_txt_path = static_path.joinpath('version.txt')
current_gui_version = None
if version_txt_path.is_file():
with open(version_txt_path, 'rt') as f:
current_gui_version = f.readline()
current_gui_lv = None if current_gui_version is None else LooseVersion(current_gui_version)
return current_gui_lv
def download_gui(destignation, version):
if isinstance(destignation, str):
destignation = Path(destignation)
log = get_log('http')
dist_zip_path = str(destignation.joinpath('dist.zip'))
bucket = "https://mindsdb-web-builds.s3.amazonaws.com/"
resources = [{
'url': bucket + 'dist-V' + version + '.zip',
'path': dist_zip_path
}]
def get_resources(resource):
response = requests.get(resource['url'])
if response.status_code != requests.status_codes.codes.ok:
raise Exception(f"Error {response.status_code} GET {resource['url']}")
open(resource['path'], 'wb').write(response.content)
try:
for r in resources:
get_resources(r)
except Exception as e:
log.error(f'Error during downloading files from s3: {e}')
return False
static_folder = destignation
static_folder.mkdir(mode=0o777, exist_ok=True, parents=True)
ZipFile(dist_zip_path).extractall(static_folder)
if static_folder.joinpath('dist').is_dir():
shutil.move(str(destignation.joinpath('dist').joinpath('index.html')), static_folder)
shutil.move(str(destignation.joinpath('dist').joinpath('assets')), static_folder)
shutil.rmtree(destignation.joinpath('dist'))
os.remove(dist_zip_path)
version_txt_path = destignation.joinpath('version.txt') # os.path.join(destignation, 'version.txt')
with open(version_txt_path, 'wt') as f:
f.write(version)
return True
'''
# to make downloading faster download each resource in a separate thread
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
future_to_url = {executor.submit(get_resources, r): r for r in resources}
for future in concurrent.futures.as_completed(future_to_url):
res = future.result()
if res is not None:
raise res
'''
def initialize_static():
success = update_static()
session.close()
return success
def update_static():
    ''' Update Scout files based on compatible-config.json content.
    Files are downloaded and updated if the available GUI version is newer than the current one.
    The current GUI version is stored in static/version.txt.
'''
config = Config()
log = get_log('http')
static_path = Path(config['paths']['static'])
last_gui_version_lv = get_last_compatible_gui_version()
current_gui_version_lv = get_current_gui_version()
if last_gui_version_lv is False:
return False
if current_gui_version_lv is not None:
if current_gui_version_lv >= last_gui_version_lv:
return True
log.info(f'New version of GUI available ({last_gui_version_lv.vstring}). Downloading...')
temp_dir = tempfile.mkdtemp(prefix='mindsdb_gui_files_')
success = download_gui(temp_dir, last_gui_version_lv.vstring)
if success is False:
shutil.rmtree(temp_dir)
return False
temp_dir_for_rm = tempfile.mkdtemp(prefix='mindsdb_gui_files_')
shutil.rmtree(temp_dir_for_rm)
shutil.copytree(str(static_path), temp_dir_for_rm)
shutil.rmtree(str(static_path))
shutil.copytree(temp_dir, str(static_path))
shutil.rmtree(temp_dir_for_rm)
log.info(f'GUI version updated to {last_gui_version_lv.vstring}')
return True
def initialize_flask(config, init_static_thread, no_studio):
# Apparently there's a bug that causes the static path not to work if it's '/' -- https://github.com/pallets/flask/issues/3134, I think '' should achieve the same thing (???)
if no_studio:
app = Flask(
__name__
)
else:
static_path = os.path.join(config['paths']['static'], 'static/')
if os.path.isabs(static_path) is False:
static_path = os.path.join(os.getcwd(), static_path)
app = Flask(
__name__,
static_url_path='/static',
static_folder=static_path
)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 60
app.config['SWAGGER_HOST'] = 'http://localhost:8000/mindsdb'
app.json_encoder = CustomJSONEncoder
authorizations = {
'apikey': {
'type': 'session',
'in': 'query',
'name': 'session'
}
}
api = Swagger_Api(
app,
authorizations=authorizations,
security=['apikey'],
url_prefix=':8000',
prefix='/api',
doc='/doc/'
)
api.representations['application/json'] = custom_output_json
port = config['api']['http']['port']
host = config['api']['http']['host']
    # NOTE: rewrite this; it is a hotfix to show the GUI link
if not no_studio:
log = get_log('http')
if host in ('', '0.0.0.0'):
url = f'http://127.0.0.1:{port}/'
else:
url = f'http://{host}:{port}/'
log.info(f' - GUI available at {url}')
pid = os.getpid()
x = threading.Thread(target=_open_webbrowser, args=(url, pid, port, init_static_thread, config['paths']['static']), daemon=True)
x.start()
return app, api
def initialize_interfaces(app):
app.original_data_store = DataStore()
app.original_model_interface = ModelInterface()
app.original_integration_controller = IntegrationController()
config = Config()
app.config_obj = config
def _open_webbrowser(url: str, pid: int, port: int, init_static_thread, static_folder):
"""Open webbrowser with url when http service is started.
If some error then do nothing.
"""
init_static_thread.join()
inject_telemetry_to_static(static_folder)
logger = get_log('http')
try:
is_http_active = wait_func_is_true(func=is_pid_listen_port, timeout=10,
pid=pid, port=port)
if is_http_active:
webbrowser.open(url)
except Exception as e:
logger.error(f'Failed to open {url} in webbrowser with exception {e}')
logger.error(traceback.format_exc())
session.close()
| 34.591973
| 178
| 0.667601
| 299
| 0.028908
| 0
| 0
| 98
| 0.009475
| 0
| 0
| 2,473
| 0.239099
|
674df0520020cb5c060d141941c47d1d5a1e8c48
| 9,686
|
py
|
Python
|
pyrocov/io.py
|
corneliusroemer/pyro-cov
|
54e89d128293f9ff9e995c442f72fa73f5f99b76
|
[
"Apache-2.0"
] | 22
|
2021-09-14T04:33:11.000Z
|
2022-02-01T21:33:05.000Z
|
pyrocov/io.py
|
corneliusroemer/pyro-cov
|
54e89d128293f9ff9e995c442f72fa73f5f99b76
|
[
"Apache-2.0"
] | 7
|
2021-11-02T13:48:35.000Z
|
2022-03-23T18:08:35.000Z
|
pyrocov/io.py
|
corneliusroemer/pyro-cov
|
54e89d128293f9ff9e995c442f72fa73f5f99b76
|
[
"Apache-2.0"
] | 6
|
2021-09-18T01:06:51.000Z
|
2022-01-10T02:22:06.000Z
|
# Copyright Contributors to the Pyro-Cov project.
# SPDX-License-Identifier: Apache-2.0
import functools
import io
import logging
import math
import re
import sys
import torch
import torch.multiprocessing as mp
from Bio import AlignIO
from Bio.Phylo.NewickIO import Parser
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from .phylo import Phylogeny
logger = logging.getLogger(__name__)
FILE_FORMATS = {
"nex": "nexus",
"nexus": "nexus",
"fasta": "fasta",
"xml": "beast",
}
def _print_dot():
sys.stdout.write(".")
sys.stdout.flush()
def _handle_translate(lines, context):
map_lines = [line.rstrip(",").split() for line in lines[1:-1]]
context["translate"] = {key: value for key, value in map_lines}
def _handle_tree_count(lines, context):
return 1
def _handle_tree_newick(lines, context):
assert len(lines) == 1
tree, name, equal, newick = lines[0].split()
assert tree == "tree"
assert equal == "="
tree = next(Parser.from_string(newick).parse())
tree.name = name
# Add translations as .comment attributes
if "translate" in context:
translate = context["translate"]
for leaf in tree.get_terminals():
leaf.comment = translate[leaf.name]
return tree
def _handle_tree_torch(lines, context):
assert len(lines) == 1
tree, name, equal, newick = lines[0].split()
assert tree == "tree"
assert equal == "="
tree = next(Parser.from_string(newick).parse())
tree = Phylogeny.from_bio_phylo(tree)
_print_dot()
return tree
def _handle_raw(lines, context):
return lines, context
def _apply(fn, args):
return fn(*args)
def read_nexus_trees(filename, *, format="newick", max_num_trees=math.inf, processes=0):
"""
Parse and iterate over newick trees stored in a nexus file.
This streams the file and thus can handle larger files than
``Bio.Phylo.read(..., format="nexus")``.
Returns an iterator of ``Bio.Phylo`` tree objects.
"""
if format == "count":
context = {}
handlers = {"tree": _handle_tree_count}
elif format == "newick":
context = {"translate": {}}
handlers = {"translate": _handle_translate, "tree": _handle_tree_newick}
elif format == "_raw_newick":
context = {"translate": {}}
handlers = {"translate": _handle_translate, "tree": _handle_raw}
elif format == "torch":
context = None
handlers = {"tree": _handle_tree_torch}
elif format == "_raw_torch":
context = None
handlers = {"tree": _handle_raw}
else:
raise ValueError(f"unknown format: {format}")
if processes != 0:
trees = read_nexus_trees(
filename, format="_raw_" + format, max_num_trees=max_num_trees
)
with mp.Pool(processes) as pool:
handler = functools.partial(_apply, handlers["tree"])
yield from pool.imap(handler, trees)
return
with open(filename) as f:
lines = iter(f)
for line in lines:
if line.startswith("Begin trees;"):
break
part = []
for line in lines:
line = line.strip()
part.append(line)
if not line.endswith(";"):
continue
type_ = part[0].split()[0].lower()
handle = handlers.get(type_)
if handle is not None:
tree = handle(part, context)
if tree is not None:
yield tree
max_num_trees -= 1
if max_num_trees <= 0:
break
part = []
def count_nexus_trees(filename):
"""
Counts the number of trees in a nexus file.
"""
return sum(read_nexus_trees(filename, format="count"))
def stack_nexus_trees(filename, *, max_num_trees=math.inf, processes=0):
"""
Loads a batch of trees from a nexus file.
"""
trees = read_nexus_trees(
filename, format="torch", max_num_trees=max_num_trees, processes=processes
)
return Phylogeny.stack(trees)
def read_newick_tree(filename):
"""
Parse a single newick tree and convert to a ``Phylogeny``.
"""
with open(filename) as f:
line = f.read().strip()
tree = next(Parser.from_string(line).parse())
return Phylogeny.from_bio_phylo(tree)
def read_alignment(
filename, format=None, *, max_taxa=math.inf, max_characters=math.inf
):
"""
Reads a single alignment file to a torch tensor of probabilites.
:param str filename: Name of input file.
:param str format: Optional input format, e.g. "nexus" or "fasta".
:param int max_taxa: Optional number of taxa for truncation.
:param int max_characters: Optional number of characters for truncation.
:rtype: torch.Tensor
:returns: A float tensor of shape ``(num_sequences, num_characters,
num_bases)`` that is normalized along its rightmost dimension. Note
        that ``num_bases`` is 5 = 4 + 1, where the final base denotes a gap or
indel.
"""
# Load a Bio.Align.MultipleSeqAlignment object.
logger.info(f"Loading data from {filename}")
if format is None:
suffix = filename.split(".")[-1].lower()
format = FILE_FORMATS.get(suffix)
if format is None:
raise ValueError("Please specify a file format, e.g. 'nexus' or 'fasta'")
elif format == "nexus":
alignment = _read_alignment_nexus(filename)
elif format == "beast":
alignment = _read_alignment_beast(filename)
else:
alignment = AlignIO.read(filename, format)
# Convert to a single torch.Tensor.
num_taxa = min(len(alignment), max_taxa)
if num_taxa < len(alignment):
alignment = alignment[:num_taxa]
num_characters = min(len(alignment[0]), max_characters)
if num_characters < len(alignment[0]):
alignment = alignment[:, :num_characters]
logger.info(f"parsing {num_taxa} taxa x {num_characters} characters")
codebook = _get_codebook()
probs = torch.full((num_taxa, num_characters, 5), 1 / 5)
for i in range(num_taxa):
seq = alignment[i].seq
if not VALID_CODES.issuperset(seq):
raise ValueError(f"Invalid characters: {set(seq) - VALID_CODES}")
# Replace gaps at ends with missing.
beg, end = 0, probs.size(1)
if seq[0] in "-.N":
seq, old = seq.lstrip(seq[0]), seq
beg += len(old) - len(seq)
if seq[-1] in "-.N":
seq, old = seq.rstrip(seq[-1]), seq
end -= len(old) - len(seq)
probs[i, beg:end] = codebook[list(map(ord, seq))]
assert torch.isfinite(probs).all()
return probs
def _read_alignment_nexus(filename):
# Work around bugs in Bio.Nexus reader.
lines = []
section = None
done = set()
with open(filename) as f:
for line in f:
if line.startswith("BEGIN"):
section = line.split()[-1].strip()[:-1]
elif line.startswith("END;"):
done.add(section)
section = None
if "TAXA" in done and "CHARACTERS" in done:
lines.append(line)
break
elif section == "CHARACTERS":
if "{" in line:
line = re.sub("{([ATCG]+)}", _encode_ambiguity, line)
lines.append(line)
f = io.StringIO("".join(lines))
alignment = AlignIO.read(f, "nexus")
return alignment
def _read_alignment_beast(filename):
result = []
with open(filename) as f:
for line in f:
line = line.strip()
if not line.startswith("<sequence "):
continue
id_ = re.search(r'\bid="([^"]*)"', line).group(1)
seq = re.search(r'\bvalue="([^"]*)"', line).group(1)
result.append(SeqRecord(Seq(seq), id=id_))
return result
# See https://www.bioinformatics.org/sms/iupac.html
NUCLEOTIDE_CODES = {
# [ A, C, G, T, gap]
"?": [1 / 5, 1 / 5, 1 / 5, 1 / 5, 1 / 5], # missing
"n": [1 / 5, 1 / 5, 1 / 5, 1 / 5, 1 / 5], # missing
"A": [1 / 1, 0.0, 0.0, 0.0, 0.0], # adenine
"C": [0.0, 1 / 1, 0.0, 0.0, 0.0], # cytosine
"G": [0.0, 0.0, 1 / 1, 0.0, 0.0], # guanine
"T": [0.0, 0.0, 0.0, 1 / 1, 0.0], # thymine
"U": [0.0, 0.0, 0.0, 1 / 1, 0.0], # uracil
"R": [1 / 2, 0.0, 1 / 2, 0.0, 0.0],
"Y": [0.0, 1 / 2, 0.0, 1 / 2, 0.0],
"S": [0.0, 1 / 2, 1 / 2, 0.0, 0.0],
"W": [1 / 2, 0.0, 0.0, 1 / 2, 0.0],
"K": [0.0, 0.0, 1 / 2, 1 / 2, 0.0],
"M": [1 / 2, 1 / 2, 0.0, 0.0, 0.0],
"B": [0.0, 1 / 3, 1 / 3, 1 / 3, 0.0],
"D": [1 / 3, 0.0, 1 / 3, 1 / 3, 0.0],
"H": [1 / 3, 1 / 3, 0.0, 1 / 3, 0.0],
"V": [1 / 3, 1 / 3, 1 / 3, 0.0, 0.0],
"N": [1 / 4, 1 / 4, 1 / 4, 1 / 4, 0.0],
"-": [0.0, 0.0, 0.0, 0.0, 1 / 1], # gap
".": [0.0, 0.0, 0.0, 0.0, 1 / 1], # gap
}
VALID_CODES = set(NUCLEOTIDE_CODES)
AMBIGUOUS_CODES = {
frozenset("AG"): "R",
frozenset("CT"): "Y",
frozenset("CG"): "S",
frozenset("AT"): "W",
frozenset("GT"): "K",
frozenset("AC"): "M",
frozenset("CGT"): "B",
frozenset("AGT"): "D",
frozenset("ACT"): "H",
frozenset("ACG"): "V",
frozenset("ACGT"): "N",
}
assert len(AMBIGUOUS_CODES) == 6 + 4 + 1
def _encode_ambiguity(chars):
return AMBIGUOUS_CODES[frozenset(chars.group(1))]
def _get_codebook():
codes = torch.full((256, 5), math.nan)
keys = list(map(ord, NUCLEOTIDE_CODES.keys()))
values = torch.tensor(list(NUCLEOTIDE_CODES.values()))
assert values.sum(-1).sub(1).abs().le(1e-6).all()
codes[keys] = values
return codes
| 31.044872
| 88
| 0.574024
| 0
| 0
| 1,984
| 0.204832
| 0
| 0
| 0
| 0
| 2,258
| 0.23312
|
674dfe34110c0256d54ed4a145016c108d5fa7fa
| 1,439
|
py
|
Python
|
core.py
|
mistifiedwarrior/house_price_prediction
|
c935650130ea6464f948706d057af6f044abbff6
|
[
"MIT"
] | null | null | null |
core.py
|
mistifiedwarrior/house_price_prediction
|
c935650130ea6464f948706d057af6f044abbff6
|
[
"MIT"
] | null | null | null |
core.py
|
mistifiedwarrior/house_price_prediction
|
c935650130ea6464f948706d057af6f044abbff6
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
def convert_to_sqft(str):
tokens = str.split(' - ')
if len(tokens) == 2:
return (float(tokens[0]) + float(tokens[1])) / 2
try:
return float(tokens[0])
except Exception:
return np.NAN
def convert_to_num(num):
tokens = str(num).split(' ')
return float(tokens[0])
def train_model(X, Y):
regression = LinearRegression()
regression.fit(X, Y)
return regression
def get_training_data():
dataframe = pd.read_csv("./Bengaluru_House_Data.csv")
df = dataframe.drop(columns=["area_type", "balcony", "society", "availability"], axis='columns')
df['total_sqft'] = df['total_sqft'].apply(convert_to_sqft)
df['size'] = df['size'].apply(convert_to_num)
locations = pd.get_dummies(df["location"])
df_merge = pd.concat([df.drop(columns=["location"]), locations], axis='columns')
df_merge = df_merge.drop(columns=["Unnamed: 9"], axis='columns')
df_merge = df_merge.dropna()
X = df_merge.drop(['price'], axis='columns')
Y = df_merge['price']
return X, Y
def predict_price(regression, X, location, bhk, total_sqft, bath):
location_index = np.where(X.columns == location)[0][0]
x = np.zeros(len(X.columns))
x[0] = bhk
x[1] = total_sqft
x[2] = bath
if location_index >= 0:
x[location_index] = 1
return regression.predict([x])[0]
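# Minimal usage sketch (the location string is a hypothetical column of the
# one-hot encoded training data):
#   X, Y = get_training_data()
#   model = train_model(X, Y)
#   price = predict_price(model, X, 'Whitefield', bhk=2, total_sqft=1000, bath=2)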
| 31.977778
| 100
| 0.648367
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 197
| 0.136901
|
674e48cd30f8211b37cb1b97721c2c716552aabd
| 605
|
py
|
Python
|
Python/bank-robbers.py
|
JaredLGillespie/CodinGame
|
7e14078673300f66d56c8af4f66d9bf5d2229fa6
|
[
"MIT"
] | 1
|
2020-01-05T17:44:57.000Z
|
2020-01-05T17:44:57.000Z
|
Python/bank-robbers.py
|
JaredLGillespie/CodinGame
|
7e14078673300f66d56c8af4f66d9bf5d2229fa6
|
[
"MIT"
] | null | null | null |
Python/bank-robbers.py
|
JaredLGillespie/CodinGame
|
7e14078673300f66d56c8af4f66d9bf5d2229fa6
|
[
"MIT"
] | 2
|
2020-09-27T16:02:53.000Z
|
2021-11-24T09:08:59.000Z
|
# https://www.codingame.com/training/easy/bank-robbers
from heapq import *
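# Per the puzzle, a vault combination has c characters, n of which are digits
# (10 possibilities each) while the remaining c - n are vowels (5 possibilities
# each), so brute-forcing a single vault takes 10**n * 5**(c - n) seconds.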
def calc_vault_time(c, n):
return 10**n * 5**(c - n)
def solution():
robbers = int(input())
vault = int(input())
vault_times = []
for i in range(vault):
c, n = map(int, input().split())
vault_times.append(calc_vault_time(c, n))
active_robbers = []
for vt in vault_times:
if len(active_robbers) < robbers:
heappush(active_robbers, vt)
else:
heappush(active_robbers, vt + heappop(active_robbers))
print(max(active_robbers))
solution()
| 20.862069
| 66
| 0.609917
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 54
| 0.089256
|
674e497c1af4728fb031faf7f24fbf2bf5bd7b4b
| 576
|
py
|
Python
|
14Django/day04/BookManager/introduction1.py
|
HaoZhang95/PythonAndMachineLearning
|
b897224b8a0e6a5734f408df8c24846a98c553bf
|
[
"MIT"
] | 937
|
2019-05-08T08:46:25.000Z
|
2022-03-31T12:56:07.000Z
|
14Django/day04/BookManager/introduction1.py
|
Sakura-gh/Python24
|
b97e18867264a0647d5645c7d757a0040e755577
|
[
"MIT"
] | 47
|
2019-09-17T10:06:02.000Z
|
2022-03-11T23:46:52.000Z
|
14Django/day04/BookManager/introduction1.py
|
Sakura-gh/Python24
|
b97e18867264a0647d5645c7d757a0040e755577
|
[
"MIT"
] | 354
|
2019-05-10T02:15:26.000Z
|
2022-03-30T05:52:57.000Z
|
"""
模板语言:
{{ 变量 }}
{% 代码段 %}
{% 一个参数时:变量|过滤器, Book.id | add: 1 <= 2 当前id+1来和2比较
两个参数时:变量|过滤器:参数 %}, 过滤器最多只能传2个参数,过滤器用来对传入的变量进行修改
{% if book.name|length > 4 %} 管道|符号的左右不能有多余的空格,否则报错,其次并不是name.length而是通过管道来过滤
{{ book.pub_date|date:'Y年m月j日' }} 日期的转换管道
"""
"""
CSRF 跨站请求伪造, 盗用别人的信息,以你的名义进行恶意请求
比如:服务器返回一个表单进行转账操作,再把转账信息返回给服务器。
需要判断发送转账信息请求的客户端是不是刚才获取表单界面的客户端,防止回送请求的修改,和返回页面的修改(表单地址被修改为黑客地址,信息丢失)
防止CSRF需要服务器做安全验证
"""
"""
验证码主要用来防止暴力请求,原理就是请求页面之前生成一个动态不同的验证码写入到session中
用户登录的时候,会拿着填写的验证码和session中的验证码比较进行验证
"""
| 24
| 85
| 0.670139
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,210
| 0.996705
|
674eb289511fbd351f416105eb842fadb81a491d
| 291
|
py
|
Python
|
gammapy/maps/__init__.py
|
watsonjj/gammapy
|
8d2498c8f63f73d1fbe4ba81ab02d9e72552df67
|
[
"BSD-3-Clause"
] | null | null | null |
gammapy/maps/__init__.py
|
watsonjj/gammapy
|
8d2498c8f63f73d1fbe4ba81ab02d9e72552df67
|
[
"BSD-3-Clause"
] | 1
|
2020-10-29T19:55:46.000Z
|
2020-10-29T19:55:46.000Z
|
gammapy/maps/__init__.py
|
watsonjj/gammapy
|
8d2498c8f63f73d1fbe4ba81ab02d9e72552df67
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Sky maps."""
from .base import *
from .geom import *
from .hpx import *
from .hpxnd import *
from .hpxsparse import *
from .hpxmap import *
from .wcs import *
from .wcsnd import *
from .wcsmap import *
from .sparse import *
| 22.384615
| 63
| 0.71134
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 78
| 0.268041
|
674ebc40e603703da0b0ddbc5fe2fad3846b9a69
| 3,305
|
py
|
Python
|
lhotse/dataset/sampling/utils.py
|
stachu86/lhotse
|
d5e78154db2d4d52f15aaadc8882f76eb5b77640
|
[
"Apache-2.0"
] | 353
|
2020-10-31T10:38:51.000Z
|
2022-03-30T05:22:52.000Z
|
lhotse/dataset/sampling/utils.py
|
stachu86/lhotse
|
d5e78154db2d4d52f15aaadc8882f76eb5b77640
|
[
"Apache-2.0"
] | 353
|
2020-10-27T23:25:12.000Z
|
2022-03-31T22:16:05.000Z
|
lhotse/dataset/sampling/utils.py
|
stachu86/lhotse
|
d5e78154db2d4d52f15aaadc8882f76eb5b77640
|
[
"Apache-2.0"
] | 66
|
2020-11-01T06:08:08.000Z
|
2022-03-29T02:03:07.000Z
|
import warnings
from typing import Dict, Tuple
from lhotse import CutSet
from lhotse.dataset.sampling.base import CutSampler
def find_pessimistic_batches(
sampler: CutSampler, batch_tuple_index: int = 0
) -> Tuple[Dict[str, CutSet], Dict[str, float]]:
"""
Function for finding 'pessimistic' batches, i.e. batches that have the highest potential
to blow up the GPU memory during training. We will fully iterate the sampler and record
the most risky batches under several criteria:
- single longest cut
- single longest supervision
- largest batch cuts duration
- largest batch supervisions duration
- max num cuts
- max num supervisions
.. note: It is up to the users to convert the sampled CutSets into actual batches and test them
by running forward and backward passes with their model.
Example of how this function can be used with a PyTorch model
and a :class:`~lhotse.dataset.K2SpeechRecognitionDataset`::
sampler = SingleCutSampler(cuts, max_duration=300)
dataset = K2SpeechRecognitionDataset()
batches, scores = find_pessimistic_batches(sampler)
for reason, cuts in batches.items():
try:
                batch = dataset[cuts]
outputs = model(batch)
loss = loss_fn(outputs)
loss.backward()
except:
print(f"Exception caught when evaluating pessimistic batch for: {reason}={scores[reason]}")
raise
:param sampler: An instance of a Lhotse :class:`.CutSampler`.
:param batch_tuple_index: Applicable to samplers that return tuples of :class:`~lhotse.cut.CutSet`.
Indicates which position in the tuple we should look up for the CutSet.
:return: A tuple of dicts: the first with batches (as CutSets) and the other with criteria values, i.e.:
``({"<criterion>": <CutSet>, ...}, {"<criterion>": <value>, ...})``
"""
criteria = {
"single_longest_cut": lambda cuts: max(c.duration for c in cuts),
"single_longest_supervision": lambda cuts: max(
sum(s.duration for s in c.supervisions) for c in cuts
),
"largest_batch_cuts_duration": lambda cuts: sum(c.duration for c in cuts),
"largest_batch_supervisions_duration": lambda cuts: sum(
s.duration for c in cuts for s in c.supervisions
),
"max_num_cuts": len,
"max_num_supervisions": lambda cuts: sum(
1 for c in cuts for _ in c.supervisions
),
}
try:
sampler = iter(sampler)
first_batch = next(sampler)
if isinstance(first_batch, tuple):
first_batch = first_batch[batch_tuple_index]
except StopIteration:
warnings.warn("Empty sampler encountered in find_pessimistic_batches()")
return {}, {}
top_batches = {k: first_batch for k in criteria}
top_values = {k: fn(first_batch) for k, fn in criteria.items()}
for batch in sampler:
if isinstance(batch, tuple):
batch = batch[batch_tuple_index]
for crit, fn in criteria.items():
val = fn(batch)
if val > top_values[crit]:
top_values[crit] = val
top_batches[crit] = batch
return top_batches, top_values
| 39.345238
| 108
| 0.644781
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,893
| 0.572769
|
674f2806f73a13483671e5b0ce4735f88b2f1c4f
| 606
|
py
|
Python
|
book/migrations/0010_auto_20170603_1441.py
|
pyprism/Hiren-Mail-Notify
|
324583a2edd25da5d2077914a79da291e00c743e
|
[
"MIT"
] | null | null | null |
book/migrations/0010_auto_20170603_1441.py
|
pyprism/Hiren-Mail-Notify
|
324583a2edd25da5d2077914a79da291e00c743e
|
[
"MIT"
] | 144
|
2015-10-18T17:19:03.000Z
|
2021-06-27T07:05:56.000Z
|
book/migrations/0010_auto_20170603_1441.py
|
pyprism/Hiren-Mail-Notify
|
324583a2edd25da5d2077914a79da291e00c743e
|
[
"MIT"
] | 1
|
2015-10-18T17:04:39.000Z
|
2015-10-18T17:04:39.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-03 08:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('book', '0009_book_folder'),
]
operations = [
migrations.AddField(
model_name='book',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='book',
name='name',
field=models.CharField(max_length=400, unique=True),
),
]
| 23.307692
| 64
| 0.587459
| 450
| 0.742574
| 0
| 0
| 0
| 0
| 0
| 0
| 123
| 0.20297
|
674faa0b694ce161c45416e214ad1d35c7eb77fc
| 1,218
|
py
|
Python
|
contrib/ComparisonStatistics/Test/test_1.py
|
xylar/cdat
|
8a5080cb18febfde365efc96147e25f51494a2bf
|
[
"BSD-3-Clause"
] | 62
|
2018-03-30T15:46:56.000Z
|
2021-12-08T23:30:24.000Z
|
contrib/ComparisonStatistics/Test/test_1.py
|
xylar/cdat
|
8a5080cb18febfde365efc96147e25f51494a2bf
|
[
"BSD-3-Clause"
] | 114
|
2018-03-21T01:12:43.000Z
|
2021-07-05T12:29:54.000Z
|
contrib/ComparisonStatistics/Test/test_1.py
|
CDAT/uvcdat
|
5133560c0c049b5c93ee321ba0af494253b44f91
|
[
"BSD-3-Clause"
] | 14
|
2018-06-06T02:42:47.000Z
|
2021-11-26T03:27:00.000Z
|
#!/usr/bin/env python
import ComparisonStatistics
import cdutil
import os,sys
# Reference
ref = os.path.join(cdutil.__path__[0],'..','..','..','..','sample_data','tas_dnm-95a.xml')
Ref=cdutil.VariableConditioner(ref)
Ref.var='tas'
Ref.id='reference'
# Test
tst = os.path.join(cdutil.__path__[0],'..','..','..','..','sample_data','tas_ccsr-95a.xml')
Tst=cdutil.VariableConditioner(tst)
Tst.var='tas'
Tst.id='test'
# Final Grid
FG=cdutil.WeightedGridMaker()
FG.longitude.n=36
FG.longitude.first=0.
FG.longitude.delta=10.
FG.latitude.n=18
FG.latitude.first=-85.
FG.latitude.delta=10.
# Now the compall thing
c=ComparisonStatistics.ComparisonStatistics(Tst,Ref,weightedGridMaker=FG)
c.fracmin=.5
c.minyr=3
icall=19
# Let's force the indices to be the same
c.variableConditioner1.cdmsKeywords['time']=('1979','1982','co')
c.variableConditioner2.cdmsKeywords['time']=slice(0,36)
print "Before computing:"
print c.variableConditioner1
#print 'C printing:\n',c
## (test,tfr),(ref,reffrc)=c()
(test,tfr),(ref,reffrc) = c.compute()
print "Test:",test
# Retrieve the rank for th etime_domain 19 (monthly space time)
rank=c.rank(time_domain=19)
print 'Result for Rank:',rank
c.write('tmp.nc',comments='A simple example')
| 24.36
| 91
| 0.728243
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 448
| 0.367816
|
674fc8fb47108fcde4353966aaff882285b50e79
| 1,087
|
py
|
Python
|
mathipy/functions/linearithmic.py
|
BatiDyDx/maths-tools-python
|
e9a58aa669b5f36d7ee01402fe1f16a1db7b0e50
|
[
"MIT"
] | 1
|
2021-02-02T02:58:38.000Z
|
2021-02-02T02:58:38.000Z
|
mathipy/functions/linearithmic.py
|
BatiDyDx/maths-tools-python
|
e9a58aa669b5f36d7ee01402fe1f16a1db7b0e50
|
[
"MIT"
] | null | null | null |
mathipy/functions/linearithmic.py
|
BatiDyDx/maths-tools-python
|
e9a58aa669b5f36d7ee01402fe1f16a1db7b0e50
|
[
"MIT"
] | null | null | null |
import math
import numpy as np
from mathipy.math import calculus
class Linearithmic(calculus.Function):
"""
f(x) = (mx + h)log_b(kx + a)
"""
function_type = 'Linearithmic'
def __init__(self, m = 1, h = 0, b = 10, a = 0, k = 1):
self.m = m
self.h = h
self.b = b
self.a = a
self.k = k
def find_roots(self) -> tuple:
x1 = - self.h / self.m
x2 = (1 - self.a) / self.k
x1 = x1 if self(x1) == 0 else np.nan
x2 = x2 if self(x2) == 0 else np.nan
return (x1, x2)
def plot_func(self, ax):
ax.scatter(self.find_roots(), (0,0), color=calculus.Function.function_part['roots'])
ax.scatter(0, self.get_yint(), color=calculus.Function.function_part['y-intercept'])
def calculate_values(self, x):
return (self.m * x + self.h) * math.log(self.k * x + self.a, self.b)
def __str__(self):
representation = ''
representation += f'({self.m}x + {self.h})'
representation += f'log_{self.b}({self.k}x + {self.a})'
return representation
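# Usage sketch (parameter values are illustrative):
#   f = Linearithmic(m=2, h=1, b=10, a=0, k=1)
#   y = f.calculate_values(5)   # (2*5 + 1) * log10(1*5 + 0) ~= 7.69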
| 30.194444
| 92
| 0.554738
| 1,021
| 0.939282
| 0
| 0
| 0
| 0
| 0
| 0
| 142
| 0.130635
|
674feabbfb04fd43b656a2ee09e804a9db0cc338
| 11,479
|
py
|
Python
|
pivot_based_eccv2018/misc/expander/disambiguate.py
|
gujiuxiang/unpaired_im2text_iccv19
|
cf71b82b3d2616b0b1fb5c2dfd7f7832cd1e8ec2
|
[
"MIT"
] | 18
|
2019-11-01T13:50:03.000Z
|
2022-03-14T03:07:34.000Z
|
pivot_based_eccv2018/misc/expander/disambiguate.py
|
gujiuxiang/unpaired_im2text_iccv19
|
cf71b82b3d2616b0b1fb5c2dfd7f7832cd1e8ec2
|
[
"MIT"
] | 7
|
2020-01-03T13:53:26.000Z
|
2021-03-25T22:55:52.000Z
|
pivot_based_eccv2018/misc/expander/disambiguate.py
|
gujiuxiang/unpaired_im2text_iccv19
|
cf71b82b3d2616b0b1fb5c2dfd7f7832cd1e8ec2
|
[
"MIT"
] | 3
|
2019-09-16T02:03:59.000Z
|
2021-06-12T07:03:03.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module contains the necessary functions to load a text-corpus from
NLTK, contract all possible sentences, apply POS tags to the
contracted sentences, and compare the result with the original text.
The information about which contraction+pos-tag pair gets expanded to
which full form will be saved in a dictionary for use in expander.py
"""
__author__ = "Yannick Couzinié"
# standard library imports
import pprint
import yaml
# third-party library imports
import nltk
# local library imports
import utils
# increase the allowed ram size that the models can use
# nltk.internals.config_java(options='-xmx2G')
def _find_sub_list(sublist, full_list):
"""
Args:
- sublist is a list of words that are supposed to be found in
the full list.
- full list is a list of words that is supposed to be searched
in.
Returns:
- List of tuples with the form
          (first_index_of_occurrence, last_index_of_occurrence)
    This function finds all occurrences of sublist in the full_list.
"""
# this is the output list
results = []
sublist_len = len(sublist)
# loop over all ind if the word in full_list[ind] matches the first
# word of the sublist
for ind in (i for i, word in enumerate(full_list)
if word == sublist[0]):
# check that the complete sublist is matched
if full_list[ind:ind+sublist_len] == sublist:
# then append this to the results
results.append((ind, ind+sublist_len-1))
return results
def _contract_sentences(expansions,
sent_lst,
use_ner,
ner_args):
"""
Args:
- expansions is a dictionary containing the corresponding
contractions to the expanded words
- sent_lst is a list of sentences, which is itself a list of
words, i.e. [["I", "am", "blue"], [...]].
- use_ner is boolean to decide whether to use
named-entity-recognition for a potential increase in
accuracy but with the obvious costs of performance.
- ner_args is a list with an object of StanfordNERTagger and
the tag to be used. This only needs to be
supplied if use_ner is true.
Returns:
- yields tuples of the form
(index of first word that was replaced,
list of words that were replaced,
contracted sentence).
The above example would then give
(0, ["I", "am"], ["I", "'m", "blue"])
Note that uncontractible sentences are not added to the
output.
Since yield is used, iterate over the results. Otherwise it
takes too much time.
This function checks a list of sentences for whether they can be
contracted. It starts with the first two words, then the first three
and then goes on to the second+third, then the second+third+fourth
and so on.
"""
# first find the indices of the sentences that contain contractions
for sent in sent_lst:
if use_ner:
# replace all named entities with the tag in ner_args[1]
# throw away replacement info
sent = utils.sent_to_ner(ner_args[0], sent,
tag=ner_args[1])[0]
# check whether any expansion is present then add the index
# it has a True for every expansion that is present
expansion_bool = [expansion in ' '.join(sent) for expansion
in list(expansions.keys())]
if not any(expansion_bool):
# if no expansions present just continue
continue
# convert the boolean list to a list of indices
expansion_idx = [i for i, boolean in enumerate(expansion_bool)
if boolean]
# the list of relevant expansions for the sentence
relevant_exp = [list(expansions.keys())[i] for i in expansion_idx]
for expansion in relevant_exp:
# first split the contraction up into a list of the same
# length as the expanded string
if len(expansion.split()) in [2, 3, 4]:
# if you contract three or two words,
# just split at apostrophes
contraction = expansions[expansion].split("'")
assert len(contraction) == len(expansion.split())
# add the apostrophes again
contraction[1] = "'" + contraction[1]
if len(contraction) == 3:
contraction[2] = "'" + contraction[2]
if len(contraction) == 4:
contraction[3] = "'" + contraction[3]
else:
# this case is only entered when there is only one word
# input. So assert that this is the case.
                assert len(expansion.split()) == 1
# this is a completely pathological case, since
# ambiguous 1-word replacements are not in the common
# list of replacements from wikipedia. But since one can
# openly expand contractions.yaml it is checked.
contraction = expansions[expansion]
# find where the sublist occurs
occurences = _find_sub_list(expansion.split(), sent)
# loop over all first indices of occurences
# and insert the contracted part
for occurence in occurences:
contr_sent = sent[:occurence[0]] + contraction
contr_sent += sent[occurence[0]+len(contraction):]
yield (occurence[0],
sent[occurence[0]:occurence[0]+len(contraction)],
contr_sent)
def _invert_contractions_dict():
"""
This is just a short function to return the inverted dictionary
of the contraction dictionary.
"""
with open("contractions.yaml", "r") as stream:
# load the dictionary containing all the contractions
contractions = yaml.load(stream)
# invert the dictionary for quicker finding of contractions
expansions = dict()
for key, value in contractions.items():
if len(value) == 1:
continue
for expansion in value:
if expansion in expansions:
print("WARNING: As an contraction to {}, {} is replaced with"
" {}.".format(expansion,
expansions[expansion],
key))
expansions[expansion] = key
return expansions
def write_dictionary(pos_model,
sent_lst,
add_tags=0,
use_ner=False,
ner_args=None):
"""
Args:
- pos_model is an instance of StanfordPOSTagger
- sent-lst a list of sentences which themselves are lists of the
single words.
- add_tags is the amount of pos tags used after the
relevant contraction, this can be used to further
disambiguate but (of course) spreads out the data.
- use_ner is boolean to decide whether to use
named-entity-recognition for a potential increase in
accuracy but with the obvious costs of performance.
- ner_args is a list with an object of StanfordNERTagger and
the tag to be used. This only needs to be
supplied if use_ner is true.
Returns:
- None, but writes a disambiguations.yaml file with disambiguations
for the ambiguous contractions in contractions.yaml.
Raises:
ValueError if use_ner is True but no ner_model is supplied.
Using the provided list of sentences, contract them and pos-tag them.
Using the pos-tags it is then possible to classify which
(contraction, pos-tag) combinations get expanded to which ambiguous
long form.
"""
# pylint: disable=too-many-locals
if use_ner and (ner_args is None):
raise ValueError("The use_ner flag is True but no NER"
" model has been supplied!")
expansions = _invert_contractions_dict()
output_dict = dict()
ambiguity_counter = 0
for tuple_rslt in _contract_sentences(expansions,
sent_lst,
use_ner=use_ner,
ner_args=ner_args):
# pos tag the sentence
if use_ner:
# first replace the NER tag with "it"
pos_sent = [word.replace(ner_args[1], "it") for word
in tuple_rslt[2]]
# tag the sentence
pos_sent = pos_model.tag(pos_sent)
# and replace it with the tag again
pos_sent = [(tuple_rslt[2][i], word_pos[1]) for i, word_pos
in enumerate(pos_sent)]
else:
pos_sent = pos_model.tag(tuple_rslt[2])
# extract the pos tags on the contracted part
contr_word_pos = pos_sent[tuple_rslt[0]:(tuple_rslt[0] +
len(tuple_rslt[1]))]
if add_tags == 0:
contr_pos = tuple(contr_word_pos)
else:
add_pos_list = pos_sent[len(tuple_rslt[1]):(len(tuple_rslt[1]) +
add_tags)]
add_pos = [pos_word[1] for pos_word in add_pos_list]
contr_pos = tuple(contr_word_pos + add_pos)
# write a dictionary entry connecting the (words, pos) of the
# contraction to the expanded part
word = ' '.join(tuple_rslt[1])
if contr_pos not in output_dict:
output_dict[contr_pos] = dict()
output_dict[contr_pos][word] = 1
# keep track of the progress
print("\n\n ---- \n\n")
pprint.pprint(output_dict)
print("Ambiguity counter is {}.".format(ambiguity_counter))
print("\n\n ---- \n\n")
elif word in output_dict[contr_pos].keys():
# check whether the entry is already there
output_dict[contr_pos][word] += 1
continue
        else:
            # the pos-tag combination is already mapped to a different
            # expansion, so this contraction is genuinely ambiguous for
            # these tags. Ideally this case doesn't occur.
ambiguity_counter += 1
output_dict[contr_pos][word] = 1
print("\n\n ---- \n\n")
print("AMBIGUITY ADDED!")
pprint.pprint(output_dict)
print("Ambiguity counter is {}.".format(ambiguity_counter))
print("\n\n ---- \n\n")
with open("disambiguations.yaml", "w") as stream:
yaml.dump(output_dict, stream)
if __name__ == '__main__':
# if you call this function directly just build the disambiguation
# dictionary.
# load a corpus that has the form of list of sentences which is
# split up into a list of words
SENT_LST = nltk.corpus.brown.sents()
SENT_LST += nltk.corpus.gutenberg.sents()
SENT_LST += nltk.corpus.reuters.sents()
SENT_LST += nltk.corpus.inaugural.sents()
POS_MODEL = utils.load_stanford('pos')
NER_MODEL = utils.load_stanford('ner')
write_dictionary(POS_MODEL,
SENT_LST,
add_tags=1,
use_ner=False,
ner_args=[NER_MODEL, "<NE>"])
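# --- Illustrative sketch (not part of the original script) ---
# The script above assumes a contractions.yaml mapping each contraction to its
# possible expansions.  The shape of that file, and of the inverted dictionary
# returned by _invert_contractions_dict(), can be sketched with plain Python
# structures; the concrete entries below are hypothetical examples, not the
# project's real data.
example_contractions = {
    "he's": ["he is", "he has"],   # ambiguous -> kept for disambiguation
    "can't": ["cannot"],           # unambiguous -> skipped (len(value) == 1)
}
def invert_example(contractions):
    """Mirror the inversion logic: expansion -> contraction, ambiguous only."""
    expansions = {}
    for contraction, variants in contractions.items():
        if len(variants) == 1:
            continue
        for variant in variants:
            expansions[variant] = contraction
    return expansions
# invert_example(example_contractions)
# -> {'he is': "he's", 'he has': "he's"}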
| 40.850534
| 77
| 0.586811
| 0
| 0
| 4,207
| 0.366463
| 0
| 0
| 0
| 0
| 5,824
| 0.507317
|
675069879b1d492d1df7599b3ec43ea76978d06f
| 1,881
|
py
|
Python
|
setup.py
|
baye0630/paperai
|
717f6c5a6652d6bc1bdb70d4a248a4751f820ddb
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
baye0630/paperai
|
717f6c5a6652d6bc1bdb70d4a248a4751f820ddb
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
baye0630/paperai
|
717f6c5a6652d6bc1bdb70d4a248a4751f820ddb
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable = C0111
from setuptools import find_packages, setup
setup(name="paperai",
# version="1.5.0",
# author="NeuML",
# description="AI-powered literature discovery and review engine for medical/scientific papers",
# long_description=DESCRIPTION,
# long_description_content_type="text/markdown",
# url="https://github.com/neuml/paperai",
# project_urls={
# "Documentation": "https://github.com/neuml/paperai",
# "Issue Tracker": "https://github.com/neuml/paperai/issues",
# "Source Code": "https://github.com/neuml/paperai",
# },
# C:\Users\sxm\Desktop\paperai
# project_urls={
# "Documentation": "C:\\Users\\sxm\\Desktop\\paperai",
# "Source Code": "C:\\Users\\sxm\\Desktop\\paperai",
#},
license="Apache 2.0: C:\\Users\\sxm\\Desktop\\paperai\\LICENSE",
packages=find_packages(where="C:\\Users\\sxm\\Desktop\\paperai\\src\\python"),
package_dir={"": "src\\python"},
keywords="search embedding machine-learning nlp covid-19 medical scientific papers",
python_requires=">=3.6",
entry_points={
"console_scripts": [
"paperai = paperai.shell:main",
],
},
install_requires=[
"html2text>=2020.1.16",
# "mdv>=1.7.4",
"networkx>=2.4",
"PyYAML>=5.3",
"regex>=2020.5.14",
"txtai>=1.4.0",
"txtmarker>=1.0.0"
],
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development",
"Topic :: Text Processing :: Indexing",
"Topic :: Utilities"
])
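# Hedged usage note (not part of setup.py): the console_scripts entry point
# above wires a `paperai` command to paperai.shell:main, so after installing
# the package (e.g. `pip install .` in the project root) the equivalent direct
# call from Python would simply be:
#
#     from paperai.shell import main
#     main()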
| 36.882353
| 102
| 0.569378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,276
| 0.678363
|
6751ed6431d090ba5f0d7abc986bd5b1a678af78
| 3,295
|
py
|
Python
|
hit_analysis/image/cut_reconstruction.py
|
credo-science/credo-classify
|
1cc5e00a4df36c4069c0d0fbc19f579780b79ca5
|
[
"MIT"
] | null | null | null |
hit_analysis/image/cut_reconstruction.py
|
credo-science/credo-classify
|
1cc5e00a4df36c4069c0d0fbc19f579780b79ca5
|
[
"MIT"
] | 8
|
2021-03-30T12:52:01.000Z
|
2022-03-12T00:19:45.000Z
|
hit_analysis/image/cut_reconstruction.py
|
credo-science/credo-classify
|
1cc5e00a4df36c4069c0d0fbc19f579780b79ca5
|
[
"MIT"
] | 1
|
2020-06-12T13:29:34.000Z
|
2020-06-12T13:29:34.000Z
|
from io import BytesIO
from typing import List, Dict
from PIL import Image
from hit_analysis.commons.config import Config
from hit_analysis.commons.consts import IMAGE, CROP_X, CROP_Y, CROP_SIZE, FRAME_DECODED, CLASSIFIED, CLASS_ARTIFACT, ORIG_IMAGE
def append_to_frame(image: Image, detection: dict):
hit_img = detection.get(IMAGE)
cx = detection[CROP_X]
cy = detection[CROP_Y]
w, h = detection[CROP_SIZE]
image.paste(hit_img, (cx, cy, cx + w, cy + h))
# fix bug in early CREDO Detector App: black filled boundary 1px too large
image.paste(image.crop((cx + w - 1, cy, cx + w, cy + h)), (cx + w, cy, cx + w + 1, cy + h))
image.paste(image.crop((cx, cy + h - 1, cx + w, cy + h)), (cx, cy + h, cx + w, cy + h + 1))
image.paste(image.crop((cx + w - 1, cy + h - 1, cx + w, cy + h)), (cx + w, cy + h, cx + w + 1, cy + h + 1))
def replace_from_frame(image: Image, detection: dict):
cx = detection.get(CROP_X)
cy = detection.get(CROP_Y)
w, h = detection.get(CROP_SIZE)
hit_img = image.crop((cx, cy, cx + w, cy + h))
detection[ORIG_IMAGE] = detection[IMAGE]
detection[IMAGE] = hit_img
with BytesIO() as output:
hit_img.save(output, format="png")
# hit_img.save('/tmp/%d.png' % detection.get('id'))
detection[FRAME_DECODED] = output.getvalue()
def do_reconstruct(detections: List[dict], config: Config) -> None:
"""
    Reconstruct the full frame from the crops that the CREDO Detector app v2 saved with a black-filled background.
    The detection[x]['frame_decoded'] will be replaced by a new value; the old value is stored in detection[x]['frame_decoded_orig'].
    No changes are made when the number of detections is less than or equal to 1.
:param detections: should be sorted by detection_id
:param config: config object
"""
if len(detections) <= 1:
return
sp = [str(detections[0].get('device_id')), str(detections[0].get('timestamp'))]
image = Image.new('RGBA', (detections[0].get('width'), detections[0].get('height')), (0, 0, 0))
edge = 'no_edge'
for d in detections:
if d.get('edge'):
edge = 'edge'
for d in reversed(detections):
append_to_frame(image, d)
config.store_png(['recostruct', edge, *sp, 'orig'], d.get('id'), d.get(IMAGE))
for d in detections:
replace_from_frame(image, d)
config.store_png(['recostruct', edge, *sp], d.get('id'), d.get(IMAGE))
if config.out_dir:
image.save('%s/recostruct/%s/%s/frame.png' % (config.out_dir, edge, "/".join(sp)))
def check_all_artifacts(detections: List[dict]) -> bool:
"""
    Check whether all detections are classified as artifacts.
    :param detections: list of detections to check
    :return: True if all detections are artifacts
"""
for d in detections:
if d.get(CLASSIFIED) != CLASS_ARTIFACT:
return False
return True
def filter_unclassified(by_timestamp: Dict[int, List[dict]]) -> List[int]:
"""
    Filter for timestamps whose detections include at least one not classified as an artifact.
    :param by_timestamp: detections grouped by timestamp
    :return: list of timestamp keys that pass the filter
"""
ret = []
for timestamp, detections in by_timestamp.items():
if not check_all_artifacts(detections):
ret.append(timestamp)
return ret
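# --- Illustrative sketch (not part of the original module) ---
# Minimal, self-contained example of the filtering logic above, using plain
# string keys instead of the imported CLASSIFIED / CLASS_ARTIFACT constants
# (their real values live in hit_analysis.commons.consts).
def _filter_unclassified_demo():
    by_timestamp = {
        1000: [{'classified': 'artifact'}, {'classified': 'artifact'}],
        2000: [{'classified': 'artifact'}, {'classified': 'track'}],
    }
    kept = []
    for timestamp, detections in by_timestamp.items():
        if not all(d.get('classified') == 'artifact' for d in detections):
            kept.append(timestamp)
    return kept  # only the 2000 group survives: it has a non-artifact hit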
| 35.815217
| 132
| 0.643703
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 990
| 0.300455
|
67525ed3e9b1efee9050769baa49e34f54d058e4
| 7,215
|
py
|
Python
|
tests/st/fallback/control_flow/test_fallback_010_if_in_if.py
|
httpsgithu/mindspore
|
c29d6bb764e233b427319cb89ba79e420f1e2c64
|
[
"Apache-2.0"
] | 1
|
2022-02-23T09:13:43.000Z
|
2022-02-23T09:13:43.000Z
|
tests/st/fallback/control_flow/test_fallback_010_if_in_if.py
|
949144093/mindspore
|
c29d6bb764e233b427319cb89ba79e420f1e2c64
|
[
"Apache-2.0"
] | null | null | null |
tests/st/fallback/control_flow/test_fallback_010_if_in_if.py
|
949144093/mindspore
|
c29d6bb764e233b427319cb89ba79e420f1e2c64
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test graph fallback control flow if in if scenario"""
import pytest
import numpy as np
from mindspore import Tensor, ms_function, context
context.set_context(mode=context.GRAPH_MODE)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_in_if_1():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
@ms_function
def control_flow_if_in_if():
x = Tensor(1)
y = Tensor(2)
if x > Tensor(0):
if y > Tensor(1):
return y + 1
return x + 1
return x + y
res = control_flow_if_in_if()
assert res == 3
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_in_if_2():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
@ms_function
def control_flow_if_in_if():
x = Tensor(1)
y = Tensor(0)
if x > Tensor(0):
if y > Tensor(1):
return y + 1
return x + 1
return x + y
res = control_flow_if_in_if()
assert res == 2
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_in_if_3():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
@ms_function
def control_flow_if_in_if():
x = Tensor(-2)
y = Tensor(-3)
if x > Tensor(0):
if y > Tensor(1):
return y + 1
return x + 1
return x + y
res = control_flow_if_in_if()
assert res == -5
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_in_if_4():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
@ms_function
def control_flow_if_in_if():
x = np.array([1, 2, 3, 4, 5])
y = x % 2
z = Tensor(y)
if (x >= y).all():
if sum(z) > Tensor(2):
z = Tensor(x) + 1
return z
res = control_flow_if_in_if()
assert np.all(res.asnumpy() == np.array([2, 3, 4, 5, 6]))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_in_if_5():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
@ms_function
def control_flow_if_in_if():
x = list([1, 2, 3, 4])
if max(x) >= 4:
y = Tensor(sum(x) + max(x))
if y < Tensor(10):
return y
return y - 10
return x
res = control_flow_if_in_if()
assert res == 4
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_else_in_if_else_1():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
@ms_function
def control_flow_if_in_if():
x = Tensor(10)
y = Tensor(7)
if x - y > Tensor(np.array([0])):
x = x - Tensor(3)
if x - y > Tensor(0):
x = x - Tensor(4)
else:
x = x + Tensor(4)
x = x * 2
return x - 1
res = control_flow_if_in_if()
assert res == 21
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_else_in_if_else_2():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
@ms_function
def control_flow_if_in_if():
x = Tensor(10)
y = Tensor(7)
if x - y > Tensor(np.array([10])):
x = x - Tensor(3)
if x - y > Tensor(0):
x = x - Tensor(4)
else:
x = x + Tensor(4)
x = x * 2
else:
if x > Tensor(15):
m = np.array([1, 2, 3, 4, 5])
elif x < Tensor(-10):
return Tensor(sum(np.array([5, 4, 3, 2, 1])))
else:
m = np.array([-1, -2, -3, -4, -5])
x = Tensor(sum(m))
return x - 1
res = control_flow_if_in_if()
assert res == -16
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_in_if_multi_conds():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
@ms_function
def control_flow_if_in_if():
x = np.array([1, 2, 3, 4])
y = np.array([4, 5, 6])
if max(x) <= min(y) and sum(x) == 10:
x += 3
if max(x) <= max(y):
m = Tensor(10)
elif min(x) != max(y) or x.size > y.size:
m = Tensor(20)
else:
m = Tensor(0)
else:
m = Tensor(1)
return m
res = control_flow_if_in_if()
assert res == 20
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_if_in_if_multi_conds_2():
"""
Feature: JIT Fallback
Description: Test fallback with control flow.
Expectation: No exception.
"""
@ms_function
def control_flow_if_in_if():
x = Tensor(10)
y = Tensor(2)
if x > y and x % y == Tensor(0):
x -= Tensor(3)
if x < y:
m = Tensor(10)
elif x > y or x % y == Tensor(0):
m = Tensor(20)
else:
m = x + y
else:
m = Tensor(0)
return m
res = control_flow_if_in_if()
assert res == 20
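# --- Illustrative note (not part of the original test file) ---
# The expected values in these tests follow from ordinary Python control flow;
# e.g. in test_if_else_in_if_else_2 above: x=10, y=7, so x-y=3 is not > 10,
# 10 > 15 and 10 < -10 are both False, hence m = [-1, -2, -3, -4, -5] and the
# function returns Tensor(sum(m)) - 1 = -15 - 1 = -16.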
| 27.43346
| 78
| 0.595981
| 0
| 0
| 0
| 0
| 6,333
| 0.877755
| 0
| 0
| 1,772
| 0.245599
|
6756dc638ee04975afad0eae2f92936de0c1062f
| 42,937
|
py
|
Python
|
EPOpt/SpectrumAnalysis.py
|
ruixueqingyang/GPOEO
|
8fe65ac3e0ae4d097fdd0d58878aa2cf3201a18c
|
[
"MIT"
] | 5
|
2021-09-01T18:04:18.000Z
|
2022-02-25T04:48:21.000Z
|
EPOpt/SpectrumAnalysis.py
|
ruixueqingyang/GPOEO
|
8fe65ac3e0ae4d097fdd0d58878aa2cf3201a18c
|
[
"MIT"
] | null | null | null |
EPOpt/SpectrumAnalysis.py
|
ruixueqingyang/GPOEO
|
8fe65ac3e0ae4d097fdd0d58878aa2cf3201a18c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from scipy.fftpack import fft, fftshift, ifft
from scipy.fftpack import fftfreq
from scipy.signal import find_peaks
from scipy import signal
import numpy as np
import matplotlib.pyplot as plt
import pickle
import warnings
import sys
from scipy.signal.filter_design import maxflat
warnings.filterwarnings("ignore")
MAPEMax = 1e4
MAPEStdMax = 1e4
FigCount = 0
TLowBoundBase = 0.4
TLowBound = TLowBoundBase
TRound = 6
SetTLowBound = False
GrpFigCount = 0
# compute the least common multiple
def lcm(x, y):
m = max(x, y)
n = min(x, y)
while m%n:
m, n = n, m%n
return x*y//n
# wfr 20210107 find an approximate common-multiple period for a set of short periods
def ApproximateLCM(arrayFactor, minALCM):
arrayALCM = np.array([])
if np.round(minALCM/np.max(arrayFactor)) - minALCM/np.max(arrayFactor) < 0:
ALCM = np.max(arrayFactor) * np.round(minALCM / np.max(arrayFactor))
else:
ALCM = minALCM
# tmpALCM = minALCM
    # maxErr = 1.0 # maximum percentage error
    while True:
        arrayTmp = ALCM / arrayFactor
        arrayInteger = np.round(arrayTmp) # nearest integer multiple of each period
        arrayDecimal = np.abs(arrayTmp - arrayInteger) # fractional distance from the nearest integer multiple, in periods
arrayErr = arrayDecimal / arrayInteger
maxErr = np.max(arrayErr)
if maxErr < 0.1:
if len(arrayALCM) > 0:
# 给出几个备选, 但也不能太多, 因为1倍周期和2倍周期无法区分, 因此备选不能接近2倍周期
if (ALCM / arrayALCM[0] > 1.3) \
or (len(arrayALCM) >= len(arrayFactor)) \
or (arrayALCM[-1]-arrayALCM[0] > 0.6 * np.max(arrayFactor)):
break
arrayALCM = np.append(arrayALCM, ALCM)
ALCM += 0.2 * np.max(arrayFactor)
return arrayALCM
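# Hedged usage note (not part of the original file): ApproximateLCM probes
# candidates starting near minALCM and growing in steps of 0.2 * max(arrayFactor),
# accepting a value when every factor fits an integer number of times into it
# to within ~10% of a multiple; e.g. ApproximateLCM(np.array([0.7, 1.1]), 3.0)
# returns a small array of such approximate common multiples starting around 3 s.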
# wfr 20210130 first compute the N quantile points, then pick the one whose neighbourhood contains the densest cluster of samples
# PctRange describes that density: a fraction PctRange of all samples must lie near the chosen quantile point
def FindClusterCenter(arrayP, N, PctRange):
    arrayTmpIndex = np.argsort(arrayP) # indices that sort arrayP in ascending order
    PointCount = N - 1 # wfr 20210127 number of quantile points
    arrayPoint = np.linspace(0, 1, PointCount + 2)[1:-1]
    arrayPoint = np.floor(arrayPoint * len(arrayP))
    arrayPoint = arrayP[arrayTmpIndex[arrayPoint.astype(int)]]
    arrayHalfRange = np.ones(PointCount) * (arrayP[arrayTmpIndex[-1]] - arrayP[arrayTmpIndex[0]])
    # wfr 20210130 define the "neighbourhood" of a quantile point; 0.3 means the neighbourhood must hold 30% of all samples
    # i.e. find the symmetric interval around the point that reaches that percentage
    # PctRange = 0.3
SampleCount = len(arrayP)
for i in range(PointCount):
        # initialise the lower and upper bounds of the interval half-width
        HalfRangeMin = 0
        HalfRangeMax = np.max([ arrayP[arrayTmpIndex[-1]] - arrayPoint[i], arrayPoint[i] - arrayP[arrayTmpIndex[0]] ])
        # stop looping once the half-width bracket is smaller than a threshold
        while HalfRangeMax - HalfRangeMin > 0.03 * (arrayP[arrayTmpIndex[-1]] - arrayP[arrayTmpIndex[0]]):
            # binary search: try the midpoint of the half-width bracket
            HalfRange = (HalfRangeMin + HalfRangeMax) / 2
            # count the samples inside the interval
            SampleCountIn = np.sum( ((arrayPoint[i]-HalfRange) < arrayP) & (arrayP < (arrayPoint[i]+HalfRange)) )
            # update the half-width bracket
if SampleCountIn / SampleCount < PctRange:
HalfRangeMin = HalfRange
elif SampleCountIn / SampleCount >= PctRange:
HalfRangeMax = HalfRange
arrayHalfRange[i] = HalfRange
arrayRangeIndex = np.argsort(arrayHalfRange)
# wfr 20210130 返回附近采样点最密集的 N 位点 及 达到 PctRange 的 最小区间半宽度
return arrayPoint[arrayRangeIndex[0]], arrayHalfRange[arrayRangeIndex[0]]
# wfr 20210126 cluster and merge intervals so that there are not too many of them
def GrpClustering(arrayP, GrpFactor):
    Mean = np.mean(arrayP)
    # wfr 20210130 pick the initial cluster centre from the N quantile points: the one with the densest neighbourhood of samples
    Point, Diff = FindClusterCenter(arrayP, 5, 0.33)
tmpLow = Point - Diff
tmpUp = Point + Diff
    arrayIndexIn = np.argwhere((tmpLow <= arrayP) & (arrayP <= tmpUp)) # indices of the points in the neighbourhood of the chosen quantile
    arrayIndexIn = arrayIndexIn[:, 0]
    if len(arrayIndexIn) < 1:
        print("DistributionMAPE: ERROR: no samples found in the neighbourhood of the median")
    elif len(arrayIndexIn) > 1:
        arrayTmp = (arrayIndexIn[1:] - arrayIndexIn[:(-1)]) > 1 # a gap between consecutive indices marks an interval boundary
        arrayBeginFlag = np.insert(arrayTmp, 0, True) # flags for the start point of each interval
        arrayEndFlag = np.append(arrayTmp, True) # flags for the (inclusive) end point of each interval
        arrayIndexBegin = arrayIndexIn[arrayBeginFlag] # start indices of the intervals
        arrayIndexEnd = arrayIndexIn[arrayEndFlag] + 1 # (exclusive) end indices of the intervals
else:
arrayIndexBegin = arrayIndexIn
arrayIndexEnd = arrayIndexIn + 1
arrayIndexBeginOrigin = arrayIndexBegin
arrayIndexEndOrigin = arrayIndexEnd
    # there are len(arrayIndexBegin)+1 gaps that we may try to merge (including the gaps at both ends)
    arrayIsolateFlag = np.zeros(len(arrayIndexBegin)).astype(bool) # marks whether a merge has already been attempted for each interval
    stdP = np.std(arrayP) * 0.999 # avoid the ambiguous case of exact equality
    # wfr 20210202 adjusted the error tolerances
    # MeanErrorShort = 0.04 # mean-value error tolerance for the interval to be merged
    MeanErrorGap = 0.04 # mean-value error tolerance for the gap to be merged
    MeanErrorGrp = 0.03 # mean-value error tolerance for the merged interval
    ShortGrpPct = 0.04 # threshold for very short intervals; intervals below it use a larger tolerance
    ScaleFactor = 2 # factor by which the tolerances are enlarged
arrayIsolateIndex = np.argsort(-1 * (arrayIndexEnd - arrayIndexBegin))
i = arrayIsolateIndex[0]
while 0 <= i and i <= len(arrayIndexBegin) - 1:
meanGrp = np.mean(arrayP[arrayIndexBegin[i]: arrayIndexEnd[i]])
isMerged = True
while isMerged == True:
isMerged = False
# wfr 20210127 尝试合并左侧
if i == 0:
if arrayIndexBegin[0] != 0: # 尝试合并最左侧空隙
tmpStd = np.std(arrayP[0: arrayIndexEnd[0]])
tmpMean = np.mean(arrayP[0: arrayIndexEnd[0]])
tmpMeanGap = np.mean(arrayP[0: arrayIndexBegin[0]])
# wfr 20210202 判断 gap 是否极短, 若是则增大误差
if arrayIndexBegin[0] <= ShortGrpPct * len(arrayP):
tmpMeanErrorGap = ScaleFactor * MeanErrorGap
else:
tmpMeanErrorGap = MeanErrorGap
# wfr 20210202 判断 区间 是否极短, 若是则增大误差
if arrayIndexEnd[0] <= ShortGrpPct * len(arrayP):
tmpMeanErrorGrp = ScaleFactor * MeanErrorGrp
tmpStdP = ScaleFactor * stdP
else:
tmpMeanErrorGrp = MeanErrorGrp
tmpStdP = stdP
if tmpStd < tmpStdP and np.abs((meanGrp - tmpMean) / np.mean([meanGrp, tmpMean])) < tmpMeanErrorGrp \
and np.abs((meanGrp - tmpMeanGap) / np.mean([meanGrp, tmpMeanGap])) < tmpMeanErrorGap:
# 合并成功
arrayIndexBegin[0] = 0
meanGrp = tmpMean
isMerged = True
elif True or arrayIsolateFlag[i-1] == False: # 尝试合并最左侧的 区间 和 空隙
tmpStd = np.std(arrayP[arrayIndexBegin[i - 1]: arrayIndexEnd[i]])
tmpMean = np.mean(arrayP[arrayIndexBegin[i - 1]: arrayIndexEnd[i]])
# tmpMeanPrev = np.mean(arrayP[arrayIndexBegin[i - 1]: arrayIndexBegin[i]])
tmpMeanGap = np.mean(arrayP[arrayIndexEnd[i - 1]: arrayIndexBegin[i]])
# wfr 20210202 判断 gap 是否极短, 若是则增大误差
if arrayIndexBegin[i] - arrayIndexEnd[i-1] <= ShortGrpPct * len(arrayP):
tmpMeanErrorGap = ScaleFactor * MeanErrorGap
else:
tmpMeanErrorGap = MeanErrorGap
# wfr 20210202 判断 区间 是否极短, 若是则增大误差
if arrayIndexEnd[i] - arrayIndexBegin[i-1] <= ShortGrpPct * len(arrayP):
tmpMeanErrorGrp = ScaleFactor * MeanErrorGrp
tmpStdP = ScaleFactor * stdP
else:
tmpMeanErrorGrp = MeanErrorGrp
tmpStdP = stdP
if tmpStd < tmpStdP and np.abs((meanGrp - tmpMean) / np.mean([meanGrp, tmpMean])) < tmpMeanErrorGrp \
and np.abs((meanGrp - tmpMeanGap) / np.mean([meanGrp, tmpMeanGap])) < tmpMeanErrorGap:
# and np.abs((meanGrp - tmpMeanPrev) / np.mean([meanGrp, tmpMeanPrev])) < tmpMeanError1:
# 合并成功
arrayIndexBegin = np.delete(arrayIndexBegin, i)
arrayIndexEnd = np.delete(arrayIndexEnd, i - 1)
arrayIsolateFlag = np.delete(arrayIsolateFlag, i - 1)
meanGrp = tmpMean
isMerged = True
i -= 1
# wfr 20210127 尝试合并右侧
if i == len(arrayIndexBegin) - 1:
if arrayIndexEnd[-1] != len(arrayP): # 尝试合并最右侧空隙
tmpStd = np.std(arrayP[arrayIndexBegin[i]:])
tmpMean = np.mean(arrayP[arrayIndexBegin[i]:])
tmpMeanGap = np.mean(arrayP[arrayIndexEnd[i]:])
# wfr 20210202 判断 gap 是否极短, 若是则增大误差
if len(arrayP) - arrayIndexEnd[i] <= ShortGrpPct * len(arrayP):
tmpMeanErrorGap = ScaleFactor * MeanErrorGap
else:
tmpMeanErrorGap = MeanErrorGap
# wfr 20210202 判断 区间 是否极短, 若是则增大误差
if len(arrayP) - arrayIndexBegin[i] <= ShortGrpPct * len(arrayP):
tmpMeanErrorGrp = ScaleFactor * MeanErrorGrp
tmpStdP = ScaleFactor * stdP
else:
tmpMeanErrorGrp = MeanErrorGrp
tmpStdP = stdP
if tmpStd < tmpStdP and np.abs((meanGrp - tmpMean) / np.mean([meanGrp, tmpMean])) < tmpMeanErrorGrp \
and np.abs((meanGrp - tmpMeanGap) / np.mean([meanGrp, tmpMeanGap])) < tmpMeanErrorGap:
# 合并成功
arrayIndexEnd[-1] = len(arrayP)
meanGrp = tmpMean
isMerged = True
elif True or arrayIsolateFlag[i+1] == False: # 尝试合并最右侧的 区间 和 空隙
tmpStd = np.std(arrayP[arrayIndexBegin[i]: arrayIndexEnd[i + 1]])
tmpMean = np.mean(arrayP[arrayIndexBegin[i]: arrayIndexEnd[i + 1]])
tmpMeanGap = np.mean(arrayP[arrayIndexEnd[i]: arrayIndexBegin[i + 1]])
# tmpMeanBack = np.mean(arrayP[arrayIndexEnd[i]: arrayIndexEnd[i + 1]])
# wfr 20210202 判断 gap 是否极短, 若是则增大误差
if arrayIndexBegin[i+1] - arrayIndexEnd[i] <= ShortGrpPct * len(arrayP):
tmpMeanErrorGap = ScaleFactor * MeanErrorGap
else:
tmpMeanErrorGap = MeanErrorGap
# wfr 20210202 判断 区间 是否极短, 若是则增大误差
if arrayIndexEnd[i+1] - arrayIndexBegin[i] <= ShortGrpPct * len(arrayP):
tmpMeanErrorGrp = ScaleFactor * MeanErrorGrp
tmpStdP = ScaleFactor * stdP
else:
tmpMeanErrorGrp = MeanErrorGrp
tmpStdP = stdP
if tmpStd < tmpStdP and np.abs((meanGrp - tmpMean) / np.mean([meanGrp, tmpMean])) < tmpMeanErrorGrp \
and np.abs((meanGrp - tmpMeanGap) / np.mean([meanGrp, tmpMeanGap])) < tmpMeanErrorGap:
# and np.abs((meanGrp - tmpMeanBack) / np.mean([meanGrp, tmpMeanBack])) < tmpMeanErrorGap \
# 合并成功
arrayIndexBegin = np.delete(arrayIndexBegin, i + 1)
arrayIndexEnd = np.delete(arrayIndexEnd, i)
arrayIsolateFlag = np.delete(arrayIsolateFlag, i + 1)
meanGrp = tmpMean
isMerged = True
# wfr 20210127 标记 i 为已经尝试过, 找到下一个没尝试过的长度最长的 区间
arrayIsolateFlag[i] = True
i = -1
arrayIsolateIndex = np.argsort(-1 * (arrayIndexEnd - arrayIndexBegin))
if len(arrayIsolateIndex) > 0:
tmp = np.argwhere(arrayIsolateFlag[arrayIsolateIndex] == False)
tmp = tmp[:, 0]
if len(tmp) > 0:
i = arrayIsolateIndex[tmp[0]]
    # wfr 20210202 evaluate the clustering result: if one interval spans almost the whole period, clustering should not be applied, because it would make the error abnormally small
arrayInterval = arrayIndexEnd - arrayIndexBegin
for i in range(len(arrayInterval)):
if arrayInterval[i] / len(arrayP) > 0.85:
tmp0 = arrayIndexBegin[i] <= arrayIndexBeginOrigin
tmp1 = arrayIndexEndOrigin <= arrayIndexEnd[i]
tmp = np.argwhere(tmp0 & tmp1)[:, 0]
arrayIndexBegin = np.delete(arrayIndexBegin, i)
arrayIndexBegin = np.insert(arrayIndexBegin, i, arrayIndexBeginOrigin[tmp])
arrayIndexEnd = np.delete(arrayIndexEnd, i)
arrayIndexEnd = np.insert(arrayIndexEnd, i, arrayIndexEndOrigin[tmp])
break
# 开始点/结束点 index 交叉存放, 形成区间分界 index 序列
arrayGroupIndex = np.zeros(2 * len(arrayIndexBegin)).astype(int)
arrayGroupIndex[2 * np.arange(0, len(arrayIndexBegin))] = arrayIndexBegin
arrayGroupIndex[1 + 2 * np.arange(0, len(arrayIndexBegin))] = arrayIndexEnd
# 补充 0 和 len(arrayP), 如果缺的话
if arrayGroupIndex[0] != 0:
arrayGroupIndex = np.insert(arrayGroupIndex, 0, 0)
if arrayGroupIndex[-1] != len(arrayP):
arrayGroupIndex = np.append(arrayGroupIndex, len(arrayP))
# fig = plt.figure(figsize=(8, 4)) # 定义一个图像窗口
# ax = fig.add_subplot(111)
# ax.plot(arrayP)
# for v in arrayGroupIndex:
# ax.axvline(x=v, color="black", linestyle="--", linewidth=0.5) # 横座标 v 画一条横线
# plt.show()
# plt.close(fig)
return arrayGroupIndex
# compute the MAPE (Mean Absolute Percentage Error) of the power distribution for a candidate period T
# arraySample: sampled power waveform
# arrayTimeStamp: sampling time stamps
# T: candidate period
# this function is the execution hot spot and still has room for further vectorisation
# wfr 20210108 the largest factor of the common-multiple period is passed in
def DistributionMAPE(T, arraySample, arrayTimeStamp, algorithm = "mean", TInit = -1, TRef = -1):
global MAPEMax, MAPEStdMax
TMAPE = MAPEMax
TMAPEStd = MAPEStdMax
if T > 0.5 * arrayTimeStamp[-1]: # or T < 2:
        return TMAPE, TMAPEStd # reject periods longer than half of the measurement interval
    NumTRegion = int(arrayTimeStamp[-1] / T) # how many periods the sampling time contains
    SampleInterval = arrayTimeStamp[1]  # sampling interval
    NumSample = int(T / (SampleInterval)) # number of samples within one period
    arrayTRegionMAPE = np.array([])
    arrayIndex = np.arange(0, (NumTRegion + 0.1), 1) # the last region may be incomplete, so it is not used
    arrayIndex = arrayIndex * NumSample
    arrayIndex = arrayIndex.astype(int) # start/end sample index of every complete period region
    # tmpOffset = len(arraySample) - arrayIndex[-1]
    # arrayIndex += int(0.5 * tmpOffset)
    # wfr 20211015 a 1x period and a multi-x period should use different numbers of clustering groups;
    # TInit is used as the reference to compute a group multiplier: for TInit > T the multiplier becomes 2, 3, 4, ...
    # during group clustering, every group is then split into 2, 3, 4 sub-groups
GrpFactor = int(1)
if TInit > T:
tmp = TInit / T
GrpFactor = int(tmp)
if tmp - GrpFactor > 0.75:
GrpFactor += 1
for indexTRgn in range( NumTRegion - 1 ): # 对于每两个相邻的周期区间, 衡量其功率数据分布相似情况
# wfr 20210106
arrayP0 = arraySample[ arrayIndex[indexTRgn] : arrayIndex[indexTRgn+1] ]
arrayP1 = arraySample[ arrayIndex[indexTRgn+1] : arrayIndex[indexTRgn+2] ]
tmpMean0 = np.mean(arrayP0)
tmpMean1 = np.mean(arrayP1)
tmpStd0 = np.std(arrayP0)
tmpStd1 = np.std(arrayP1)
if tmpStd0 < tmpStd1:
arrayGroupIndex = GrpClustering(arrayP1, GrpFactor)
else:
arrayGroupIndex = GrpClustering(arrayP0, GrpFactor)
        # first build the grouped arrays
        NumGroup = len(arrayGroupIndex)-1
        arrayGrouped0 = np.zeros( NumGroup )
        arrayGrouped1 = np.zeros( NumGroup )
        # split the two adjacent period regions into NumGroup groups each and compute, per group, the mean deviation of the power from the region's mean power
for indexGrp in range(NumGroup):
# wfr 20210106
tmpArray0 = arraySample[ arrayIndex[indexTRgn]+arrayGroupIndex[indexGrp]: arrayIndex[indexTRgn]+arrayGroupIndex[indexGrp+1] ]
tmpArray1 = arraySample[ arrayIndex[indexTRgn+1]+arrayGroupIndex[indexGrp]: arrayIndex[indexTRgn+1]+arrayGroupIndex[indexGrp+1] ]
arrayGrouped0[indexGrp] = np.mean(tmpArray0) - tmpMean0
arrayGrouped1[indexGrp] = np.mean(tmpArray1) - tmpMean1
        # compare the corresponding groups of the two adjacent period regions pairwise (SMAPE-like error)
        arrayGroupMAPE = np.abs((arrayGrouped1 - arrayGrouped0) / np.mean([tmpMean0, tmpMean1]) * 100)
        if algorithm != "mean":
            # during the neighbourhood search use the maximum instead of the mean, which exposes non-periodicity of the waveform better
            TRegionMAPE = np.max( arrayGroupMAPE )
        else:
            # wfr changed to a weighted average, weighted by the number of samples in each group
            arrayWeight = arrayGroupIndex[1:] - arrayGroupIndex[:-1]
            TRegionMAPE = np.average(arrayGroupMAPE, weights=arrayWeight)
# TRegionMAPE = np.mean( arrayGroupMAPE )
arrayTRegionMAPE = np.append(arrayTRegionMAPE, TRegionMAPE)
# end
    # wfr 20210828 for small candidate periods there are many period regions and very likely some with a tiny error; such regions should not be considered, because they may sit on a plateau of a larger period.
    # therefore arrayTRegionMAPE is split into TRef/T groups and the maximum error of each group is taken to form tmpArray
    if TRef < 0 or TRef / T < 1.7:
        TMAPE = np.mean(arrayTRegionMAPE)
        TMAPEStd = np.std(arrayTRegionMAPE)
    else:
        tmpEachCount = int(np.ceil(TRef / T)) # number of regions per group
        tmpGrpCount = int(np.ceil(len(arrayTRegionMAPE) / tmpEachCount)) # number of groups
        tmpAppendCount = int(tmpGrpCount * tmpEachCount - len(arrayTRegionMAPE)) # number of elements to pad
        tmpArray = np.append(arrayTRegionMAPE, np.zeros(tmpAppendCount)) # pad with 0.0 so that the reshape works
        tmpArray = tmpArray.reshape(tmpGrpCount, -1) # the reshape performs the grouping: [0,1,2], [3,4,5], ...
        tmpArray = np.max(tmpArray, axis=1).flatten() # take the maximum of each group
TMAPE = np.mean(tmpArray)
TMAPEStd = np.std(tmpArray)
return TMAPE, TMAPEStd
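# --- Illustrative sketch (not part of the original file) ---
# Greatly simplified version of the idea behind DistributionMAPE: cut the power
# trace into consecutive windows of the candidate period T and compare
# neighbouring windows by the percentage difference of their mean power.  The
# real function above additionally clusters each window into sub-groups
# (GrpClustering) and compares mean-removed group averages; this sketch reuses
# the numpy import already at the top of this file.
def _simple_period_error(samples, sample_interval_s, T):
    n_per = int(T / sample_interval_s)          # samples per period
    if n_per == 0 or len(samples) < 2 * n_per:  # not enough data for two windows
        return float("nan")
    n_win = len(samples) // n_per
    windows = np.asarray(samples[:n_win * n_per]).reshape(n_win, n_per)
    means = windows.mean(axis=1)
    return float(np.mean(np.abs(np.diff(means)) / np.abs(means[:-1]) * 100))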
# wfr 20210116 a new method is used here to decide whether the signal is aperiodic
def TCompute(arraySample, SampleInterval, TUpBound, isPlot = False, lastT=0.0, preference=0, TCandidateExtra=np.array([])):
global MAPEMax, MAPEStdMax, TLowBoundBase, TLowBound, SetTLowBound
fs = 1/(SampleInterval/1000)
t = (SampleInterval/1000) * np.arange(0, len(arraySample), 1)
    # wfr 20210109 oversampling ratio of the FFT; raising it increases the largest recognisable period and improves the frequency/period resolution
    # the frequency resolution is constant, while the period resolution varies: the longer the period, the lower its resolution
    FFTSampleRatio = 1
    # wfr 20210109 adapt the number of FFT samples, i.e. FFTSampleRatio, dynamically
    # if the period found by the FFT is close to the largest resolvable period, increase FFTSampleRatio and analyse again
for i in range(2):
num_fft = FFTSampleRatio * t.size
# 傅里叶变换
idx = fftfreq(num_fft, 1/fs)
arrayX = idx[:num_fft//2]
arrayY = fft(arraySample, num_fft)
arrayY = np.abs(arrayY)
arrayY = arrayY[:num_fft//2]
# arrayLogY = np.log10(arrayY[:num_fft//2])
listPeakIndex, Properties = find_peaks( arrayY )
# 取出峰值处的 频率 和 幅值
arrayPeakX = arrayX[listPeakIndex]
arrayPeak = arrayY[listPeakIndex]
arrayPeakIndex = np.argsort(-1 * arrayPeak) # 将 arrayPeak 降序排列得到的 索引
# print("TCompute: len of arrayPeakX = {0}".format(len(arrayPeakX)))
# print("arrayPeak = {0}".format(arrayPeak[arrayPeakIndex[:9]]))
# print("Freq = {0}".format(arrayPeakX[arrayPeakIndex[:9]]))
# print("T = {0}".format(1/arrayPeakX[arrayPeakIndex[:9]]))
        # keep the few periods with the largest amplitude that do not exceed the period upper bound
        arrayT = 1 / arrayPeakX[arrayPeakIndex] # sorted by peak amplitude in descending order
        arrayPeakOrder = arrayPeak[arrayPeakIndex]
        # first drop periods above the upper bound
        tmpPeakOrder = arrayPeakOrder[(arrayT <= TUpBound)]
        arrayT = arrayT[(arrayT <= TUpBound)]
        # then drop periods whose peak is not large enough
        # arrayT = arrayT[(tmpPeakOrder > 0.65 * tmpPeakOrder[0])]
        arrayT = arrayT[(tmpPeakOrder > 0.60 * tmpPeakOrder[0])]
        arrayT = arrayT[:6]
        if len(arrayT) == 0:
            print("TCompute ERROR: no candidate periods!")
            return TUpBound, MAPEMax
if i == 0:
# wfr 20210109 计算 能分辨的次最大周期, 常数 3 代表次最大周期, 常数 2 代表最大周期
TUpResolution = FFTSampleRatio * len(arraySample) / (0.5*fs) / 3
if np.max(arrayT) > TUpResolution:
FFTSampleRatio = np.ceil( TUpBound / len(arraySample) * (0.5*fs) * 3 )
else:
break
print("arrayT = {0}".format(arrayT))
if isPlot == True:
# plt.clf()
# plt.ion()
plt.figure(figsize=(14, 6))
ax = plt.subplot(211)
ax.set_title('original signal')
plt.plot(t, arraySample)
ax = plt.subplot(212)
ax.set_title('fft transform')
plt.plot(arrayX, 20 * np.log10(arrayY))
ax.set_xlim(0, 2)
# ax.set_xlim(0, 5)
global FigCount
import os
WorkDir = "./"
FigFile = os.path.join(WorkDir, "TCompute" + str(FigCount) + ".png")
plt.savefig(FigFile)
FigCount += 1
# plt.show()
plt.close()
if len(listPeakIndex) == 0:
print("TCompute ERROR: 计算备选频率/周期出错")
return TUpBound, MAPEMax
    # wfr 20210107 compute approximate least-common-multiple periods to replace the small periods (below the period lower bound)
    arrayFactor = arrayT[ (arrayT < TLowBound) ]
    if len(arrayFactor) > 0:
        # wfr 20210108 largest factor of the common-multiple period; used later by the MAPE computation and the neighbourhood search
        FactorMax = np.max(arrayFactor) # wfr 20210107 largest of the candidate periods below the threshold
        # print("FactorMax = {0}".format(FactorMax))
        arrayALCM = ApproximateLCM(arrayFactor, TLowBound)
        # print("ALCM = {0}".format(arrayALCM))
        arrayScaleT = np.delete(arrayT, np.argwhere(arrayT < TLowBound).flatten()) # wfr 20210107 remove the candidate periods below the threshold
        # arrayScaleT = np.append(arrayScaleT, arrayFactor[0])
        # arrayScaleT = np.insert(arrayScaleT, 0, arrayFactor[0])
        # arrayScaleT = np.insert(arrayScaleT, 0, FactorMax) # wfr 20210107 keep the largest of the candidate periods below the threshold
        arrayScaleT = np.append(arrayScaleT, FactorMax) # wfr 20210107 keep the largest of the candidate periods below the threshold
        LenTNormal = len(arrayScaleT) # wfr 20210108 number of ordinary periods
        arrayScaleT = np.append(arrayScaleT, arrayALCM)
    else:
        arrayScaleT = arrayT
        LenTNormal = len(arrayScaleT) # wfr 20210108 number of ordinary periods
FactorMax = 1e4
arrayScaleT = np.append(arrayScaleT, TCandidateExtra)
print("arrayScaleT = {0}".format(arrayScaleT[:9]))
if int(preference) == int(1):
T_lower = lastT * 0.8
T_upper = lastT * 1.5
elif int(preference) == int(-1):
T_lower = lastT * 0.66
T_upper = lastT * 1.25
if int(preference) != int(0):
print("lastT = {:.2f}, set lowerT = {:.2f}, upperT = {:.2f}".format(lastT, T_lower, T_upper))
print("arrayScaleT remove: [", end="")
tmpArr = arrayScaleT.tolist()
for cand_T in arrayScaleT:
if cand_T < T_lower or cand_T > T_upper:
print("{:.2f}, ".format(cand_T), end="")
tmpArr.remove(cand_T)
arrayScaleT = np.array(tmpArr)
print("]")
if len(arrayScaleT) <= int(0):
print("arrayScaleT add: [", end="")
for cand_T in np.arange(T_lower, T_upper, (T_upper-T_lower-1e-15)/4):
print("{:.2f}, ".format(cand_T), end="")
arrayScaleT = np.append(arrayScaleT, cand_T)
print("]")
else:
pass
# print("==>no preference")
sys.stdout.flush()
arrayTMAPE = MAPEMax * np.ones(len(arrayScaleT))
arrayTMAPEStd = MAPEStdMax * np.ones(len(arrayScaleT))
TInit = arrayScaleT[0]
for i in range( len(arrayScaleT) ): # 对于每个猜测的周期长度
arrayTMAPE[i], arrayTMAPEStd[i] = DistributionMAPE(arrayScaleT[i], arraySample, t, "mean", TInit, np.max(arrayScaleT))
print("arrayTMAPE = {0}".format(arrayTMAPE))
# 如果峰值最高的周期的 MAPE 没有被计算, 即测量时间不够长, 就直接返回不稳定
if sum(arrayTMAPE > MAPEMax - 1) > 0:
# if arrayTMAPE[0] > MAPEMax - 1 or int(2 * sum(arrayTMAPE > MAPEMax - 1)) >= int(len(arrayTMAPE)):
tmpT = np.max(arrayScaleT)
# if int(2 * sum(arrayTMAPE > MAPEMax - 1)) >= int(len(arrayTMAPE)):
# tmpT = np.max(arrayScaleT)
# else:
# tmpT = arrayScaleT[0]
print("TCompute: 本次测量时间不够长")
print("TCompute: TOpt = {0:.2f} s".format(arrayScaleT[0]))
print("TCompute: MAPEOpt = {0:.2f}".format(arrayTMAPE[0]))
# return arrayScaleT[0], arrayTMAPE[0]
return tmpT, -1
# wfr 20210129 修改根据 arrayTMAPE 和 arrayTMAPEStd 判断最优周期的规则
# 先将 arrayTMAPEStdIndex 中的 0 都赋值成 其中的 非0最小值
if np.sum(arrayTMAPEStd < 1e-6) == len(arrayTMAPEStd):
arrayTMAPEStd[:] = 1
elif np.sum(arrayTMAPEStd < 1e-6) > 0:
arrayTMAPEStd[arrayTMAPEStd < 1e-6] = np.min(arrayTMAPEStd[arrayTMAPEStd > 1e-6])
# arrayTError = arrayTMAPE * arrayTMAPEStd # 将两数组相乘, 用乘积来评价 周期 的误差
arrayTIndex = np.argsort(arrayTMAPE) # 将 arrayTmp 升序排序 得到的 索引
IndexOpt = arrayTIndex[0]
# # print("arrayTMAPE = {0}".format(arrayTMAPE[:9]))
# arrayTMAPEIndex = np.argsort(arrayTMAPE) # 将 arrayDistributionErrPct 升序排序 得到的 索引
# arrayTMAPEStdIndex = np.argsort(arrayTMAPEStd) # 将 arrayTMAPEStdIndex 升序排序 得到的 索引
#
# # wfr 20210127 如果最小 MAPE 和 最小 MAPEStd 对应的 T 相同
# if arrayTMAPEIndex[0] == arrayTMAPEStdIndex[0]:
# IndexOpt = arrayTMAPEIndex[0]
# else: # wfr 20210127 如果最小 MAPE 和 最小 MAPEStd 对应的 T 不相同
# arrayPartIndex = np.argwhere(arrayTMAPE - arrayTMAPE[arrayTMAPEIndex[0]] < 0.08 * np.max(arrayScaleT))
# # arrayPartIndex = np.argwhere(arrayTMAPE < 1.5 * arrayTMAPE[arrayTMAPEIndex[0]])
# arrayPartIndex = arrayPartIndex[:,0]
# tmp = np.argsort(arrayTMAPEStd[arrayPartIndex]) # 将 arrayTMAPEStdIndex 升序排序 得到的 索引
# IndexOpt = arrayPartIndex[tmp[0]]
# wfr 20210108 判断最优周期是 公倍数周期 还是 普通周期, 从而设置 公倍数周期的最大因子, 以支持后续 MAPE 计算
TOpt = arrayScaleT[IndexOpt]
MAPEOpt = arrayTMAPE[IndexOpt]
MAPEStdOpt = arrayTMAPEStd[IndexOpt]
np.set_printoptions(precision=2, suppress=True)
# print("TOpt = {0}".format(TOpt))
# print("arrayScaleT: {0}".format(arrayScaleT))
# # print("arrayTError: {0}".format(arrayTError))
# print("arrayTMAPE: {0}".format(arrayTMAPE))
# # print("arrayTMAPEStd: {0}".format(arrayTMAPEStd))
for i in range(3):
        # wfr 20210109 the FFT has a fixed frequency resolution; the period is the reciprocal of the frequency, so its resolution varies
        # the lower the frequency / the longer the period, the lower the period resolution, so the neighbourhood search interval has to be adjusted dynamically
        # the period obtained from the spectrum may be inaccurate (limited period resolution), so search locally around it for a more precise period
        # wfr 20210109 centre frequency and frequency resolution
        FreqCenter = 1 / TOpt
        FreqResolution = (0.5 * fs) / len(arraySample)
        # wfr 20210109 first compute the neighbouring frequency interval
        FreqLow = FreqCenter - 0.7 * FreqResolution
        FreqUp = FreqCenter + 0.7 * FreqResolution
        # wfr 20210109 then compute the neighbouring period interval
        TLow = 1 / FreqUp
        if FreqLow > 0:
            TUp = 1 / FreqLow
        else:
            TUp = 1.5 * TOpt
        # wfr 20210113 make sure the neighbourhood search range is at least +/-15%
        TLow = np.min([0.85 * TOpt, TLow])
        TUp = np.max([1.15 * TOpt, TUp])
        # wfr 20210109 bound the interval by the factor to avoid over-searching for common-multiple periods
        TLow = np.max([(TOpt-0.5*FactorMax), TLow])
        # TLow = np.max([TLowBound, TLow]) # wfr 20210121 the lower bound of the search interval is not smaller than TUpBound
        TUp = np.min([(TOpt+0.5*FactorMax), TUp])
        # wfr 20210109 split the search interval into about ten steps
        TStep = (TUp - TLow) / 10
        TStep = np.min([TStep, 1]) # wfr 20210113 the step size is capped at 1 s
        TStep = np.max([TStep, 1 * SampleInterval/1000])
if TUp - TLow <= TStep:
return TOpt, MAPEOpt
# wfr 20210109 生成邻近搜索序列, 要包括区间端点
arraySearchT = np.arange(TLow, TUp, TStep)
arraySearchT = np.append(arraySearchT, TUp)
arraySearchT = np.append(arraySearchT, TOpt)
arraySearchTMAPE = MAPEMax * np.ones(len(arraySearchT))
arraySearchTMAPEStd = MAPEStdMax * np.ones(len(arraySearchT))
# 对于每个备选的周期, 计算 MAPE, MAPE越小越好, 越小说明功率变化/分布情况越一致
TInit = TOpt
for i in range( len(arraySearchT) ):
arraySearchTMAPE[i], arraySearchTMAPEStd[i] = DistributionMAPE(arraySearchT[i], arraySample, t, "mean", TInit, np.max(arrayScaleT))
# arrayTIndex = np.argsort(arraySearchTMAPE) # 将 arrayTmp 升序排序 得到的 索引
# IndexOpt = arrayTIndex[0]
IndexOpt = np.argmin(arraySearchTMAPE)
TOpt = arraySearchT[IndexOpt]
MAPEOpt = arraySearchTMAPE[IndexOpt]
if IndexOpt == len(arraySearchT) - 1 or (0.2 * (len(arraySearchT)-1) < IndexOpt and IndexOpt < 0.8 * (len(arraySearchT)-1)):
break
print("TCompute: arraySearchT: {0}".format(arraySearchT))
# print("TCompute: arrayTError: {0}".format(arrayTError))
print("TCompute: arraySearchTMAPE: {0}".format(arraySearchTMAPE))
# print("TCompute: arraySearchTMAPEStd: {0}".format(arraySearchTMAPEStd))
    # wfr 20210108 scale up periods that are too short
    if TOpt < TLowBound:
        # wfr 20210120 adapt the period lower bound so that the result is not off by a whole period, or if it is, stays within 10%
# if SetTLowBound == False and TLowBound / TOpt > 6:
# TLowBound = np.ceil(10 * TOpt)
# SetTLowBound = True
TOpt = TOpt * np.round(TLowBound / TOpt)
print("TCompute: TOpt = {0:.2f} s".format(TOpt))
# print("TCompute: MAPEOpt = {0:.2f}".format(MAPEOpt))
# print("")
return TOpt, MAPEOpt
def NotPeriodic(arraySample, SampleInterval, T):
TFixed = 8
N = 4
SigmaMax = 1
SigmaPctMax = 0.04
MeanErrMax = 0.04
DiffErrMax = 0.20
arraySample = arraySample[5:-5]
# 采样时间过短 直接返回 False
if (len(arraySample)-1) * (SampleInterval/1000) < N * TFixed:
if T < TFixed:
TFixed = T
if (len(arraySample)-1) * (SampleInterval/1000) < N * TFixed:
return False
Step = int(TFixed / (SampleInterval / 1000))
arrayRegion = np.array([])
arrayMean = np.array([])
arrayStd = np.array([])
arrayDiffMax = np.array([])
for i in range(N):
begin = len(arraySample) - ((i+1) * Step)
end = len(arraySample) - (i * Step)
arrayTmp = np.array(arraySample[begin:end])
arrayRegion = np.append(arrayRegion, arrayTmp)
arrayMean = np.append(arrayMean, np.mean(arrayTmp))
arrayStd = np.append(arrayStd, np.std(arrayTmp))
arrayDiffMax = np.append(arrayDiffMax, np.max(np.abs( arrayTmp - arrayMean[i] )))
    # if the mean power of adjacent regions differs too much, the signal is not considered aperiodic
    for i in range(N-1):
        if np.abs(arrayMean[i]-arrayMean[i+1]) / (np.mean(arrayMean[i:i+2])) > MeanErrMax:  # normalise by the mean of both regions
            return False
    # if the standard deviation is too large, the signal is not considered aperiodic
    # if np.max(arrayStd) > SigmaMax:
    #     return False
    if np.max(arrayStd/arrayMean) > SigmaPctMax:
        return False
    # if the maximum deviation exceeds the limit, the signal is not considered aperiodic
    if np.max(arrayDiffMax / arrayMean) > DiffErrMax:
        return False
    print("NotPeriodic: aperiodic")
return True
# wfr 20201230
def T_SpectrumAnalysis(listSample, SampleInterval, TUpBound, MeasureTFactor, TraceFileName, StrictMode="normal", lastT=-1, preference=0, isPlot = False):
global MAPEMax, FigCount, TLowBoundBase, TLowBound, SetTLowBound
FigCount = 0
TLowBound = TLowBoundBase
SetTLowBound = False
# print("SpectrumAnalysis: 采样点数量 = {0}".format(len(listSample)))
    arraySample = np.array(listSample) # the first few samples may be abnormally low and could be dropped here (not done in this version)
MeasureDuration = (len(arraySample)-1) * (SampleInterval/1000)
arrayT = np.array([])
arraySMAPE = np.array([])
isStable = False
MeasureDurationNext = -1
# 保存原始数据到文件
if len(TraceFileName) > 0:
FileDir = "./"+TraceFileName+".pkl"
pickle.dump(listSample, open(FileDir, "wb"))
# 低通滤波
# 采样频率 10Hz, 要滤除 Threshold Hz 以上的频率成分
# SampleFreq = 1/(SampleInterval/1000)
# Threshold = 2
# Wn = 2 * Threshold / SampleFreq
# b, a = signal.butter(8, Wn, 'lowpass')
# SampleFilted = signal.filtfilt(b, a, arraySample)
SampleFilted = arraySample
tmpT, tmpSMAPE = TCompute(SampleFilted, SampleInterval, TUpBound, isPlot)
    # if the measurement is not long enough, simply return "not stable"
    # wfr 20211027 detect aperiodic applications as early as possible and return early
if MeasureDuration >= 3 * TUpBound and tmpT > TUpBound:
T = TUpBound
isStable = False
MeasureDurationNext = -1
return T, isStable, MeasureDurationNext
elif MeasureDuration >= 3 * TUpBound:
tmpIndex = int(1/(SampleInterval/1000) * 0.33 * TUpBound)
tmpT1, tmpSMAPE1 = TCompute(SampleFilted[tmpIndex:], SampleInterval, TUpBound, isPlot)
tmpIndex = int(1/(SampleInterval/1000) * 0.66 * TUpBound)
tmpT2, tmpSMAPE2 = TCompute(SampleFilted[tmpIndex:], SampleInterval, TUpBound, isPlot)
tmpArrayT = np.array([tmpT, tmpT1, tmpT2])
SMAPE = abs( (np.max(tmpArrayT)-np.min(tmpArrayT)) / np.mean(tmpArrayT) )
if SMAPE > 0.3:
T = min(TUpBound, tmpT)
isStable = False
MeasureDurationNext = -1
return T, isStable, MeasureDurationNext
elif tmpT > (0.5 * MeasureDuration):
T = tmpT
isStable = False
if MeasureDuration < 3 * TUpBound:
            MeasureDurationNext = max(0.5 * tmpT, (3.1 * TUpBound - MeasureDuration)) # measure for at least 3.1 periods
        else:
            MeasureDurationNext = max(0.5 * tmpT, (MeasureTFactor * tmpT - MeasureDuration)) # measure for MeasureTFactor (e.g. 5) periods
        print("T_SpectrumAnalysis: this measurement was not long enough")
print("T_SpectrumAnalysis: TOpt = {0:.2f} s".format(tmpT))
print("T_SpectrumAnalysis: isStable = {0}".format(isStable))
print("T_SpectrumAnalysis: MeasureDurationNext = {0}".format(MeasureDurationNext))
return T, isStable, MeasureDurationNext
arrayT = np.append(arrayT, tmpT)
arraySMAPE = np.append(arraySMAPE, tmpSMAPE)
StepFactor = 0.5
MinStep = MeasureDuration / 10
tmpStep = np.max([tmpT * StepFactor, MinStep])
MeasureDurationLeft = MeasureDuration - tmpStep
while MeasureDurationLeft / np.max(arrayT) >= 2.8:
# 计算剩余采样点的起始 index
tmpIndexBegin = int( (MeasureDuration-MeasureDurationLeft) / (SampleInterval/1000) )
arrayPart = SampleFilted[tmpIndexBegin:]
# tmpT, tmpSMAPE = TCompute(arrayPart, SampleInterval, TUpBound, isPlot, lastT, int(preference), arrayT)
# tmpT, tmpSMAPE = TCompute(arrayPart, SampleInterval, TUpBound, isPlot, lastT, int(preference), arrayT[-1])
tmpT, tmpSMAPE = TCompute(arrayPart, SampleInterval, TUpBound, isPlot, lastT, int(preference), [])
arrayT = np.append(arrayT, tmpT)
arraySMAPE = np.append(arraySMAPE, tmpSMAPE)
if tmpT > (0.5 * MeasureDurationLeft):
T = tmpT
isStable = False
if MeasureDuration < 3 * TUpBound:
                MeasureDurationNext = max(0.5 * tmpT, (3.1 * TUpBound - MeasureDuration)) # measure for at least 3.1 periods
            else:
                MeasureDurationNext = max(0.5 * tmpT, (MeasureTFactor * tmpT - MeasureDuration)) # measure for MeasureTFactor (e.g. 5) periods
            print("T_SpectrumAnalysis: this measurement was not long enough")
print("T_SpectrumAnalysis: TOpt = {0:.2f} s".format(tmpT))
print("T_SpectrumAnalysis: isStable = {0}".format(isStable))
print("T_SpectrumAnalysis: MeasureDurationNext = {0}".format(MeasureDurationNext))
return T, isStable, MeasureDurationNext
# 每次减去上次周期的长度, 且设置每次步长的下限, 防止步长太小
tmpStep = np.max([tmpT * StepFactor, MinStep])
MeasureDurationLeft = MeasureDurationLeft - tmpStep
# MeasureDurationLeft = MeasureDurationLeft - np.mean(arrayT)
print("T_SpectrumAnalysis: arrayT: {0}".format(arrayT))
print("T_SpectrumAnalysis: arraySMAPE: {0}".format(arraySMAPE))
    # wfr 20210828 if measurement length / best period < MeasureTFactor, the measurement is considered too short
    tmpIndex = np.argwhere(arraySMAPE < 0).flatten() # SMAPE < 0 means TCompute judged the measurement too short
    arraySMAPE[tmpIndex] = np.min(arraySMAPE)
    tmpIndex = np.argsort(arraySMAPE).flatten() # indices that sort arraySMAPE in ascending order
# tmpT = np.max(arrayT[tmpIndex])
tmpT = arrayT[0]
if MeasureDuration / tmpT < MeasureTFactor:
T = tmpT
isStable = False
if MeasureDuration < 3 * TUpBound:
            MeasureDurationNext = max(0.5 * tmpT, (3.1 * TUpBound - MeasureDuration)) # measure for at least 3.1 periods
        else:
            MeasureDurationNext = max(0.5 * tmpT, (MeasureTFactor * tmpT - MeasureDuration)) # measure for MeasureTFactor (e.g. 5) periods
        print("T_SpectrumAnalysis: this measurement was not long enough")
print("T_SpectrumAnalysis: TOpt = {0:.2f} s".format(T))
print("T_SpectrumAnalysis: isStable = {0}".format(isStable))
print("T_SpectrumAnalysis: MeasureDurationNext = {0}".format(MeasureDurationNext))
return T, isStable, MeasureDurationNext
if StrictMode == "strict":
tmpThreshold = 0.10
elif StrictMode == "relaxed":
tmpThreshold = 0.30
else:
tmpThreshold = 0.15
tmpThreshold1 = tmpThreshold + 0.05
LenT = len(arrayT)
LenMin = 3
LenMax = 8
tmpIndex = np.argsort(arraySMAPE) # 将 tmpArraySMAPE 升序排列得到的 索引
T = arrayT[tmpIndex[0]]
tmpIndex = np.argsort(arrayT) # 将 tmpArraySMAPE 升序排列得到的 索引
IndexMiddle = tmpIndex[round(len(tmpIndex)/2)]
TMiddle = arrayT[IndexMiddle]
if LenT < LenMin: # wfr 测量区间还比较短
T = np.mean(arrayT[(0.65*T<arrayT)&(arrayT<1.35*T)])
SMAPE = abs( (np.max(arrayT)-np.min(arrayT)) / np.mean(arrayT) )
print("T_SpectrumAnalysis: SMAPE = {0:.2f}".format(SMAPE))
isStable = False
if MeasureDuration > 2.1 * TUpBound and SMAPE > tmpThreshold:
MeasureDurationNext = -1
# 测量够 5倍 周期
elif MeasureDuration < MeasureTFactor * np.max(arrayT):
# MeasureDurationNext = MeasureTFactor * np.max(arrayT) - MeasureDuration + 5
# MeasureDurationNext = np.ceil(MeasureDuration / np.max(arrayT)) * np.max(arrayT) - MeasureDuration + 5
tmpNext = np.max([np.max(arrayT), MinStep])
if tmpNext + MeasureDuration < MeasureTFactor * np.max(arrayT):
MeasureDurationNext = tmpNext
else:
MeasureDurationNext = MeasureTFactor * np.max(arrayT) - MeasureDuration + 5
else:
MeasureDurationNext = np.ceil(MeasureDuration / np.max(arrayT)) * np.max(arrayT) - MeasureDuration + 5
elif LenMin <= LenT:
tmp = min(LenT, LenMax)
if LenT > 8:
tmpIndexBegin = int(round(0.2 * LenT))
tmpIndexEnd = int(round(0.8 * LenT)) - 1
tmpIndex = np.arange(tmpIndexBegin,tmpIndexEnd)
elif LenT >= 4: # wfr 20211012 排除部分异常值
# tmpIndex = np.argwhere((0.65*T<arrayT) & (arrayT<1.35*T)).flatten()
tmpIndex = np.argwhere((0.60*TMiddle<arrayT) & (arrayT<1.35*TMiddle)).flatten()
else:
tmpIndexBegin = max(0, LenT-tmp)
tmpIndexEnd = LenT
tmpIndex = np.arange(tmpIndexBegin,tmpIndexEnd)
tmpArrayT = np.array(arrayT[tmpIndex])
print("T_SpectrumAnalysis: tmpArrayT = {}".format(tmpArrayT))
tmpArraySMAPE = np.array(arraySMAPE[tmpIndex])
SMAPE = abs( (np.max(tmpArrayT)-np.min(tmpArrayT)) / np.mean(tmpArrayT) )
print("T_SpectrumAnalysis: SMAPE = {0:.2f}".format(SMAPE))
tmpIndex = np.argsort(tmpArraySMAPE) # 将 tmpArraySMAPE 升序排列得到的 索引
T = tmpArrayT[tmpIndex[0]]
T = np.mean(tmpArrayT[(0.65*T<tmpArrayT)&(tmpArrayT<1.35*T)])
if SMAPE < tmpThreshold: # wfr 20201231 对称平均百分误差较小则认为稳定, 停止测量
isStable = True
MeasureDurationNext = -1
elif tmpThreshold <= SMAPE and SMAPE < tmpThreshold1:
isStable = False
if MeasureDuration > 2.1 * TUpBound:
MeasureDurationNext = -1
# 测量够 MeasureTFactor倍 周期
elif MeasureDuration < MeasureTFactor * np.max(arrayT):
# MeasureDurationNext = MeasureTFactor * np.max(arrayT) - MeasureDuration + 5
tmpNext = np.max([np.max(arrayT), MeasureDuration / 10])
if tmpNext + MeasureDuration < MeasureTFactor * np.max(arrayT):
MeasureDurationNext = tmpNext
else:
MeasureDurationNext = MeasureTFactor * np.max(arrayT) - MeasureDuration + 5
else:
MeasureDurationNext = np.ceil(MeasureDuration / np.max(arrayT)) * np.max(arrayT) - MeasureDuration + 5
elif tmpThreshold1 <= SMAPE: # 最近几次周期相差较大, 测量区间远大于最近的最大周期
isStable = False
if MeasureDuration > 2.1 * TUpBound:
MeasureDurationNext = -1
elif MeasureDuration > TUpBound and T < 0.5 * TUpBound:
MeasureDurationNext = -1
elif MeasureDuration >= 1.3 * MeasureTFactor * np.max(arrayT):
MeasureDurationNext = -1
else:
tmpTMax = np.max(tmpArrayT)
MeasureDurationNext = MeasureTFactor * tmpTMax - MeasureDuration
if MeasureDurationNext < 0:
MeasureDurationNext = tmpTMax * ( np.ceil(MeasureDuration/tmpTMax) + 1 - MeasureTFactor )
if 0 < MeasureDurationNext and MeasureDurationNext < 5: # wfr 20210110 下次等待测量时间要 > 5s
MeasureDurationNext += 5
# wfr 20210116 这里使用新方法判断是否是无周期 5 * T
if isStable == False and SMAPE > 0.4 and (LenT >= LenMax or MeasureDuration >= 5 * T or MeasureDuration > TUpBound):
if True == NotPeriodic(arraySample, SampleInterval, T):
# if T < TLowBound:
# T = np.ceil(TLowBound / T) * T
MeasureDurationNext = -1
if MeasureDurationNext > 0:
MeasureDurationNext = np.max([MeasureDurationNext, 0.5 * tmpT, (MeasureTFactor * tmpT - MeasureDuration)]) # 测量够 5倍 周期
print("T_SpectrumAnalysis: TOpt = {0:.2f} s".format(T))
print("T_SpectrumAnalysis: isStable = {0}".format(isStable))
print("T_SpectrumAnalysis: MeasureDurationNext = {0}".format(MeasureDurationNext))
return T, isStable, MeasureDurationNext
def TComputeFFT(arraySample, SampleInterval, TUpBound, isPlot = False):
fs = 1/(SampleInterval/1000)
t = (SampleInterval/1000) * np.arange(0, len(arraySample), 1)
num_fft = t.size
# 傅里叶变换
idx = fftfreq(num_fft, 1/fs)
arrayX = idx[:num_fft//2]
arrayY = fft(arraySample, num_fft)
arrayY = np.abs(arrayY)
arrayY = arrayY[:num_fft//2]
# arrayLogY = np.log10(arrayY[:num_fft//2])
listPeakIndex, Properties = find_peaks( arrayY )
# 取出峰值处的 频率 和 幅值
arrayPeakX = arrayX[listPeakIndex]
arrayPeak = arrayY[listPeakIndex]
arrayPeakIndex = np.argsort(-1 * arrayPeak) # 将 arrayPeak 降序排列得到的 索引
# print("TCompute: len of arrayPeakX = {0}".format(len(arrayPeakX)))
# print("arrayPeak = {0}".format(arrayPeak[arrayPeakIndex[:9]]))
# print("Freq = {0}".format(arrayPeakX[arrayPeakIndex[:9]]))
# print("T = {0}".format(1/arrayPeakX[arrayPeakIndex[:9]]))
    # keep the few periods with the largest amplitude
    arrayT = 1 / arrayPeakX[arrayPeakIndex] # sorted by peak amplitude in descending order
    arrayPeakOrder = arrayPeak[arrayPeakIndex]
    # then drop periods whose peak is not large enough
arrayT = arrayT[(arrayPeakOrder > 0.65 * arrayPeakOrder[0])]
print("TComputeFFT: T = {0:.2f} s".format(arrayT[0]))
    sys.stdout.flush()
return arrayT[0]
def T_FFT(listSample, SampleInterval, TUpBound, MeasureTFactor, TraceFileName, isPlot = False):
T = 1.0
arraySample = np.array(listSample)
MeasureDuration = (len(arraySample)-1) * (SampleInterval/1000)
MeasureDurationNext = -1
# 保存原始数据到文件
if len(TraceFileName) > 0:
FileDir = "./"+TraceFileName+".pkl"
pickle.dump(listSample, open(FileDir, "wb"))
T = TComputeFFT(arraySample, SampleInterval, TUpBound, isPlot)
    # if the measurement is not long enough, simply return "not stable"
    if MeasureTFactor * T > MeasureDuration:
        MeasureDurationNext = max(0.5 * T, (MeasureTFactor * T - MeasureDuration)) # measure for MeasureTFactor (e.g. 3) periods
        print("T_FFT: this measurement was not long enough")
print("T_FFT: T = {0:.2f} s".format(T))
print("T_FFT: MeasureDurationNext = {}".format(MeasureDurationNext))
return T, MeasureDurationNext
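# --- Hedged usage sketch (not part of the original file) ---
# Feeding a synthetic power trace with a 2 s sine component into T_FFT should
# recover T close to 2 s.  All parameter values here are arbitrary examples.
def _t_fft_demo():
    SampleIntervalMs = 100.0
    tGrid = np.arange(0, 30, SampleIntervalMs / 1000.0)
    fakePower = 100 + 10 * np.sin(2 * np.pi * tGrid / 2.0)
    return T_FFT(fakePower.tolist(), SampleIntervalMs, TUpBound=10,
                 MeasureTFactor=3, TraceFileName="")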
| 42.851297
| 153
| 0.613038
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 15,859
| 0.329114
|
6757319350181b82afbdb20fa5b589436eb598b6
| 3,623
|
py
|
Python
|
slippy/core/tests/test_materials.py
|
KDriesen/slippy
|
816723fe6ab9f5ed26b14b4fe0f66423649b85e6
|
[
"MIT"
] | 12
|
2020-12-06T15:30:06.000Z
|
2021-12-14T06:37:15.000Z
|
slippy/core/tests/test_materials.py
|
KDriesen/slippy
|
816723fe6ab9f5ed26b14b4fe0f66423649b85e6
|
[
"MIT"
] | null | null | null |
slippy/core/tests/test_materials.py
|
KDriesen/slippy
|
816723fe6ab9f5ed26b14b4fe0f66423649b85e6
|
[
"MIT"
] | 5
|
2021-03-18T05:53:11.000Z
|
2022-02-16T15:18:43.000Z
|
import numpy as np
import numpy.testing as npt
import slippy
import slippy.core as core
"""
If you add a material you need to add the properties that it will be tested with to the material_parameters dict;
the key should be the name of the class (whatever it is declared as after the class keyword).
The value should be a tuple of dicts:
The first dict in the tuple will be unpacked to instantiate the class,
The second will be used with the displacement-from-loads method,
The third will be used with the loads-from-displacement method, to ensure that the methods are inverses of each other.
If there is a limit to the applicability of the displacements-from-loads method (such as for a perfectly plastic material),
the _max_load key word should be set in the second dict.
For more complex behaviour please also implement your own tests.
"""
material_parameters = {
'Elastic': ({'name': 'steel_5', 'properties': {'E': 200e9, 'v': 0.3}},
{'grid_spacing': 0.01, 'simple': True},
{'grid_spacing': 0.01, 'simple': True, 'tol': 1e-9}),
'Rigid': ({}, {}, {})
}
exceptions = [core.Rigid]
def test_materials_basic():
# check that one of influence matrix or displacement from loading is given
for material in core.materials._IMMaterial._subclass_registry:
if material in exceptions:
continue
try:
mat_params = material_parameters[material.material_type]
except KeyError:
raise AssertionError(f"Material test parameters are not specified, for material {material.material_type}")
mat_instance = material(**mat_params[0])
max_load = mat_params[1].pop('_max_load', 1)
np.random.seed(0)
loads = np.random.rand(16, 16) * max_load
# check that the loads and displacement functions are inverse of each other
for direction in {'x', 'y', 'z'}:
load_in_direction = {direction: loads}
displacement = mat_instance.displacement_from_surface_loads(load_in_direction, **mat_params[1])
set_disp = displacement[direction]
loads_calc = mat_instance.loads_from_surface_displacement(displacements={direction: set_disp},
**mat_params[2])
npt.assert_allclose(loads, slippy.asnumpy(loads_calc[direction]), atol=max_load * 0.02)
def test_elastic_coupled():
mat = core.Elastic('steel_6', {'E': 200e9, 'v': 0.3})
np.random.seed(0)
loads1 = np.random.rand(16, 16)
loads2 = np.random.rand(16, 16)
directions = 'xyzx'
for i in range(3):
dir_1 = directions[i]
dir_2 = directions[i+1]
loads_in_direction = {dir_1: loads1, dir_2: loads2}
displacement = mat.displacement_from_surface_loads(loads_in_direction, grid_spacing=0.01, simple=True)
loads_calc = mat.loads_from_surface_displacement(displacements=displacement,
grid_spacing=0.01, simple=True)
for direction in [dir_1, dir_2]:
npt.assert_allclose(loads_in_direction[direction], slippy.asnumpy(loads_calc[direction]), atol=0.02)
displacement = mat.displacement_from_surface_loads(loads_in_direction, grid_spacing=0.01, simple=False)
loads_calc = mat.loads_from_surface_displacement(displacements=displacement,
grid_spacing=0.01, simple=False)
for direction in [dir_1, dir_2]:
npt.assert_allclose(loads_in_direction[direction], slippy.asnumpy(loads_calc[direction]), atol=0.02)
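# --- Hedged sketch (not part of the original test file) ---
# Per the module docstring, registering a new material means adding an entry
# keyed by its class name with three dicts: constructor kwargs, kwargs for
# displacement_from_surface_loads, and kwargs for loads_from_surface_displacement.
# The entry below is a hypothetical example for an imaginary 'MyElastic'
# material, not a real slippy class:
# material_parameters['MyElastic'] = (
#     {'name': 'my_mat', 'properties': {'E': 70e9, 'v': 0.33}},
#     {'grid_spacing': 0.01, 'simple': True},
#     {'grid_spacing': 0.01, 'simple': True, 'tol': 1e-9},
# )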
| 43.650602
| 118
| 0.666299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,121
| 0.309412
|
675790d51afdb63e5ecaf1442d2db56ff733f532
| 2,602
|
py
|
Python
|
python/dash_tools/restore_from_bup.py
|
Dash-Industry-Forum/media-tools
|
66be01ce09c8998d47d05729e0721857b2517017
|
[
"BSD-3-Clause"
] | 60
|
2017-01-02T07:44:17.000Z
|
2022-03-29T07:39:53.000Z
|
media-tools/python/dash_tools/restore_from_bup.py
|
roolrz/ABR-Alg-Implementation
|
02ba8fbc804eeabeae1dcd51d359c6b0a2dc7566
|
[
"MIT"
] | 4
|
2018-03-23T07:56:21.000Z
|
2021-11-22T06:45:12.000Z
|
media-tools/python/dash_tools/restore_from_bup.py
|
roolrz/ABR-Alg-Implementation
|
02ba8fbc804eeabeae1dcd51d359c6b0a2dc7566
|
[
"MIT"
] | 36
|
2016-08-04T14:28:30.000Z
|
2022-03-20T09:41:17.000Z
|
#!/usr/bin/env python
"""Restore files with ending BACKUP_ENDING to original files."""
# The copyright in this software is being made available under the BSD License,
# included below. This software may be subject to other third party and contributor
# rights, including patent rights, and no such rights are granted under this license.
#
# Copyright (c) 2016, Dash Industry Forum.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
# * Neither the name of Dash Industry Forum nor the names of its
# contributors may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS AS IS AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
from backup_handler import BACKUP_ENDING
def main():
"Command-line function."
from optparse import OptionParser
parser = OptionParser()
#pylint: disable=unused-variable
(options, args) = parser.parse_args()
if len(args) < 1:
parser.error("Wrong number of arguments")
sys.exit(1)
for file_name in args:
if file_name.endswith(BACKUP_ENDING):
old_name = file_name[:-len(BACKUP_ENDING)]
print("moving %s to %s" % (file_name, old_name))
if os.path.exists(old_name):
os.unlink(old_name)
os.rename(file_name, old_name)
continue
if __name__ == "__main__":
main()
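# Hedged usage sketch (not part of the original script), assuming BACKUP_ENDING
# is something like "_bup" as defined in backup_handler:
#     python restore_from_bup.py media/video_1.mp4_bup
# would move media/video_1.mp4_bup back to media/video_1.mp4, overwriting any
# existing file of that name.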
| 41.967742
| 85
| 0.737894
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,965
| 0.755188
|
67588c7659b325ae0aa6ae1b1ce63ec6f84fa51d
| 4,851
|
py
|
Python
|
src/opendr/simulation/human_model_generation/utilities/joint_extractor.py
|
makistsantekidis/opendr
|
07dee3b59d3487b9c5a93d6946317178a02c9890
|
[
"Apache-2.0"
] | 3
|
2021-06-24T01:54:25.000Z
|
2021-12-12T16:21:24.000Z
|
src/opendr/simulation/human_model_generation/utilities/joint_extractor.py
|
makistsantekidis/opendr
|
07dee3b59d3487b9c5a93d6946317178a02c9890
|
[
"Apache-2.0"
] | 79
|
2021-06-23T10:40:10.000Z
|
2021-12-16T07:59:42.000Z
|
src/opendr/simulation/human_model_generation/utilities/joint_extractor.py
|
makistsantekidis/opendr
|
07dee3b59d3487b9c5a93d6946317178a02c9890
|
[
"Apache-2.0"
] | 5
|
2021-07-04T07:38:50.000Z
|
2021-12-12T16:18:47.000Z
|
# Copyright 2020-2021 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pyglet
import numpy as np
import sklearn.preprocessing
class Joint_extractor:
def __init__(self, num_of_joints=18):
self.num_of_joints = num_of_joints
self.start_points = []
self.end_points = []
        for j in range(self.num_of_joints):
self.start_points.append([])
self.end_points.append([])
def compute_rays(self, cv_kps, image_width, image_height):
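        # Unproject each detected 2D keypoint onto the near (winZ=0) and far
        # (winZ=1) clipping planes using the current modelview/projection
        # matrices, accumulating one 3D viewing ray (start/end point) per joint.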
pmat = (pyglet.gl.GLdouble * 16)()
mvmat = (pyglet.gl.GLdouble * 16)()
view = (pyglet.gl.GLint * 4)()
pyglet.gl.glGetDoublev(pyglet.gl.GL_MODELVIEW_MATRIX, mvmat)
pyglet.gl.glGetDoublev(pyglet.gl.GL_PROJECTION_MATRIX, pmat)
pyglet.gl.glGetIntegerv(pyglet.gl.GL_VIEWPORT, view)
if cv_kps.size != 0:
for i, cv_kp in enumerate(cv_kps):
                if cv_kp[0] != -1 and cv_kp[1] != -1:
start_x = pyglet.gl.GLdouble()
start_y = pyglet.gl.GLdouble()
start_z = pyglet.gl.GLdouble()
end_x = pyglet.gl.GLdouble()
end_y = pyglet.gl.GLdouble()
end_z = pyglet.gl.GLdouble()
pyglet.gl.gluUnProject(cv_kp[0], image_height - cv_kp[1], 0, mvmat, pmat, view, start_x,
start_y, start_z)
pyglet.gl.gluUnProject(cv_kp[0], image_height - cv_kp[1], 1, mvmat, pmat, view, end_x, end_y,
end_z)
self.start_points[i].append(np.asarray([start_x.value, start_y.value, start_z.value]))
self.end_points[i].append(np.asarray([end_x.value, end_y.value, end_z.value]))
@property
def compute_3D_positions(self):
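        # For each joint, estimate the 3D position as the least-squares
        # intersection point of all accumulated viewing rays, iteratively
        # discarding the ray with the largest point-to-ray distance until the
        # worst residual falls below the 0.05 threshold.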
for i in range(self.num_of_joints):
if len(self.start_points[i]) == 0 or len(self.end_points[i]) == 0:
print("Failed to estimate the position of the joints...")
return [[], []]
points_3D = []
dists_3D = []
inds_sorted = None
for i in range(self.num_of_joints):
d = 100
first_time = True
while d > 0.05:
if first_time:
s = np.asarray(self.start_points[i])
e = np.asarray(self.end_points[i])
else:
s = s[inds_sorted[:-1]]
e = e[inds_sorted[:-1]]
v = e - s
ni = sklearn.preprocessing.normalize(v, norm="l2")
nx = ni[:, 0]
ny = ni[:, 1]
nz = ni[:, 2]
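                # Normal equations of the point minimizing the sum of squared
                # distances to all rays (directions ni, origins s).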
sxx = np.sum(nx * nx - 1)
syy = np.sum(ny * ny - 1)
szz = np.sum(nz * nz - 1)
sxy = np.sum(nx * ny)
sxz = np.sum(nx * nz)
syz = np.sum(ny * nz)
S = np.asarray([np.asarray([sxx, sxy, sxz]), np.asarray([sxy, syy, syz]), np.asarray([sxz, syz, szz])])
cx = np.sum(s[:, 0] * (nx * nx - 1) + s[:, 1] * (nx * ny) + s[:, 2] * (nx * nz))
cy = np.sum(s[:, 0] * (nx * ny) + s[:, 1] * (ny * ny - 1) + s[:, 2] * (ny * nz))
cz = np.sum(s[:, 0] * (nx * nz) + s[:, 1] * (ny * nz) + s[:, 2] * (nz * nz - 1))
C = np.asarray([cx, cy, cz])
p_intersect = np.linalg.inv(np.asarray(S)).dot(C)
N = s.shape[0]
distances = np.zeros(N, dtype=np.float32)
for j in range(N):
ui = ((p_intersect - s[j, :]).dot(np.transpose(v[j, :]))) / (v[j, :].dot(v[j, :]))
distances[j] = np.linalg.norm(p_intersect - s[j, :] - ui * v[j, :])
# for i=1:N %http://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html:
# distances(i) = norm(cross(p_intersect-PA(i,:),p_intersect-PB(i,:))) / norm(Si(i,:));
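                # Drop the ray with the largest residual and re-solve until the
                # worst residual is below the threshold.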
inds_sorted = np.argsort(distances)
d = distances[inds_sorted[-1]]
first_time = False
points_3D.append(p_intersect)
dists_3D.append(distances)
points_3D = np.asarray(points_3D, dtype=np.float32)
dists_3D = np.asarray(dists_3D, dtype=object)
return points_3D, dists_3D
| 46.644231
| 119
| 0.520305
| 4,193
| 0.864358
| 0
| 0
| 2,552
| 0.526077
| 0
| 0
| 798
| 0.164502
|
6758d510a825ee1d3b5115d43a4e119fa4dab901
| 956
|
py
|
Python
|
bluebottle/donations/migrations/0009_auto_20190130_1140.py
|
jayvdb/bluebottle
|
305fea238e6aa831598a8b227223a1a2f34c4fcc
|
[
"BSD-3-Clause"
] | null | null | null |
bluebottle/donations/migrations/0009_auto_20190130_1140.py
|
jayvdb/bluebottle
|
305fea238e6aa831598a8b227223a1a2f34c4fcc
|
[
"BSD-3-Clause"
] | null | null | null |
bluebottle/donations/migrations/0009_auto_20190130_1140.py
|
jayvdb/bluebottle
|
305fea238e6aa831598a8b227223a1a2f34c4fcc
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-01-30 10:40
from __future__ import unicode_literals
import bluebottle.utils.fields
from decimal import Decimal
from django.db import migrations, models
import django.db.models.deletion
import djmoney.models.fields
class Migration(migrations.Migration):
dependencies = [
('donations', '0008_auto_20170927_1021'),
]
operations = [
migrations.AddField(
model_name='donation',
name='payout_amount',
field=bluebottle.utils.fields.MoneyField(currency_choices="[('EUR', u'Euro')]", decimal_places=2, default=Decimal('0.0'), max_digits=12, verbose_name='Payout amount'),
),
migrations.AddField(
model_name='donation',
name='payout_amount_currency',
field=djmoney.models.fields.CurrencyField(choices=[(b'EUR', 'Euro')], default='EUR', editable=False, max_length=3),
),
]
| 31.866667
| 179
| 0.66318
| 677
| 0.708159
| 0
| 0
| 0
| 0
| 0
| 0
| 223
| 0.233264
|
675926d38ebca3605bde9778baaa7d1ff647176f
| 95
|
py
|
Python
|
pickle_storage/tests/__init__.py
|
PyUnchained/pickle_storage
|
c0a978701ae59a9feeb3e14026ff0b2353b2e7f5
|
[
"MIT"
] | null | null | null |
pickle_storage/tests/__init__.py
|
PyUnchained/pickle_storage
|
c0a978701ae59a9feeb3e14026ff0b2353b2e7f5
|
[
"MIT"
] | null | null | null |
pickle_storage/tests/__init__.py
|
PyUnchained/pickle_storage
|
c0a978701ae59a9feeb3e14026ff0b2353b2e7f5
|
[
"MIT"
] | null | null | null |
# import os
# os.environ.setdefault('PICKLE_STORAGE_SETTINGS', 'pickle_storage.tests.settings')
| 47.5
| 83
| 0.810526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 94
| 0.989474
|
6759d2fab349039ee4a85d50f2f8ff9d4646da91
| 6,592
|
py
|
Python
|
src/config.py
|
NicolasSommer/valuenet
|
1ce7e56956b378a8f281e9f9919e6aa98516a9d9
|
[
"Apache-2.0"
] | null | null | null |
src/config.py
|
NicolasSommer/valuenet
|
1ce7e56956b378a8f281e9f9919e6aa98516a9d9
|
[
"Apache-2.0"
] | null | null | null |
src/config.py
|
NicolasSommer/valuenet
|
1ce7e56956b378a8f281e9f9919e6aa98516a9d9
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import json
import os
class Config:
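    # Directory-name constants shared by the command-line entry points below.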
DATA_PREFIX = "data"
EXPERIMENT_PREFIX = "experiments"
def write_config_to_file(args, output_path):
config_path = os.path.join(output_path, "args.json")
with open(config_path, 'w', encoding='utf-8') as f:
json.dump(args.__dict__, f, indent=2)
def _add_model_configuration(parser):
parser.add_argument('--cuda', default=True, action='store_true')
# language model configuration
parser.add_argument('--encoder_pretrained_model', default='facebook/bart-base', type=str)
parser.add_argument('--max_seq_length', default=1024, type=int)
# model configuration
parser.add_argument('--column_pointer', action='store_true', default=True)
parser.add_argument('--embed_size', default=300, type=int, help='size of word embeddings')
parser.add_argument('--hidden_size', default=300, type=int, help='size of LSTM hidden states')
parser.add_argument('--action_embed_size', default=128, type=int, help='size of word embeddings')
parser.add_argument('--att_vec_size', default=300, type=int, help='size of attentional vector')
parser.add_argument('--type_embed_size', default=128, type=int, help='size of word embeddings')
parser.add_argument('--col_embed_size', default=300, type=int, help='size of word embeddings')
parser.add_argument('--readout', default='identity', choices=['identity', 'non_linear'])
parser.add_argument('--column_att', choices=['dot_prod', 'affine'], default='affine')
parser.add_argument('--dropout', default=0.3, type=float, help='dropout rate')
def _add_postgresql_configuration(parser):
parser.add_argument('--database_host', default='localhost', type=str)
parser.add_argument('--database_port', default='18001', type=str)
parser.add_argument('--database_user', default='postgres', type=str)
parser.add_argument('--database_password', default='dummy', type=str)
parser.add_argument('--database_schema', default='unics_cordis', type=str)
def read_arguments_train():
parser = argparse.ArgumentParser(description="Run training with following arguments")
# model configuration
_add_model_configuration(parser)
# general configuration
parser.add_argument('--exp_name', default='exp', type=str)
parser.add_argument('--seed', default=90, type=int)
parser.add_argument('--toy', default=False, action='store_true')
parser.add_argument('--data_set', default='spider', type=str)
# training & optimizer configuration
parser.add_argument('--batch_size', default=1, type=int)
parser.add_argument('--num_epochs', default=5.0, type=float)
parser.add_argument('--lr_base', default=1e-3, type=float)
parser.add_argument('--lr_connection', default=1e-4, type=float)
parser.add_argument('--lr_transformer', default=2e-5, type=float)
# parser.add_argument('--adam_eps', default=1e-8, type=float)
    parser.add_argument('--scheduler_gamma', default=0.5, type=float)
parser.add_argument('--max_grad_norm', default=1.0, type=float)
parser.add_argument('--clip_grad', default=5., type=float)
parser.add_argument('--loss_epoch_threshold', default=50, type=int)
parser.add_argument('--sketch_loss_weight', default=1.0, type=float)
# prediction configuration (run after each epoch)
parser.add_argument('--beam_size', default=5, type=int, help='beam size for beam search')
parser.add_argument('--decode_max_time_step', default=40, type=int,
help='maximum number of time steps used in decoding and sampling')
args = parser.parse_args()
args.data_dir = os.path.join(Config.DATA_PREFIX, args.data_set)
args.model_output_dir = Config.EXPERIMENT_PREFIX
print("*** parsed configuration from command line and combine with constants ***")
for argument in vars(args):
print("argument: {}={}".format(argument, getattr(args, argument)))
return args
def read_arguments_evaluation():
parser = argparse.ArgumentParser(description="Run evaluation with following arguments")
# model configuration
_add_model_configuration(parser)
# evaluation
parser.add_argument('--evaluation_type', default='spider', type=str)
parser.add_argument('--model_to_load', type=str)
parser.add_argument('--prediction_dir', type=str)
parser.add_argument('--batch_size', default=1, type=int)
# general configuration
parser.add_argument('--seed', default=90, type=int)
parser.add_argument('--data_set', default='spider', type=str)
# prediction configuration
parser.add_argument('--beam_size', default=1, type=int, help='beam size for beam search')
parser.add_argument('--decode_max_time_step', default=40, type=int,
help='maximum number of time steps used in decoding and sampling')
# DB config is only needed in case evaluation is executed on PostgreSQL DB
_add_postgresql_configuration(parser)
parser.add_argument('--database', default='cordis_temporary', type=str)
args = parser.parse_args()
args.data_dir = os.path.join(Config.DATA_PREFIX, args.data_set)
print("*** parsed configuration from command line and combine with constants ***")
for argument in vars(args):
print("argument: {}={}".format(argument, getattr(args, argument)))
return args
def read_arguments_manual_inference():
parser = argparse.ArgumentParser(description="Run manual inference with following arguments")
# model configuration
_add_model_configuration(parser)
# manual_inference
parser.add_argument('--model_to_load', type=str)
parser.add_argument('--api_key', default='1234', type=str)
parser.add_argument('--ner_api_secret', default='PLEASE_ADD_YOUR_OWN_GOOGLE_API_KEY_HERE', type=str)
# database configuration (in case of PostgreSQL, not needed for sqlite)
_add_postgresql_configuration(parser)
# general configuration
parser.add_argument('--seed', default=90, type=int)
parser.add_argument('--batch_size', default=1, type=int)
# prediction configuration
parser.add_argument('--beam_size', default=1, type=int, help='beam size for beam search')
parser.add_argument('--decode_max_time_step', default=40, type=int,
help='maximum number of time steps used in decoding and sampling')
args = parser.parse_args()
print("*** parsed configuration from command line and combine with constants ***")
for argument in vars(args):
print("argument: {}={}".format(argument, getattr(args, argument)))
return args
| 40.944099
| 104
| 0.715564
| 76
| 0.011529
| 0
| 0
| 0
| 0
| 0
| 0
| 2,476
| 0.375607
|
675abb614add4be960125080b494d7201adec0de
| 2,352
|
py
|
Python
|
aqg/utils/summarizer.py
|
Sicaida/Automatic_Question_Generation
|
a228c166d40103a194e1daa23ff37f73c9488a5d
|
[
"MIT"
] | 134
|
2018-04-04T19:06:09.000Z
|
2022-02-24T03:24:36.000Z
|
aqg/utils/summarizer.py
|
Sicaida/Automatic_Question_Generation
|
a228c166d40103a194e1daa23ff37f73c9488a5d
|
[
"MIT"
] | 22
|
2018-09-20T07:17:11.000Z
|
2022-03-11T23:45:15.000Z
|
aqg/utils/summarizer.py
|
sagarparikh2013/Automatic-Question-Generation-NLP
|
6a2cf5d90e47980676f57c67f2ed73be6f8d7fed
|
[
"MIT"
] | 50
|
2018-07-09T16:29:15.000Z
|
2021-12-20T11:37:33.000Z
|
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from sumy.parsers.html import HtmlParser
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
#from sumy.summarizers.lsa import LsaSummarizer as Summarizer
from sumy.summarizers.lex_rank import LexRankSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
class TextSummarizer:
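    # Thin wrapper around sumy's LexRank summarizer: summarizes a URL, a raw
    # string or a plain-text file and writes the selected sentences to
    # summarizer_output.txt and summarizer_output2.txt.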
def __init__(self, count=10):
self.LANGUAGE = "czech"
self.SENTENCES_COUNT = count
    def _summarize(self, parser):
        # Shared helper: run the LexRank summarizer over an already-parsed
        # document and write the selected sentences to the two output files.
        stemmer = Stemmer(self.LANGUAGE)
        summarizer = Summarizer(stemmer)
        with open("summarizer_output.txt", "w+") as file_1, \
                open("summarizer_output2.txt", "w+") as file_2:
            for sentence in summarizer(parser.document, self.SENTENCES_COUNT):
                file_2.write(str(sentence))
                file_1.write(str(sentence))
                file_1.write("\n")
    def summarize_from_url(self, url):
        self._summarize(HtmlParser.from_url(url, Tokenizer(self.LANGUAGE)))
    def summarize_from_text(self, text):
        self._summarize(PlaintextParser.from_string(text, Tokenizer(self.LANGUAGE)))
    def summarize_from_file(self, file_name):
        self._summarize(PlaintextParser.from_file(file_name, Tokenizer(self.LANGUAGE)))
# t = TextSummarizer()
# t.summarize_from_file("obama_short.txt")
# pdf = pdfgeneration()
# pdf.generate_pdf_summarizer("summarizer_output2.txt")
| 31.783784
| 79
| 0.676446
| 1,748
| 0.743197
| 0
| 0
| 0
| 0
| 0
| 0
| 388
| 0.164966
|
675aeab4c1e2b9cf3c2dce4e2188f947ea6ee089
| 50
|
py
|
Python
|
tests/__init__.py
|
AdamRuddGH/super_json_normalize
|
4a3c77d0e0dce632678ffe40b37fbd98fd2b4be8
|
[
"MIT"
] | 2
|
2021-10-03T02:43:41.000Z
|
2021-10-04T10:15:20.000Z
|
tests/__init__.py
|
AdamRuddGH/super_json_normalize
|
4a3c77d0e0dce632678ffe40b37fbd98fd2b4be8
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
AdamRuddGH/super_json_normalize
|
4a3c77d0e0dce632678ffe40b37fbd98fd2b4be8
|
[
"MIT"
] | null | null | null |
"""Unit test package for super_json_normalize."""
| 25
| 49
| 0.76
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 49
| 0.98
|