| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
hexsha: bfabf4fd12a9332dcec2ddca5c514bd1320a4778 | size: 5,954 | ext: py | lang: Python
path: homeassistant/components/tradfri/config_flow.py | repo: tohara/home-assistant | head: d7fcb5268ad3c48d4e1a4bd202cd926810ab3e26 | licenses: ["Apache-2.0"]
stars: 2 (2018-01-21T20:25:04.000Z to 2018-08-01T08:05:06.000Z) | issues: 4 (2021-03-19T01:22:18.000Z to 2022-01-13T01:19:34.000Z) | forks: 2 (2018-08-15T03:59:35.000Z to 2018-10-18T12:20:05.000Z)
content:
"""Config flow for Tradfri."""
import asyncio
from collections import OrderedDict
from uuid import uuid4
import async_timeout
import voluptuous as vol
from homeassistant import config_entries
from .const import (
CONF_IMPORT_GROUPS, CONF_IDENTITY, CONF_HOST, CONF_KEY, CONF_GATEWAY_ID)
KEY_SECURITY_CODE = 'security_code'
KEY_IMPORT_GROUPS = 'import_groups'
class AuthError(Exception):
"""Exception if authentication occurs."""
def __init__(self, code):
"""Initialize exception."""
super().__init__()
self.code = code
@config_entries.HANDLERS.register('tradfri')
class FlowHandler(config_entries.ConfigFlow):
"""Handle a config flow."""
VERSION = 1
def __init__(self):
"""Initialize flow."""
self._host = None
self._import_groups = False
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
return await self.async_step_auth()
async def async_step_auth(self, user_input=None):
"""Handle the authentication with a gateway."""
errors = {}
if user_input is not None:
host = user_input.get(CONF_HOST, self._host)
try:
auth = await authenticate(
self.hass, host,
user_input[KEY_SECURITY_CODE])
# We no longer ask whether to import groups because group state
# is not reliable, so we don't want to expose that option to the user.
# Importing groups can still be enabled via configuration.yaml.
auth[CONF_IMPORT_GROUPS] = self._import_groups
return await self._entry_from_data(auth)
except AuthError as err:
if err.code == 'invalid_security_code':
errors[KEY_SECURITY_CODE] = err.code
else:
errors['base'] = err.code
fields = OrderedDict()
if self._host is None:
fields[vol.Required(CONF_HOST)] = str
fields[vol.Required(KEY_SECURITY_CODE)] = str
return self.async_show_form(
step_id='auth',
data_schema=vol.Schema(fields),
errors=errors,
)
async def async_step_zeroconf(self, user_input):
"""Handle zeroconf discovery."""
for entry in self._async_current_entries():
if entry.data[CONF_HOST] == user_input['host']:
return self.async_abort(
reason='already_configured'
)
self._host = user_input['host']
return await self.async_step_auth()
async_step_homekit = async_step_zeroconf
async def async_step_import(self, user_input):
"""Import a config entry."""
for entry in self._async_current_entries():
if entry.data[CONF_HOST] == user_input['host']:
return self.async_abort(
reason='already_configured'
)
# Happens if user has host directly in configuration.yaml
if 'key' not in user_input:
self._host = user_input['host']
self._import_groups = user_input[CONF_IMPORT_GROUPS]
return await self.async_step_auth()
try:
data = await get_gateway_info(
self.hass, user_input['host'],
# Old config format had a fixed identity
user_input.get('identity', 'homeassistant'),
user_input['key'])
data[CONF_IMPORT_GROUPS] = user_input[CONF_IMPORT_GROUPS]
return await self._entry_from_data(data)
except AuthError:
# If we fail to connect, just pass it on to discovery
self._host = user_input['host']
return await self.async_step_auth()
async def _entry_from_data(self, data):
"""Create an entry from data."""
host = data[CONF_HOST]
gateway_id = data[CONF_GATEWAY_ID]
same_hub_entries = [entry.entry_id for entry
in self._async_current_entries()
if entry.data[CONF_GATEWAY_ID] == gateway_id or
entry.data[CONF_HOST] == host]
if same_hub_entries:
await asyncio.wait([self.hass.config_entries.async_remove(entry_id)
for entry_id in same_hub_entries])
return self.async_create_entry(
title=host,
data=data
)
async def authenticate(hass, host, security_code):
"""Authenticate with a Tradfri hub."""
from pytradfri.api.aiocoap_api import APIFactory
from pytradfri import RequestError
identity = uuid4().hex
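# The identity is a fresh random name registered with the gateway; below,
# generate_psk() trades the gateway's security code for a pre-shared key bound
# to that identity, and it is this identity/key pair that gets stored.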
api_factory = APIFactory(host, psk_id=identity)
try:
with async_timeout.timeout(5):
key = await api_factory.generate_psk(security_code)
except RequestError:
raise AuthError('invalid_security_code')
except asyncio.TimeoutError:
raise AuthError('timeout')
return await get_gateway_info(hass, host, identity, key)
async def get_gateway_info(hass, host, identity, key):
"""Return info for the gateway."""
from pytradfri.api.aiocoap_api import APIFactory
from pytradfri import Gateway, RequestError
try:
factory = APIFactory(
host,
psk_id=identity,
psk=key,
loop=hass.loop
)
api = factory.request
gateway = Gateway()
gateway_info_result = await api(gateway.get_gateway_info())
await factory.shutdown()
except (OSError, RequestError):
# We're also catching OSError as PyTradfri doesn't catch that one yet
# Upstream PR: https://github.com/ggravlingen/pytradfri/pull/189
raise AuthError('cannot_connect')
return {
CONF_HOST: host,
CONF_IDENTITY: identity,
CONF_KEY: key,
CONF_GATEWAY_ID: gateway_info_result.id,
}
avg_line_length: 31.336842 | max_line_length: 79 | alphanum_fraction: 0.610346

hexsha: d9c2e4bd6f0c9ce65f421931b5d79cd3eb3dcc19 | size: 44 | ext: py | lang: Python
path: midea_beautiful/version.py | repo: ethan021021/midea-beautiful-air | head: 9f71f1111551fa79c3e899c68144c79ffac9ad95 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
""" Version File """
__version__ = "0.8.42"
avg_line_length: 14.666667 | max_line_length: 22 | alphanum_fraction: 0.590909

hexsha: 3ce0f60a187e6b042ad58edd14f17e797bbc9301 | size: 744 | ext: py | lang: Python
path: discordbot.py | repo: tasuren/CuBOT-Discord | head: 176549e0f33afb37a28f38c7d2f27f5a254dd603 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import discord
from discord.ext import commands
import os
import sys
intents = discord.Intents.all()
client = discord.Client(intents=intents)
bot = commands.Bot(
command_prefix=["Cu!", "cu!"],
help_command=None,
intents=intents,
allowed_mentions=discord.AllowedMentions(replied_user=False, everyone=False, roles=False),
case_insensitive=True
)
token = os.environ["token"]
def restart_bot():
os.execv(sys.executable, ['python'] + sys.argv)
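# os.execv() replaces the current process image with a fresh interpreter running
# the same script and arguments, so the bot restarts in place rather than
# spawning a child process (presumably invoked from one of the cogs loaded below).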
bot.load_extension('jishaku')
bot.load_extension("Cogs.event")
bot.load_extension("Cogs.bot")
bot.load_extension("Cogs.tool")
bot.load_extension("Cogs.data")
bot.load_extension("Cogs.variety")
bot.load_extension("Cogs.activity")
bot.load_extension("Cogs.level")
bot.run(token)
avg_line_length: 23.25 | max_line_length: 94 | alphanum_fraction: 0.751344

hexsha: fdb7129d116187f6e0260469ccc6937ecf6adbf3 | size: 556 | ext: py | lang: Python
path: map/urls.py | repo: noah-dev/meetup_map (forks row lists noah-dev/event_mapper) | head: fa047518e7c932960473018460a20c3149e61d98 | licenses: ["MIT"]
stars: null | issues: 2 (2020-02-11T23:22:17.000Z to 2020-06-05T17:24:58.000Z) | forks: null
content:
from django.conf.urls import url
from . import views
app_name = 'map'
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^privacy/$', views.privacy, name='privacy'),
url(r'^concept/$', views.concept, name='concept'),
url(r'^partial/$', views.partial, name='partial'),
url(r'^meetups_data/$', views.meetups_data, name='meetups_data'),
url(r'^concept_meetups_data/$', views.concept_meetups_data, name='concept_meetups_data'),
url(r'^partial_meetups_data/$', views.partial_meetups_data, name='partial_meetups_data'),
]
avg_line_length: 37.066667 | max_line_length: 93 | alphanum_fraction: 0.690647

hexsha: f093c86b0ade571a816a02a13203929ef70bb9c7 | size: 14,431 | ext: py | lang: Python
path: rpc/train_rnn.py | repo: siddharthgir/google-research | head: 07bf624ffbde9307f3ebe0a685804ec665eef919 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for training and evaluating RPC agents."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import os
import time
import pdb
from absl import app
from absl import flags
import math
from absl import logging
import gin
import numpy as np
import rpc_agent
import rpc_utils
from six.moves import range
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.agents import data_converter
from tf_agents.drivers import dynamic_step_driver
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.eval import metric_utils
from tf_agents.metrics import tf_metrics
from tf_agents.policies import greedy_policy
from tf_agents.policies import random_tf_policy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.utils import common
from tf_agents.networks import utils
flags.DEFINE_string('trained_agent_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
'Directory containing saved policy')
flags.DEFINE_string('eval_rb_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
'Directory containing saved policy')
flags.DEFINE_string('train_rb_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
'Directory containing saved policy')
flags.DEFINE_multi_string('gin_file', None, 'Path to the trainer config files.')
flags.DEFINE_multi_string('gin_bindings', None, 'Gin binding to pass through.')
FLAGS = flags.FLAGS
def _activation(t,
clip_mean=30.0,
clip_max_stddev=10.0,
clip_min_stddev=0.1):
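# t packs the parameters as [mean | raw stddev] halves. The log(exp(x) - 1.0)
# expressions below are the inverse of softplus, so the stddev limits are
# applied in pre-softplus space (this reading assumes the downstream
# tfp.layers.IndependentNormal maps the raw value through a softplus-like
# transform; that is an assumption, not stated in this file).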
t1, t2 = tf.split(t, 2, axis=1)
low = -np.inf if clip_mean is None else -clip_mean
high = np.inf if clip_mean is None else clip_mean
t1 = rpc_utils.squash_to_range(t1, low, high)
if clip_min_stddev is None:
low = -np.inf
else:
low = tf.math.log(tf.exp(clip_min_stddev) - 1.0)
if clip_max_stddev is None:
high = np.inf
else:
high = tf.math.log(tf.exp(clip_max_stddev) - 1.0)
t2 = rpc_utils.squash_to_range(t2, low, high)
return tf.concat([t1, t2], axis=1)
def _custom_activation(t,
clip_mean=30.0):
low = -np.inf if clip_mean is None else -clip_mean
high = np.inf if clip_mean is None else clip_mean
t = rpc_utils.squash_to_range(t, low, high)
return t
def get_agent(agent_dir,
rb_dir,
eval_rb_dir,
env_name='HalfCheetah-v2',
latent_dim=10,
predictor_num_layers=2,
actor_fc_layers=(),
critic_obs_fc_layers=None,
critic_action_fc_layers=None,
critic_joint_fc_layers=(256, 256),
replay_buffer_capacity=100000,
eval_replay_buffer_capacity = 10000):
env = suite_gym.load(env_name)
tf_env = tf_py_environment.TFPyEnvironment(env)
time_step_spec = tf_env.time_step_spec()
observation_spec = time_step_spec.observation
action_spec = tf_env.action_spec()
encoder_net = tf.keras.Sequential([
tf.keras.layers.Dense(256, activation='relu'),
tf.keras.layers.Dense(256, activation='relu'),
tf.keras.layers.Dense(
tfp.layers.IndependentNormal.params_size(latent_dim ),
activation=_activation,
kernel_initializer='glorot_uniform'),
tfp.layers.IndependentNormal(latent_dim),
])
obs_input = tf.keras.layers.Input(observation_spec.shape)
action_input = tf.keras.layers.Input(action_spec.shape)
z = encoder_net(obs_input)
z = tf.stop_gradient(z)
za = tf.concat([z, action_input], axis=1)
za_input = tf.keras.layers.Input(za.shape[1])
loc_scale = tf.keras.Sequential(
predictor_num_layers * [tf.keras.layers.Dense(256, activation='relu')] + [ # pylint: disable=line-too-long
tf.keras.layers.Dense(
tfp.layers.IndependentNormal.params_size(latent_dim),
activation=_activation,
kernel_initializer='zeros'),
])(za_input)
combined_loc_scale = tf.concat([
loc_scale[:, :latent_dim] + za_input[:, :latent_dim],
loc_scale[:, latent_dim:]],axis=1)
dist = tfp.layers.IndependentNormal(latent_dim)(combined_loc_scale)
output = tf.keras.Model(inputs=za_input, outputs=dist)(za)
predictor_net = tf.keras.Model(inputs=(obs_input, action_input),
outputs=output)
actor_net = rpc_utils.ActorNet(
input_tensor_spec=observation_spec,
output_tensor_spec=action_spec,
encoder=encoder_net,
predictor=predictor_net,
fc_layers=actor_fc_layers)
critic_net = rpc_utils.CriticNet(
(observation_spec, action_spec),
observation_fc_layer_params=critic_obs_fc_layers,
action_fc_layer_params=critic_action_fc_layers,
joint_fc_layer_params=critic_joint_fc_layers,
kernel_initializer='glorot_uniform',
last_kernel_initializer='glorot_uniform')
tf_agent = rpc_agent.RpAgent(
time_step_spec,
action_spec,
actor_network=actor_net,
actor_optimizer=None,
alpha_optimizer=None,
critic_network=critic_net,
critic_optimizer=None)
agent_checkpointer = common.Checkpointer(
agent = tf_agent,
ckpt_dir=agent_dir)
agent_checkpointer.initialize_or_restore()
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
data_spec=tf_agent.collect_data_spec,
batch_size=tf_env.batch_size,
max_length=replay_buffer_capacity)
rb_checkpointer = common.Checkpointer(
replay_buffer = replay_buffer,
ckpt_dir=rb_dir)
rb_checkpointer.initialize_or_restore()
eval_replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
data_spec=tf_agent.collect_data_spec,
batch_size=tf_env.batch_size,
max_length=eval_replay_buffer_capacity)
eval_rb_checkpointer = common.Checkpointer(
replay_buffer = eval_replay_buffer,
ckpt_dir=eval_rb_dir)
eval_rb_checkpointer.initialize_or_restore()
return tf_env,tf_agent,replay_buffer,eval_replay_buffer
def _filter_invalid_transition(trajectories,unusedarg1):
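# Keep only sampled windows in which none of the steps except the last one is
# an episode boundary.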
return tf.reduce_all(~trajectories.is_boundary()[:-1])
@gin.configurable
def train_prediction_model(
trained_agent_dir,
train_rb_dir,
eval_rb_dir,
stacked_steps=5,
num_epochs=10,
env_name='HalfCheetah-v2',
learning_rate=1e-4,
latent_dim=10
):
tf_env,tf_agent,rb,eval_rb = get_agent(trained_agent_dir,train_rb_dir,eval_rb_dir,latent_dim=latent_dim)
action_spec = tf_env.action_spec()
_,encoder = tf_agent._actor_network._predictor,tf_agent._actor_network._z_encoder
optimizer = tf.compat.v1.train.AdamOptimizer(
learning_rate=learning_rate)
train_loss = tf.keras.metrics.Mean('train_loss', dtype=tf.float32)
current_time = "rnn_multi_"+str(stacked_steps)
train_log_dir = 'logs/gradient_tape/' + current_time + '/train'
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
evaluation_loss_1 = tf.keras.metrics.Mean('evaluation_loss_step_1', dtype=tf.float32)
evaluation_loss_2 = tf.keras.metrics.Mean('evaluation_loss_step_2', dtype=tf.float32)
evaluation_loss_3 = tf.keras.metrics.Mean('evaluation_loss_step_3', dtype=tf.float32)
evaluation_loss_4 = tf.keras.metrics.Mean('evaluation_loss_step_4', dtype=tf.float32)
evaluation_loss_5 = tf.keras.metrics.Mean('evaluation_loss_step_5', dtype=tf.float32)
lstm_z_input = tf.keras.layers.Input((1,latent_dim),batch_size=128)
lstm_a_input = tf.keras.layers.Input((1,action_spec.shape[0]),batch_size=128)
za = tf.concat([lstm_z_input,lstm_a_input],axis=2)
lstm_model = tf.keras.layers.LSTM(32,return_sequences=True,stateful=True)(za)
loc_scale = tf.keras.Sequential(
2 * [tf.keras.layers.Dense(256, activation='relu')]+[tf.keras.layers.Dense(latent_dim,activation=_custom_activation,kernel_initializer='zeros')])(lstm_model)
combined_loc_scale = loc_scale + lstm_z_input[:,-latent_dim:]
predictor_net = tf.keras.Model(inputs=[lstm_z_input,lstm_a_input],outputs=combined_loc_scale)
#model = tf.keras.Sequential()
"""
#Creation of model
z_input = tf.keras.layers.Input(latent_dim*stacked_steps)
a_input = tf.keras.layers.Input(action_spec.shape[0]*stacked_steps)
za = tf.concat([z_input,a_input],axis=1)
loc_scale = tf.keras.Sequential(
4 * [tf.keras.layers.Dense(512, activation='relu')]+[tf.keras.layers.Dense(latent_dim,activation=_custom_activation,kernel_initializer='zeros')])(za)
combined_loc_scale = loc_scale + z_input[:,-latent_dim:]
predictor_net = tf.keras.Model(inputs=(z_input, a_input),
outputs=combined_loc_scale)
"""
dataset = rb.as_dataset(sample_batch_size=128,num_steps=stacked_steps+1).filter(_filter_invalid_transition)
dataset = dataset.prefetch(10)
iterator = iter(dataset)
def train(trajectories):
#Generate Encodings First
predictor_net.layers[3].reset_states()
batch_squash = utils.BatchSquash(2)
obs = batch_squash.flatten(trajectories.observation)
latent_encodings = encoder(obs,training=False).mean()
latent_encodings = batch_squash.unflatten(latent_encodings)
for i in range(0,stacked_steps):
input_traj = tf.expand_dims(latent_encodings[:,i],axis=1)
action = tf.expand_dims(trajectories.action[:,i],axis=1)
predicted_encoding = predictor_net((input_traj,action),training=True)
output_encoding = latent_encodings[:,stacked_steps]
loss = tf.keras.losses.mean_squared_error(predicted_encoding[:,0], output_encoding)
predictor_net.layers[3].reset_states()
return loss
def eval_step(trajectories):
predictor_net.layers[3].reset_states()
batch_squash = utils.BatchSquash(2)
obs = batch_squash.flatten(trajectories.observation)
latent_encodings = encoder(obs,training=False).mean()
latent_encodings = batch_squash.unflatten(latent_encodings)
for i in range(0,stacked_steps):
input_traj = tf.expand_dims(latent_encodings[:,i],axis=1)
action = tf.expand_dims(trajectories.action[:,i],axis=1)
predicted_encoding = predictor_net((input_traj,action),training=True)
output_encoding = latent_encodings[:,stacked_steps]
losses = []
losses.append(tf.keras.losses.MSE(output_encoding, predicted_encoding[:,0]))
for i in range(0,4):
input_trajs = predicted_encoding
actions = tf.expand_dims(trajectories.action[:,stacked_steps+i],axis=1)
predicted_encoding = predictor_net((input_trajs,actions),training=False)
output_encoding = latent_encodings[:,stacked_steps+i+1]
losses.append(tf.keras.losses.MSE(output_encoding, predicted_encoding[:,0]))
predictor_net.layers[3].reset_states()
return losses
eval_step = common.function(eval_step)
def eval(timestep):
dataset = eval_rb.as_dataset(sample_batch_size=128,num_steps=stacked_steps+5).filter(_filter_invalid_transition)
dataset.prefetch(10)
iterator = iter(dataset)
i = 0
for trajectories,_ in iterator:
losses = eval_step(trajectories)
evaluation_loss_1(losses[0])
evaluation_loss_2(losses[1])
evaluation_loss_3(losses[2])
evaluation_loss_4(losses[3])
evaluation_loss_5(losses[4])
i += 128
if i > 10000:
break
with train_summary_writer.as_default():
tf.summary.scalar('eval_loss_step_1', evaluation_loss_1.result(), step=timestep)
tf.summary.scalar('eval_loss_step_2', evaluation_loss_2.result(), step=timestep)
tf.summary.scalar('eval_loss_step_3', evaluation_loss_3.result(), step=timestep)
tf.summary.scalar('eval_loss_step_4', evaluation_loss_4.result(), step=timestep)
tf.summary.scalar('eval_loss_step_5', evaluation_loss_5.result(), step=timestep)
evaluation_loss_1.reset_states()
evaluation_loss_2.reset_states()
evaluation_loss_3.reset_states()
evaluation_loss_4.reset_states()
evaluation_loss_5.reset_states()
train = common.function(train)
i = 0
for trajectories,_ in iterator: #Adjust length
with tf.GradientTape() as tape:
loss = train(trajectories)
grads = tape.gradient(loss, predictor_net.trainable_weights)
optimizer.apply_gradients(zip(grads, predictor_net.trainable_weights))
train_loss(loss)
if i%2000 == 0:
with train_summary_writer.as_default():
tf.summary.scalar('loss', train_loss.result(), step=i)
print("Iteration",i,":",train_loss.result())
train_loss.reset_states()
if i%10000 == 0:
eval(i)
predictor_net.save('curr_model_'+current_time+'/my_model')
if i%100000 == 0:
predictor_net.save('curr_model_'+current_time+'/model_epoch_'+str(i))
if i>=2500000:
break
i += 1
def main(_):
tf.compat.v1.enable_v2_behavior()
logging.set_verbosity(logging.INFO)
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_bindings)
if 'xm_parameters' in FLAGS and FLAGS.xm_parameters:
hparams = json.loads(FLAGS.xm_parameters)
with gin.unlock_config():
for (key, value) in hparams.items():
print('Setting: %s = %s' % (key, value))
gin.bind_parameter(key, value)
trained_agent_dir = FLAGS.trained_agent_dir
eval_rb_dir = FLAGS.eval_rb_dir
train_rb_dir = FLAGS.train_rb_dir
train_prediction_model(trained_agent_dir,train_rb_dir,eval_rb_dir)
if __name__ == '__main__':
flags.mark_flag_as_required('trained_agent_dir')
flags.mark_flag_as_required('train_rb_dir')
flags.mark_flag_as_required('eval_rb_dir')
app.run(main)
avg_line_length: 34.941889 | max_line_length: 165 | alphanum_fraction: 0.719285

hexsha: e33eae78807dd55bc502aaae39697168ab2ab486 | size: 1,138 | ext: py | lang: Python
path: scripts/examples/Arduino/Portenta-H7/04-Image-Filters/mean_adaptive_threshold_filter.py | repo: jiskra/openmv | head: a0f321836f77f94d8118910598dcdb79eb784d58 | licenses: ["MIT"]
stars: 1,761 (2015-07-10T23:14:17.000Z to 2022-03-30T07:49:49.000Z) | issues: 487 (2015-07-07T23:21:20.000Z to 2022-03-30T17:13:22.000Z) | forks: 882 (2015-08-01T08:34:19.000Z to 2022-03-30T07:36:23.000Z)
content:
# Mean Adaptive Threshold Filter Example
#
# This example shows off mean filtering with adaptive thresholding.
# When you pass threshold=True, the mean() method adaptively thresholds the image
# by comparing each pixel with the mean of the pixels around it, minus an offset.
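# Roughly, the effect is (a sketch of the idea, not the exact firmware math):
#   out(x, y) = white if img(x, y) > local_mean(x, y) - offset else black
# with invert=True swapping which side of the comparison ends up white.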
import sensor, image, time
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
clock = time.clock() # Tracks FPS.
while(True):
clock.tick() # Track elapsed milliseconds between snapshots().
img = sensor.snapshot() # Take a picture and return the image.
# The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
# kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
# shouldn't ever need to use a value bigger than 2.
img.mean(1, threshold=True, offset=5, invert=True)
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
# connected to your computer. The FPS should increase once disconnected.
avg_line_length: 43.769231 | max_line_length: 87 | alphanum_fraction: 0.730228

hexsha: a766bb12f6957dc0ecb3ad35df49e5b0f1314691 | size: 2,591 | ext: py | lang: Python
path: clustering/visualization.py | repo: ECML-PKDD-2021/SIMPRIM | head: 5d2127b66aa443ac2c2a1e4a59e9b07c055f93d4 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
# Standard
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set() # for plot styling
from sklearn.metrics import confusion_matrix
import skopt.plots
def plot_confusion_matrix(true_labels, cluster_class_labels, size=(13,13)):
"""
Confusion matrix that shows how the cluster class labels overlap
with the original journey class labels. The Hungarian algorithm is
used to map each cluster label to a class label with minimal error.
"""
k = max(len(set(true_labels)), len(set(cluster_class_labels)))
plt.figure(figsize=size)
if k > 10:
plt.subplot(3, 1, 1)
else:
plt.subplot(1, 3, 1)
og_mat = confusion_matrix(true_labels, true_labels)
sns.heatmap(og_mat.T, square=True, annot=True, fmt='d', cbar=False,
xticklabels=[i for i in range(k)],
yticklabels=[i for i in range(k)])
plt.title('Original Distribution', fontsize=14)
plt.xlabel('true label')
plt.ylabel('predicted label')
if k > 10:
plt.subplot(3, 1, 2)
else:
plt.subplot(1, 3, 2)
mat = confusion_matrix(true_labels, cluster_class_labels)
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False,
xticklabels=[i for i in range(k)],
yticklabels=[i for i in range(k)])
plt.title('Optimal Cluster-Label Assignment', fontsize=14)
plt.xlabel('true label')
plt.tight_layout()
fig1 = plt.gcf()
plt.show()
return fig1
def plot_weight_optimization(optimization_results, figsize=(10,6)):
plt.figure(figsize=(10,6))
skopt.plots.plot_convergence(optimization_results)
plt.title('Weight Convergence Plot', fontsize=16)
plt.show()
def plot_average_results(avg_scores_list_ami, nr_features_list, labels, avg_scores_list_sdbw=None):
xi = list(range(len(nr_features_list)))
plt.figure(figsize=(14,4))
plt.subplot(1, 2, 1)
plt.plot(xi, avg_scores_list_ami, marker='o', linestyle='--', color='r', label=labels[0])
plt.ylim((0.25, 0.5))
plt.xlabel('nr of features')
plt.ylabel('Average AMI')
plt.xticks(xi, nr_features_list)
plt.legend()
if avg_scores_list_sdbw is not None:
plt.subplot(1, 2, 2)
plt.plot(xi, avg_scores_list_sdbw, marker='o', linestyle='--', color='b', label=labels[1])
plt.ylim((0.5, 1.5))
plt.xlabel('nr of features')
plt.ylabel('Average S_Dbw')
plt.xticks(xi, nr_features_list)
plt.legend()
plt.show()
avg_line_length: 31.987654 | max_line_length: 99 | alphanum_fraction: 0.644925

hexsha: 0325dd2238cd3ece608986b3ce91cff4016991fb | size: 3,503 | ext: py | lang: Python
path: tests/test_exceptions/test_exceptions.py | repo: ricohapi/ricoh-cloud-sdk-python | head: 740d778c678e6097e3c35478545cbf283276a7ee | licenses: ["MIT"]
stars: 2 (2018-08-14T21:01:07.000Z to 2019-12-16T07:21:09.000Z) | issues: null | forks: null
content:
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ricoh Co., Ltd. All Rights Reserved.
from unittest import TestCase
from ricohcloudsdk.exceptions import ClientError, ServerError
class TestSDKError(TestCase):
def __make_sdk_error_message(self, status_code, code='undefined', message='undefined'):
ERROR_TEMPLATE = 'http_status: {status_code}, code: {code}, message: {message}'
sdk_error_message = ERROR_TEMPLATE.format(
status_code=status_code,
code=code,
message=message
)
return sdk_error_message
def test_client_error_normal(self):
api_error_message = {'error': {'code': 'resource_not_found',
'message': 'The specified resource does not exist.'}}
sdk_error_message = self.__make_sdk_error_message(
status_code='404',
code='resource_not_found',
message='The specified resource does not exist.'
)
try:
raise ClientError(404, api_error_message)
except ClientError as excinfo:
assert 404 == excinfo.status_code
assert api_error_message == excinfo.response
assert sdk_error_message == excinfo.args[0]
def test_client_error_only_message(self):
api_error_message = {'message': 'Unsupported Media Type'}
sdk_error_message = self.__make_sdk_error_message(
status_code='415',
message='Unsupported Media Type'
)
try:
raise ClientError(415, api_error_message)
except ClientError as excinfo:
assert 415 == excinfo.status_code
assert api_error_message == excinfo.response
assert sdk_error_message == excinfo.args[0]
def test_client_error_only_str(self):
api_error_message = 'HTTP content length exceeded 10485760 bytes.'
sdk_error_message = self.__make_sdk_error_message(
status_code='413',
message=api_error_message
)
try:
raise ClientError(413, api_error_message)
except ClientError as excinfo:
assert 413 == excinfo.status_code
assert api_error_message == excinfo.response
assert sdk_error_message == excinfo.args[0]
def test_server_error_normal(self):
api_error_message = {'error': {'code': 'time_out',
'message': 'The operation could not be completed within the acceptable time.'}}
sdk_error_message = self.__make_sdk_error_message(
status_code='500',
code='time_out',
message='The operation could not be completed within the acceptable time.'
)
try:
raise ServerError(500, api_error_message)
except ServerError as excinfo:
assert 500 == excinfo.status_code
assert api_error_message == excinfo.response
assert sdk_error_message == excinfo.args[0]
def test_server_error_only_message(self):
api_error_message = {'message': 'Internal server error'}
sdk_error_message = self.__make_sdk_error_message(
status_code='500',
message='Internal server error'
)
try:
raise ServerError(500, api_error_message)
except ServerError as excinfo:
assert 500 == excinfo.status_code
assert api_error_message == excinfo.response
assert sdk_error_message == excinfo.args[0]
avg_line_length: 40.264368 | max_line_length: 118 | alphanum_fraction: 0.630888

hexsha: 209ebcca233df3cff540ba1f5949787f1924a748 | size: 4,749 | ext: py | lang: Python
path: cosmic-core/test/integration/tests/test_vm_migration.py | repo: sanderv32/cosmic | head: 9a9d86500b67255a1c743a9438a05c0d969fd210 | licenses: ["Apache-2.0"]
stars: 64 (2016-01-30T13:31:00.000Z to 2022-02-21T02:13:25.000Z) | issues: 525 (2016-01-22T10:46:31.000Z to 2022-02-23T11:08:01.000Z) | forks: 25 (2016-01-13T16:46:46.000Z to 2021-07-23T15:22:27.000Z)
content:
from cosmic.base import *
from cosmic.common import *
from cosmic.cosmicLog import CosmicLog
from cosmic.cosmicTestCase import cosmicTestCase
class TestVmMigration(cosmicTestCase):
@classmethod
def setUpClass(cls, redundant=False):
cls.class_cleanup = []
cls.logger = CosmicLog(CosmicLog.LOGGER_TEST).get_logger()
cls.testClient = super(TestVmMigration, cls).getClsTestClient()
cls.apiclient = cls.testClient.getApiClient()
cls.services = cls.testClient.getParsedTestDataConfig()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.apiclient,
cls.zone.id,
)
cls.vpc_offering = get_default_redundant_vpc_offering(cls.apiclient) if redundant else get_default_vpc_offering(cls.apiclient)
cls.logger.debug("VPC Offering '%s' selected", cls.vpc_offering.name)
cls.network_offering = get_default_network_offering(cls.apiclient)
cls.logger.debug("Network Offering '%s' selected", cls.network_offering.name)
cls.virtual_machine_offering = get_default_virtual_machine_offering(cls.apiclient)
cls.logger.debug("Virtual Machine Offering '%s' selected", cls.virtual_machine_offering.name)
cls.hosts = Host.list(cls.apiclient, listall=True, type="Routing")
cls.logger.debug("Creating Admin Account for Domain ID ==> %s" % cls.domain.id)
cls.account = Account.create(
cls.apiclient,
cls.services["account"],
admin=True,
domainid=cls.domain.id)
cls.class_cleanup.append(cls.account)
vpc = VPC.create(
api_client=cls.apiclient,
services=cls.services["vpc"],
networkDomain="vpc.vpn",
vpcofferingid=cls.vpc_offering.id,
zoneid=cls.zone.id,
account=cls.account.name,
domainid=cls.domain.id
)
wait_vpc_ready(vpc)
cls.logger.debug("VPC %s created" % vpc.id)
ntwk = Network.create(
api_client=cls.apiclient,
services=cls.services["network_1"],
accountid=cls.account.name,
domainid=cls.domain.id,
networkofferingid=cls.network_offering.id,
zoneid=cls.zone.id,
vpcid=vpc.id
)
cls.logger.debug("Network %s created in VPC %s" % (ntwk.id, vpc.id))
cls.vm = VirtualMachine.create(cls.apiclient, services=cls.services["virtual_machine"],
templateid=cls.template.id,
zoneid=cls.zone.id,
accountid=cls.account.name,
domainid=cls.domain.id,
serviceofferingid=cls.virtual_machine_offering.id,
networkids=ntwk.id
)
cls.logger.debug("VM %s deployed in VPC %s" % (cls.vm.id, vpc.id))
cls.logger.debug("Deployed virtual machine: OK")
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.apiclient, cls.class_cleanup, cls.logger)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.method_cleanup = []
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.method_cleanup, self.logger)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def test_01_live_migrate(self):
hv_to_migrate_to = self.get_dest_hypervisor()[0].id
self.logger.debug("Migrate VM %s from HV %s to HV %s" % (self.vm.id, self.vm.hostid, hv_to_migrate_to))
self.vm = self.vm.migrate(self.apiclient, hostid=hv_to_migrate_to)
self.assertEqual(self.vm.hostid, hv_to_migrate_to, "VM not migrate to HV %s" % hv_to_migrate_to)
def test_02_migrate_back(self):
vm = self.vm.list(self.apiclient, id=self.vm.id)[0]
hv_to_migrate_to = self.get_dest_hypervisor(vm.hostid)[0].id
self.logger.debug("Migrate VM %s from HV %s to HV %s" % (self.vm.id, self.vm.hostid, hv_to_migrate_to))
self.vm.migrate(self.apiclient, hostid=hv_to_migrate_to)
self.assertEqual(self.vm.hostid, hv_to_migrate_to, "VM not migrate to HV %s" % hv_to_migrate_to)
def get_dest_hypervisor(self, hostid=None):
if hostid is None:
hostid = self.vm.hostid
return filter(lambda x: x.id != hostid, self.hosts)
avg_line_length: 42.026549 | max_line_length: 134 | alphanum_fraction: 0.619709

hexsha: c183a4527519c4f7f32de0e781e5d8e9717b0e2e | size: 296 | ext: py | lang: Python
path: stubs/micropython-pyboard-1_13-95/hashlib.py | repo: RonaldHiemstra/micropython-stubs | head: d97f879b01f6687baaebef1c7e26a80909c3cff3 | licenses: ["MIT"]
stars: 38 (2020-10-18T21:59:44.000Z to 2022-03-17T03:03:28.000Z) | issues: 176 (2020-10-18T14:31:03.000Z to 2022-03-30T23:22:39.000Z) | forks: 6 (2020-12-28T21:11:12.000Z to 2022-02-06T04:07:50.000Z)
content:
"""
Module: 'hashlib' on pyboard 1.13.0-95
"""
# MCU: (sysname='pyboard', nodename='pyboard', release='1.13.0', version='v1.13-95-g0fff2e03f on 2020-10-03', machine='PYBv1.1 with STM32F405RG')
# Stubber: 1.3.4
class sha256:
''
def digest():
pass
def update():
pass
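# Typical usage on the board (illustrative only; this stub just records the API
# surface and does not implement hashing):
#   import hashlib
#   h = hashlib.sha256()
#   h.update(b"some data")
#   print(h.digest())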
avg_line_length: 19.733333 | max_line_length: 145 | alphanum_fraction: 0.60473

hexsha: f6a422180b0dfbf62a353c7a5579b054abc01723 | size: 186,707 | ext: py | lang: Python
path: pandas/core/indexes/base.py | repo: falcaopetri/pandas | head: 7673357191709036faad361cbb5f31a802703249 | licenses: ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"]
stars: 6 (2021-01-06T04:28:46.000Z to 2021-11-24T01:40:20.000Z) | issues: null | forks: null
content:
from datetime import datetime
import operator
from textwrap import dedent
from typing import TYPE_CHECKING, Any, FrozenSet, Hashable, Union
import warnings
import numpy as np
from pandas._libs import algos as libalgos, index as libindex, lib
import pandas._libs.join as libjoin
from pandas._libs.lib import is_datetime_array, no_default
from pandas._libs.tslibs import OutOfBoundsDatetime, Timestamp
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas._libs.tslibs.timezones import tz_compare
from pandas._typing import Label
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.core.dtypes import concat as _concat
from pandas.core.dtypes.cast import (
maybe_cast_to_integer_array,
validate_numeric_casting,
)
from pandas.core.dtypes.common import (
ensure_categorical,
ensure_int64,
ensure_object,
ensure_platform_int,
is_bool,
is_bool_dtype,
is_categorical_dtype,
is_datetime64_any_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_hashable,
is_integer,
is_integer_dtype,
is_interval_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_period_dtype,
is_scalar,
is_signed_integer_dtype,
is_timedelta64_dtype,
is_unsigned_integer_dtype,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.generic import (
ABCCategorical,
ABCDataFrame,
ABCDatetimeIndex,
ABCIntervalIndex,
ABCMultiIndex,
ABCPandasArray,
ABCPeriodIndex,
ABCRangeIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas.core.dtypes.missing import array_equivalent, isna
from pandas.core import ops
from pandas.core.accessor import CachedAccessor
import pandas.core.algorithms as algos
from pandas.core.arrays import ExtensionArray
from pandas.core.base import IndexOpsMixin, PandasObject
import pandas.core.common as com
from pandas.core.indexers import deprecate_ndim_indexing
from pandas.core.indexes.frozen import FrozenList
import pandas.core.missing as missing
from pandas.core.ops import get_op_result_name
from pandas.core.ops.invalid import make_invalid_op
from pandas.core.strings import StringMethods
from pandas.io.formats.printing import (
PrettyDict,
default_pprint,
format_object_attrs,
format_object_summary,
pprint_thing,
)
if TYPE_CHECKING:
from pandas import Series
__all__ = ["Index"]
_unsortable_types = frozenset(("mixed", "mixed-integer"))
_index_doc_kwargs = dict(
klass="Index",
inplace="",
target_klass="Index",
raises_section="",
unique="Index",
duplicated="np.ndarray",
)
_index_shared_docs = dict()
str_t = str
def _make_comparison_op(op, cls):
def cmp_method(self, other):
if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError("Lengths must match to compare")
if is_object_dtype(self) and isinstance(other, ABCCategorical):
left = type(other)(self._values, dtype=other.dtype)
return op(left, other)
elif is_object_dtype(self) and isinstance(other, ExtensionArray):
# e.g. PeriodArray
with np.errstate(all="ignore"):
result = op(self.values, other)
elif is_object_dtype(self) and not isinstance(self, ABCMultiIndex):
# don't pass MultiIndex
with np.errstate(all="ignore"):
result = ops.comp_method_OBJECT_ARRAY(op, self.values, other)
else:
with np.errstate(all="ignore"):
result = op(self.values, np.asarray(other))
if is_bool_dtype(result):
return result
return ops.invalid_comparison(self, other, op)
name = f"__{op.__name__}__"
return set_function_name(cmp_method, name, cls)
def _make_arithmetic_op(op, cls):
def index_arithmetic_method(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame, ABCTimedeltaIndex)):
return NotImplemented
from pandas import Series
result = op(Series(self), other)
if isinstance(result, tuple):
return (Index(result[0]), Index(result[1]))
return Index(result)
name = f"__{op.__name__}__"
# TODO: docstring?
return set_function_name(index_arithmetic_method, name, cls)
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
"""
This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__.
"""
# required for backward compat, because PI can't be instantiated with
# ordinals through __new__ GH #13277
if issubclass(cls, ABCPeriodIndex):
from pandas.core.indexes.period import _new_PeriodIndex
return _new_PeriodIndex(cls, **d)
if issubclass(cls, ABCMultiIndex):
if "labels" in d and "codes" not in d:
# GH#23752 "labels" kwarg has been replaced with "codes"
d["codes"] = d.pop("labels")
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
If dtype is None, we find the dtype that best fits the data.
If an actual dtype is provided, we coerce to that dtype if it's safe.
Otherwise, an error will be raised.
copy : bool
Make a copy of input ndarray.
name : object
Name to be stored in the index.
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible.
See Also
--------
RangeIndex : Index implementing a monotonic integer range.
CategoricalIndex : Index of :class:`Categorical` s.
MultiIndex : A multi-level, or hierarchical, Index.
IntervalIndex : An Index of :class:`Interval` s.
DatetimeIndex, TimedeltaIndex, PeriodIndex
Int64Index, UInt64Index, Float64Index
Notes
-----
An Index instance can **only** contain hashable objects
Examples
--------
>>> pd.Index([1, 2, 3])
Int64Index([1, 2, 3], dtype='int64')
>>> pd.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
"""
# tolist is not actually deprecated, just suppressed in the __dir__
_deprecations: FrozenSet[str] = (
PandasObject._deprecations
| IndexOpsMixin._deprecations
| frozenset(["contains", "set_value"])
)
# To hand over control to subclasses
_join_precedence = 1
# Cython methods; see github.com/cython/cython/issues/2647
# for why we need to wrap these instead of making them class attributes
# Moreover, cython will choose the appropriate-dtyped sub-function
# given the dtypes of the passed arguments
def _left_indexer_unique(self, left, right):
return libjoin.left_join_indexer_unique(left, right)
def _left_indexer(self, left, right):
return libjoin.left_join_indexer(left, right)
def _inner_indexer(self, left, right):
return libjoin.inner_join_indexer(left, right)
def _outer_indexer(self, left, right):
return libjoin.outer_join_indexer(left, right)
_typ = "index"
_data: Union[ExtensionArray, np.ndarray]
_id = None
_name: Label = None
# MultiIndex.levels previously allowed setting the index name. We
# don't allow this anymore, and raise if it happens rather than
# failing silently.
_no_setting_name: bool = False
_comparables = ["name"]
_attributes = ["name"]
_is_numeric_dtype = False
_can_hold_na = True
# would we like our indexing holder to defer to us
_defer_to_indexing = False
# prioritize current class for _shallow_copy_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = libindex.ObjectEngine
# whether we support partial string indexing. Overridden
# in DatetimeIndex and PeriodIndex
_supports_partial_string_indexing = False
_accessors = {"str"}
str = CachedAccessor("str", StringMethods)
# --------------------------------------------------------------------
# Constructors
def __new__(
cls, data=None, dtype=None, copy=False, name=None, tupleize_cols=True, **kwargs,
) -> "Index":
from pandas.core.indexes.range import RangeIndex
name = maybe_extract_name(name, data, cls)
if isinstance(data, ABCPandasArray):
# ensure users don't accidentally put a PandasArray in an index.
data = data.to_numpy()
# range
if isinstance(data, RangeIndex):
return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
elif isinstance(data, range):
return RangeIndex.from_range(data, dtype=dtype, name=name)
# categorical
elif is_categorical_dtype(data) or is_categorical_dtype(dtype):
# Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
from pandas.core.indexes.category import CategoricalIndex
return _maybe_asobject(dtype, CategoricalIndex, data, copy, name, **kwargs)
# interval
elif is_interval_dtype(data) or is_interval_dtype(dtype):
# Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
from pandas.core.indexes.interval import IntervalIndex
return _maybe_asobject(dtype, IntervalIndex, data, copy, name, **kwargs)
elif (
is_datetime64_any_dtype(data)
or is_datetime64_any_dtype(dtype)
or "tz" in kwargs
):
# Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
from pandas import DatetimeIndex
return _maybe_asobject(dtype, DatetimeIndex, data, copy, name, **kwargs)
elif is_timedelta64_dtype(data) or is_timedelta64_dtype(dtype):
# Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
from pandas import TimedeltaIndex
return _maybe_asobject(dtype, TimedeltaIndex, data, copy, name, **kwargs)
elif is_period_dtype(data) or is_period_dtype(dtype):
# Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
from pandas import PeriodIndex
return _maybe_asobject(dtype, PeriodIndex, data, copy, name, **kwargs)
# extension dtype
elif is_extension_array_dtype(data) or is_extension_array_dtype(dtype):
if not (dtype is None or is_object_dtype(dtype)):
# coerce to the provided dtype
ea_cls = dtype.construct_array_type()
data = ea_cls._from_sequence(data, dtype=dtype, copy=False)
else:
data = np.asarray(data, dtype=object)
# coerce to the object dtype
data = data.astype(object)
return Index(data, dtype=object, copy=copy, name=name, **kwargs)
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
# Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
from pandas.core.indexes.numeric import (
Float64Index,
Int64Index,
UInt64Index,
)
if dtype is not None:
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
# GH 11836
data = _maybe_cast_with_dtype(data, dtype, copy)
dtype = data.dtype # TODO: maybe not for object?
# maybe coerce to a sub-class
if is_signed_integer_dtype(data.dtype):
return Int64Index(data, copy=copy, dtype=dtype, name=name)
elif is_unsigned_integer_dtype(data.dtype):
return UInt64Index(data, copy=copy, dtype=dtype, name=name)
elif is_float_dtype(data.dtype):
return Float64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.astype("object")
else:
subarr = com.asarray_tuplesafe(data, dtype=object)
# asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
if copy:
subarr = subarr.copy()
if dtype is None:
new_data, new_dtype = _maybe_cast_data_without_dtype(subarr)
if new_dtype is not None:
return cls(
new_data, dtype=new_dtype, copy=False, name=name, **kwargs
)
if kwargs:
raise TypeError(f"Unexpected keyword arguments {repr(set(kwargs))}")
if subarr.ndim > 1:
# GH#13601, GH#20285, GH#27125
raise ValueError("Index data must be 1-dimensional")
return cls._simple_new(subarr, name)
elif data is None or is_scalar(data):
raise cls._scalar_data_error(data)
elif hasattr(data, "__array__"):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name, **kwargs)
else:
if tupleize_cols and is_list_like(data):
# GH21470: convert iterable to list before determining if empty
if is_iterator(data):
data = list(data)
if data and all(isinstance(e, tuple) for e in data):
# we must be all tuples, otherwise don't construct
# 10697
from pandas.core.indexes.multi import MultiIndex
return MultiIndex.from_tuples(
data, names=name or kwargs.get("names")
)
# other iterable of some kind
subarr = com.asarray_tuplesafe(data, dtype=object)
return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)
"""
NOTE for new Index creation:
- _simple_new: It returns new Index with the same type as the caller.
All metadata (such as name) must be provided by caller's responsibility.
Using _shallow_copy is recommended because it fills these metadata
otherwise specified.
- _shallow_copy: It returns new Index with the same type (using
_simple_new), but fills caller's metadata otherwise specified. Passed
kwargs will overwrite corresponding metadata.
- _shallow_copy_with_infer: It returns new Index inferring its type
from passed values. It fills caller's metadata otherwise specified as the
same as _shallow_copy.
See each method's docstring.
"""
@property
def asi8(self):
"""
Integer representation of the values.
Returns
-------
ndarray
An ndarray with int64 dtype.
"""
return None
@classmethod
def _simple_new(cls, values, name: Label = None):
"""
We require that we have a dtype compat for the values. If we are passed
a non-dtype compat, then coerce using the constructor.
Must be careful not to recurse.
"""
assert isinstance(values, np.ndarray), type(values)
result = object.__new__(cls)
result._data = values
# _index_data is a (temporary?) fix to ensure that the direct data
# manipulation we do in `_libs/reduction.pyx` continues to work.
# We need access to the actual ndarray, since we're messing with
# data buffers and strides.
result._index_data = values
result._name = name
result._cache = {}
return result._reset_identity()
@cache_readonly
def _constructor(self):
return type(self)
# --------------------------------------------------------------------
# Index Internals Methods
def _get_attributes_dict(self):
"""
Return an attributes dict for my class.
"""
return {k: getattr(self, k, None) for k in self._attributes}
def _shallow_copy(self, values=None, name: Label = no_default):
"""
Create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence.
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
name : Label, defaults to self.name
"""
name = self.name if name is no_default else name
cache = self._cache.copy() if values is None else {}
if values is None:
values = self.values
result = self._simple_new(values, name=name)
result._cache = cache
return result
def _shallow_copy_with_infer(self, values, **kwargs):
"""
Create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
taking precedence.
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes["copy"] = False
if not len(values) and "dtype" not in kwargs:
# TODO: what if hasattr(values, "dtype")?
attributes["dtype"] = self.dtype
if self._infer_as_myclass:
try:
return self._constructor(values, **attributes)
except (TypeError, ValueError):
pass
# Remove tz so Index will try non-DatetimeIndex inference
attributes.pop("tz", None)
return Index(values, **attributes)
def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
def is_(self, other) -> bool:
"""
More flexible, faster check like ``is`` but that works through views.
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
"""
# use something other than None to be clearer
return self._id is getattr(other, "_id", Ellipsis) and self._id is not None
def _reset_identity(self):
"""
Initializes or resets ``_id`` attribute with new object.
"""
self._id = _Identity()
return self
def _cleanup(self):
self._engine.clear_mapping()
@cache_readonly
def _engine(self):
# property, for now, slow to look up
# to avoid a reference cycle, bind `target_values` to a local variable, so
# `self` is not passed into the lambda.
target_values = self._get_engine_target()
return self._engine_type(lambda: target_values, len(self))
# --------------------------------------------------------------------
# Array-Like Methods
# ndarray compat
def __len__(self) -> int:
"""
Return the length of the Index.
"""
return len(self._data)
def __array__(self, dtype=None) -> np.ndarray:
"""
The array interface, return my values.
"""
return np.asarray(self._data, dtype=dtype)
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc.
"""
result = lib.item_from_zerodim(result)
if is_bool_dtype(result) or lib.is_scalar(result) or np.ndim(result) > 1:
return result
attrs = self._get_attributes_dict()
return Index(result, **attrs)
@cache_readonly
def dtype(self):
"""
Return the dtype object of the underlying data.
"""
return self._data.dtype
def ravel(self, order="C"):
"""
Return an ndarray of the flattened values of the underlying data.
Returns
-------
numpy.ndarray
Flattened array.
See Also
--------
numpy.ndarray.ravel
"""
values = self._get_engine_target()
return values.ravel(order=order)
def view(self, cls=None):
# we need to see if we are subclassing an
# index type here
if cls is not None and not hasattr(cls, "_typ"):
result = self._data.view(cls)
else:
result = self._shallow_copy()
if isinstance(result, Index):
result._id = self._id
return result
def astype(self, dtype, copy=True):
"""
Create an Index with values cast to dtypes. The class of a new Index
is determined by dtype. When conversion is impossible, a ValueError
exception is raised.
Parameters
----------
dtype : numpy dtype or pandas type
Note that any signed integer `dtype` is treated as ``'int64'``,
and any unsigned integer `dtype` is treated as ``'uint64'``,
regardless of the size.
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and internal requirements on dtype are
satisfied, the original data is used to create a new Index
or the original Index is returned.
Returns
-------
Index
Index with values cast to specified dtype.
"""
if is_dtype_equal(self.dtype, dtype):
return self.copy() if copy else self
elif is_categorical_dtype(dtype):
from pandas.core.indexes.category import CategoricalIndex
return CategoricalIndex(self.values, name=self.name, dtype=dtype, copy=copy)
elif is_extension_array_dtype(dtype):
return Index(np.asarray(self), name=self.name, dtype=dtype, copy=copy)
try:
casted = self.values.astype(dtype, copy=copy)
except (TypeError, ValueError) as err:
raise TypeError(
f"Cannot cast {type(self).__name__} to dtype {dtype}"
) from err
return Index(casted, name=self.name, dtype=dtype)
_index_shared_docs[
"take"
] = """
Return a new %(klass)s of the values selected by the indices.
For internal compatibility with numpy arrays.
Parameters
----------
indices : list
Indices to be taken.
axis : int, optional
The axis over which to select values, always 0.
allow_fill : bool, default True
fill_value : bool, default None
If allow_fill=True and fill_value is not None, indices specified by
-1 is regarded as NA. If Index doesn't hold NA, raise ValueError.
Returns
-------
numpy.ndarray
Elements of given indices.
See Also
--------
numpy.ndarray.take
"""
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
if kwargs:
nv.validate_take(tuple(), kwargs)
indices = ensure_platform_int(indices)
if self._can_hold_na:
taken = self._assert_take_fillable(
self.values,
indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=self._na_value,
)
else:
if allow_fill and fill_value is not None:
cls_name = type(self).__name__
raise ValueError(
f"Unable to fill values because {cls_name} cannot contain NA"
)
taken = self.values.take(indices)
return self._shallow_copy(taken)
def _assert_take_fillable(
self, values, indices, allow_fill=True, fill_value=None, na_value=np.nan
):
"""
Internal method to handle NA filling of take.
"""
indices = ensure_platform_int(indices)
# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
if (indices < -1).any():
raise ValueError(
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
taken = algos.take(
values, indices, allow_fill=allow_fill, fill_value=na_value
)
else:
taken = values.take(indices)
return taken
_index_shared_docs[
"repeat"
] = """
Repeat elements of a %(klass)s.
Returns a new %(klass)s where each element of the current %(klass)s
is repeated consecutively a given number of times.
Parameters
----------
repeats : int or array of ints
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
%(klass)s.
axis : None
Must be ``None``. Has no effect but is accepted for compatibility
with numpy.
Returns
-------
repeated_index : %(klass)s
Newly created %(klass)s with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series.
numpy.repeat : Similar method for :class:`numpy.ndarray`.
Examples
--------
>>> idx = pd.Index(['a', 'b', 'c'])
>>> idx
Index(['a', 'b', 'c'], dtype='object')
>>> idx.repeat(2)
Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object')
>>> idx.repeat([1, 2, 3])
Index(['a', 'b', 'b', 'c', 'c', 'c'], dtype='object')
"""
@Appender(_index_shared_docs["repeat"] % _index_doc_kwargs)
def repeat(self, repeats, axis=None):
repeats = ensure_platform_int(repeats)
nv.validate_repeat(tuple(), dict(axis=axis))
return self._shallow_copy(self._values.repeat(repeats))
# --------------------------------------------------------------------
# Copying Methods
def copy(self, name=None, deep=False, dtype=None, names=None):
"""
Make a copy of this object.
Name and dtype sets those attributes on the new object.
Parameters
----------
name : Label, optional
Set name for new object.
deep : bool, default False
dtype : numpy dtype or pandas type, optional
Set dtype for new object.
names : list-like, optional
Kept for compatibility with MultiIndex. Should not be used.
Returns
-------
Index
Index referring to the new object, which is a copy of this object.
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
"""
if deep:
new_index = self._shallow_copy(self._data.copy())
else:
new_index = self._shallow_copy()
names = self._validate_names(name=name, names=names, deep=deep)
new_index = new_index.set_names(names)
if dtype:
new_index = new_index.astype(dtype)
return new_index
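# A minimal usage sketch for copy(), assuming ``import pandas as pd``; passing
# ``name`` sets the name on the new object without touching the original:
# >>> idx = pd.Index(['a', 'b', 'c'], name='x')
# >>> idx.copy(name='y')
# Index(['a', 'b', 'c'], dtype='object', name='y')
# >>> idx.name
# 'x'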
def __copy__(self, **kwargs):
return self.copy(**kwargs)
def __deepcopy__(self, memo=None):
"""
Parameters
----------
memo, default None
Standard signature. Unused
"""
return self.copy(deep=True)
# --------------------------------------------------------------------
# Rendering Methods
def __repr__(self) -> str_t:
"""
Return a string representation for this object.
"""
klass_name = type(self).__name__
data = self._format_data()
attrs = self._format_attrs()
space = self._format_space()
attrs_str = [f"{k}={v}" for k, v in attrs]
prepr = f",{space}".join(attrs_str)
# no data provided, just attributes
if data is None:
data = ""
res = f"{klass_name}({data}{prepr})"
return res
def _format_space(self) -> str_t:
# using space here controls if the attributes
# are line separated or not (the default)
# max_seq_items = get_option('display.max_seq_items')
# if len(self) > max_seq_items:
# space = "\n%s" % (' ' * (len(klass) + 1))
return " "
@property
def _formatter_func(self):
"""
Return the formatter function.
"""
return default_pprint
def _format_data(self, name=None) -> str_t:
"""
Return the formatted data as a unicode string.
"""
# do we want to justify (only do so for non-objects)
is_justify = True
if self.inferred_type == "string":
is_justify = False
elif self.inferred_type == "categorical":
if is_object_dtype(self.categories): # type: ignore
is_justify = False
return format_object_summary(
self, self._formatter_func, is_justify=is_justify, name=name
)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value).
"""
return format_object_attrs(self)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.values
def format(self, name: bool = False, formatter=None, **kwargs):
"""
Render a string representation of the Index.
"""
header = []
if name:
header.append(
pprint_thing(self.name, escape_chars=("\t", "\r", "\n"))
if self.name is not None
else ""
)
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, **kwargs)
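# A minimal usage sketch for format(), assuming ``import pandas as pd``; it
# returns a list of rendered strings, with the name prepended when name=True:
# >>> idx = pd.Index(['a', 'b'], name='letters')
# >>> idx.format()
# ['a', 'b']
# >>> idx.format(name=True)
# ['letters', 'a', 'b']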
def _format_with_header(self, header, na_rep="NaN", **kwargs):
values = self.values
from pandas.io.formats.format import format_array
if is_categorical_dtype(values.dtype):
values = np.array(values)
elif is_object_dtype(values.dtype):
values = lib.maybe_convert_objects(values, safe=1)
if is_object_dtype(values.dtype):
result = [pprint_thing(x, escape_chars=("\t", "\r", "\n")) for x in values]
# could have nans
mask = isna(values)
if mask.any():
result = np.array(result)
result[mask] = na_rep
result = result.tolist()
else:
result = _trim_front(format_array(values, None, justify="left"))
return header + result
def to_native_types(self, slicer=None, **kwargs):
"""
Format specified values of `self` and return them.
Parameters
----------
slicer : int, array-like
An indexer into `self` that specifies which values
are used in the formatting process.
kwargs : dict
Options for specifying how the values should be formatted.
These options include the following:
1) na_rep : str
The value that serves as a placeholder for NULL values
2) quoting : bool or None
Whether or not there are quoted values in `self`
3) date_format : str
The format used to represent date-like values.
Returns
-------
numpy.ndarray
Formatted values.
"""
values = self
if slicer is not None:
values = values[slicer]
return values._format_native_types(**kwargs)
def _format_native_types(self, na_rep="", quoting=None, **kwargs):
"""
Actually format specific types of the index.
"""
mask = isna(self)
if not self.is_object() and not quoting:
values = np.asarray(self).astype(str)
else:
values = np.array(self, dtype=object, copy=True)
values[mask] = na_rep
return values
def _summary(self, name=None) -> str_t:
"""
Return a summarized representation.
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
"""
if len(self) > 0:
head = self[0]
if hasattr(head, "format") and not isinstance(head, str):
head = head.format()
tail = self[-1]
if hasattr(tail, "format") and not isinstance(tail, str):
tail = tail.format()
index_summary = f", {head} to {tail}"
else:
index_summary = ""
if name is None:
name = type(self).__name__
return f"{name}: {len(self)} entries{index_summary}"
# --------------------------------------------------------------------
# Conversion Methods
def to_flat_index(self):
"""
Identity method.
.. versionadded:: 0.24.0
This is implemented for compatibility with subclass implementations
when chaining.
Returns
-------
pd.Index
Caller.
See Also
--------
MultiIndex.to_flat_index : Subclass implementation.
"""
return self
def to_series(self, index=None, name=None):
"""
Create a Series with both index and values equal to the index keys.
Useful with map for returning an indexer based on an index.
Parameters
----------
index : Index, optional
Index of resulting Series. If None, defaults to original index.
name : str, optional
Name of resulting Series. If None, defaults to name of original
index.
Returns
-------
Series
The dtype will be based on the type of the Index values.
"""
from pandas import Series
if index is None:
index = self._shallow_copy()
if name is None:
name = self.name
return Series(self.values.copy(), index=index, name=name)
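# A minimal usage sketch for to_series(), assuming ``import pandas as pd``;
# both the index and the values of the resulting Series are the Index keys:
# >>> s = pd.Index(['Ant', 'Bear'], name='animal').to_series()
# >>> list(s.index) == list(s.values) == ['Ant', 'Bear']
# True
# >>> s.name
# 'animal'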
def to_frame(self, index: bool = True, name=None):
"""
Create a DataFrame with a column containing the Index.
.. versionadded:: 0.24.0
Parameters
----------
index : bool, default True
Set the index of the returned DataFrame as the original Index.
name : object, default None
The passed name should substitute for the index name (if it has
one).
Returns
-------
DataFrame
DataFrame containing the original Index data.
See Also
--------
Index.to_series : Convert an Index to a Series.
Series.to_frame : Convert Series to DataFrame.
Examples
--------
>>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')
>>> idx.to_frame()
animal
animal
Ant Ant
Bear Bear
Cow Cow
By default, the original Index is reused. To enforce a new Index:
>>> idx.to_frame(index=False)
animal
0 Ant
1 Bear
2 Cow
To override the name of the resulting column, specify `name`:
>>> idx.to_frame(index=False, name='zoo')
zoo
0 Ant
1 Bear
2 Cow
"""
from pandas import DataFrame
if name is None:
name = self.name or 0
result = DataFrame({name: self._values.copy()})
if index:
result.index = self
return result
# --------------------------------------------------------------------
# Name-Centric Methods
@property
def name(self):
"""
Return Index or MultiIndex name.
"""
return self._name
@name.setter
def name(self, value):
if self._no_setting_name:
# Used in MultiIndex.levels to avoid silently ignoring name updates.
raise RuntimeError(
"Cannot set name on a level of a MultiIndex. Use "
"'MultiIndex.set_names' instead."
)
maybe_extract_name(value, None, type(self))
self._name = value
def _validate_names(self, name=None, names=None, deep: bool = False):
"""
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex.
"""
from copy import deepcopy
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
elif names is None and name is None:
return deepcopy(self.names) if deep else self.names
elif names is not None:
if not is_list_like(names):
raise TypeError("Must pass list-like as `names`.")
return names
else:
if not is_list_like(name):
return [name]
return name
def _get_names(self):
return FrozenList((self.name,))
def _set_names(self, values, level=None):
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
Raises
------
TypeError if any name is not hashable.
"""
if not is_list_like(values):
raise ValueError("Names must be a list-like")
if len(values) != 1:
raise ValueError(f"Length of new names must be 1, got {len(values)}")
# GH 20527
# All items in 'name' need to be hashable:
for name in values:
if not is_hashable(name):
raise TypeError(f"{type(self).__name__}.name must be a hashable type")
self._name = values[0]
names = property(fset=_set_names, fget=_get_names)
def set_names(self, names, level=None, inplace: bool = False):
"""
Set Index or MultiIndex name.
Able to set new names partially and by level.
Parameters
----------
names : label or list of label
Name(s) to set.
level : int, label or list of int or label, optional
If the index is a MultiIndex, level(s) to set (None for all
levels). Otherwise level must be None.
inplace : bool, default False
Modifies the object directly, instead of creating a new Index or
MultiIndex.
Returns
-------
Index
The same type as the caller or None if inplace is True.
See Also
--------
Index.rename : Able to set new names without level.
Examples
--------
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx
Int64Index([1, 2, 3, 4], dtype='int64')
>>> idx.set_names('quarter')
Int64Index([1, 2, 3, 4], dtype='int64', name='quarter')
>>> idx = pd.MultiIndex.from_product([['python', 'cobra'],
... [2018, 2019]])
>>> idx
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
)
>>> idx.set_names(['kind', 'year'], inplace=True)
>>> idx
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=['kind', 'year'])
>>> idx.set_names('species', level=0)
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=['species', 'year'])
"""
if level is not None and not isinstance(self, ABCMultiIndex):
raise ValueError("Level must be None for non-MultiIndex")
if level is not None and not is_list_like(level) and is_list_like(names):
raise TypeError("Names must be a string when a single level is provided.")
if not is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if not is_list_like(names):
names = [names]
if level is not None and not is_list_like(level):
level = [level]
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._set_names(names, level=level)
if not inplace:
return idx
def rename(self, name, inplace=False):
"""
Alter Index or MultiIndex name.
Able to set new names without level. Defaults to returning new index.
Length of names must match number of levels in MultiIndex.
Parameters
----------
name : label or list of labels
Name(s) to set.
inplace : bool, default False
Modifies the object directly, instead of creating a new Index or
MultiIndex.
Returns
-------
Index
The same type as the caller or None if inplace is True.
See Also
--------
Index.set_names : Able to set new names partially and by level.
Examples
--------
>>> idx = pd.Index(['A', 'C', 'A', 'B'], name='score')
>>> idx.rename('grade')
Index(['A', 'C', 'A', 'B'], dtype='object', name='grade')
>>> idx = pd.MultiIndex.from_product([['python', 'cobra'],
... [2018, 2019]],
... names=['kind', 'year'])
>>> idx
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=['kind', 'year'])
>>> idx.rename(['species', 'year'])
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
names=['species', 'year'])
>>> idx.rename('species')
Traceback (most recent call last):
TypeError: Must pass list-like as `names`.
"""
return self.set_names([name], inplace=inplace)
# --------------------------------------------------------------------
# Level-Centric Methods
@property
def nlevels(self) -> int:
"""
Number of levels.
"""
return 1
def _sort_levels_monotonic(self):
"""
Compat with MultiIndex.
"""
return self
def _validate_index_level(self, level):
"""
Validate index level.
For single-level Index getting level number is a no-op, but some
verification must be done, as in MultiIndex.
"""
if isinstance(level, int):
if level < 0 and level != -1:
raise IndexError(
"Too many levels: Index has only 1 level, "
f"{level} is not a valid level number"
)
elif level > 0:
raise IndexError(
f"Too many levels: Index has only 1 level, not {level + 1}"
)
elif level != self.name:
raise KeyError(
f"Requested level ({level}) does not match index name ({self.name})"
)
def _get_level_number(self, level) -> int:
self._validate_index_level(level)
return 0
def sortlevel(self, level=None, ascending=True, sort_remaining=None):
"""
For internal compatibility with the Index API.
Sort the Index. This is for compat with MultiIndex.
Parameters
----------
ascending : bool, default True
False to sort in descending order.
level, sort_remaining are compat parameters and have no effect here
Returns
-------
Index
"""
return self.sort_values(return_indexer=True, ascending=ascending)
def _get_level_values(self, level):
"""
Return an Index of values for requested level.
This is primarily useful to get an individual level of values from a
MultiIndex, but is provided on Index as well for compatibility.
Parameters
----------
level : int or str
It is either the integer position or the name of the level.
Returns
-------
Index
Calling object, as there is only one level in the Index.
See Also
--------
MultiIndex.get_level_values : Get values for a level of a MultiIndex.
Notes
-----
For Index, level should be 0, since there are no multiple levels.
Examples
--------
>>> idx = pd.Index(list('abc'))
>>> idx
Index(['a', 'b', 'c'], dtype='object')
Get level values by supplying `level` as integer:
>>> idx.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object')
"""
self._validate_index_level(level)
return self
get_level_values = _get_level_values
def droplevel(self, level=0):
"""
Return index with requested level(s) removed.
If resulting index has only 1 level left, the result will be
of Index type, not MultiIndex.
.. versionadded:: 0.23.1 (support for non-MultiIndex)
Parameters
----------
level : int, str, or list-like, default 0
If a string is given, must be the name of a level
If list-like, elements must be names or indexes of levels.
Returns
-------
Index or MultiIndex
"""
if not isinstance(level, (tuple, list)):
level = [level]
levnums = sorted(self._get_level_number(lev) for lev in level)[::-1]
if len(level) == 0:
return self
if len(level) >= self.nlevels:
raise ValueError(
f"Cannot remove {len(level)} levels from an index with {self.nlevels} "
"levels: at least one level must be left."
)
# The two checks above guarantee that here self is a MultiIndex
new_levels = list(self.levels)
new_codes = list(self.codes)
new_names = list(self.names)
for i in levnums:
new_levels.pop(i)
new_codes.pop(i)
new_names.pop(i)
if len(new_levels) == 1:
# set nan if needed
mask = new_codes[0] == -1
result = new_levels[0].take(new_codes[0])
if mask.any():
result = result.putmask(mask, np.nan)
result._name = new_names[0]
return result
else:
from pandas.core.indexes.multi import MultiIndex
return MultiIndex(
levels=new_levels,
codes=new_codes,
names=new_names,
verify_integrity=False,
)
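# A minimal usage sketch for droplevel(), assuming ``import pandas as pd``;
# dropping down to a single level returns a plain Index, not a MultiIndex:
# >>> mi = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']], names=['x', 'y'])
# >>> mi.droplevel('x')
# Index(['a', 'b'], dtype='object', name='y')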
def _get_grouper_for_level(self, mapper, level=None):
"""
Get index grouper corresponding to an index level
Parameters
----------
mapper : Group mapping function or None
Function mapping index values to groups
level : int or None
Index level
Returns
-------
grouper : Index
Index of values to group on.
labels : ndarray of int or None
Array of locations in level_index.
uniques : Index or None
Index of unique values for level.
"""
assert level is None or level == 0
if mapper is None:
grouper = self
else:
grouper = self.map(mapper)
return grouper, None, None
# --------------------------------------------------------------------
# Introspection Methods
@property
def is_monotonic(self) -> bool:
"""
Alias for is_monotonic_increasing.
"""
return self.is_monotonic_increasing
@property
def is_monotonic_increasing(self) -> bool:
"""
Return True if the index values are monotonically increasing
(each value is greater than or equal to the previous one).
Examples
--------
>>> Index([1, 2, 3]).is_monotonic_increasing
True
>>> Index([1, 2, 2]).is_monotonic_increasing
True
>>> Index([1, 3, 2]).is_monotonic_increasing
False
"""
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self) -> bool:
"""
Return True if the index values are monotonically decreasing
(each value is less than or equal to the previous one).
Examples
--------
>>> Index([3, 2, 1]).is_monotonic_decreasing
True
>>> Index([3, 2, 2]).is_monotonic_decreasing
True
>>> Index([3, 1, 2]).is_monotonic_decreasing
False
"""
return self._engine.is_monotonic_decreasing
@property
def _is_strictly_monotonic_increasing(self) -> bool:
"""
Return True if the index values are strictly monotonically increasing
(each value is strictly greater than the previous one).
Examples
--------
>>> Index([1, 2, 3])._is_strictly_monotonic_increasing
True
>>> Index([1, 2, 2])._is_strictly_monotonic_increasing
False
>>> Index([1, 3, 2])._is_strictly_monotonic_increasing
False
"""
return self.is_unique and self.is_monotonic_increasing
@property
def _is_strictly_monotonic_decreasing(self) -> bool:
"""
Return True if the index values are strictly monotonically decreasing
(each value is strictly less than the previous one).
Examples
--------
>>> Index([3, 2, 1])._is_strictly_monotonic_decreasing
True
>>> Index([3, 2, 2])._is_strictly_monotonic_decreasing
False
>>> Index([3, 1, 2])._is_strictly_monotonic_decreasing
False
"""
return self.is_unique and self.is_monotonic_decreasing
@cache_readonly
def is_unique(self) -> bool:
"""
Return if the index has unique values.
"""
return self._engine.is_unique
@property
def has_duplicates(self) -> bool:
"""
Check if the Index has duplicate values.
Returns
-------
bool
Whether or not the Index has duplicate values.
Examples
--------
>>> idx = pd.Index([1, 5, 7, 7])
>>> idx.has_duplicates
True
>>> idx = pd.Index([1, 5, 7])
>>> idx.has_duplicates
False
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.has_duplicates
True
>>> idx = pd.Index(["Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.has_duplicates
False
"""
return not self.is_unique
def is_boolean(self) -> bool:
"""
Check if the Index only consists of booleans.
Returns
-------
bool
Whether or not the Index only consists of booleans.
See Also
--------
is_integer : Check if the Index only consists of integers.
is_floating : Check if the Index is a floating type.
is_numeric : Check if the Index only consists of numeric data.
is_object : Check if the Index is of the object dtype.
is_categorical : Check if the Index holds categorical data.
is_interval : Check if the Index holds Interval objects.
is_mixed : Check if the Index holds data with mixed data types.
Examples
--------
>>> idx = pd.Index([True, False, True])
>>> idx.is_boolean()
True
>>> idx = pd.Index(["True", "False", "True"])
>>> idx.is_boolean()
False
>>> idx = pd.Index([True, False, "True"])
>>> idx.is_boolean()
False
"""
return self.inferred_type in ["boolean"]
def is_integer(self) -> bool:
"""
Check if the Index only consists of integers.
Returns
-------
bool
Whether or not the Index only consists of integers.
See Also
--------
is_boolean : Check if the Index only consists of booleans.
is_floating : Check if the Index is a floating type.
is_numeric : Check if the Index only consists of numeric data.
is_object : Check if the Index is of the object dtype.
is_categorical : Check if the Index holds categorical data.
is_interval : Check if the Index holds Interval objects.
is_mixed : Check if the Index holds data with mixed data types.
Examples
--------
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx.is_integer()
True
>>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_integer()
False
>>> idx = pd.Index(["Apple", "Mango", "Watermelon"])
>>> idx.is_integer()
False
"""
return self.inferred_type in ["integer"]
def is_floating(self) -> bool:
"""
Check if the Index is a floating type.
The Index may consist of only floats, NaNs, or a mix of floats,
integers, or NaNs.
Returns
-------
bool
Whether or not the Index only consists of floats, NaNs, or
a mix of floats, integers, or NaNs.
See Also
--------
is_boolean : Check if the Index only consists of booleans.
is_integer : Check if the Index only consists of integers.
is_numeric : Check if the Index only consists of numeric data.
is_object : Check if the Index is of the object dtype.
is_categorical : Check if the Index holds categorical data.
is_interval : Check if the Index holds Interval objects.
is_mixed : Check if the Index holds data with mixed data types.
Examples
--------
>>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_floating()
True
>>> idx = pd.Index([1.0, 2.0, np.nan, 4.0])
>>> idx.is_floating()
True
>>> idx = pd.Index([1, 2, 3, 4, np.nan])
>>> idx.is_floating()
True
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx.is_floating()
False
"""
return self.inferred_type in ["floating", "mixed-integer-float", "integer-na"]
def is_numeric(self) -> bool:
"""
Check if the Index only consists of numeric data.
Returns
-------
bool
Whether or not the Index only consists of numeric data.
See Also
--------
is_boolean : Check if the Index only consists of booleans.
is_integer : Check if the Index only consists of integers.
is_floating : Check if the Index is a floating type.
is_object : Check if the Index is of the object dtype.
is_categorical : Check if the Index holds categorical data.
is_interval : Check if the Index holds Interval objects.
is_mixed : Check if the Index holds data with mixed data types.
Examples
--------
>>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_numeric()
True
>>> idx = pd.Index([1, 2, 3, 4.0])
>>> idx.is_numeric()
True
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx.is_numeric()
True
>>> idx = pd.Index([1, 2, 3, 4.0, np.nan])
>>> idx.is_numeric()
True
>>> idx = pd.Index([1, 2, 3, 4.0, np.nan, "Apple"])
>>> idx.is_numeric()
False
"""
return self.inferred_type in ["integer", "floating"]
def is_object(self) -> bool:
"""
Check if the Index is of the object dtype.
Returns
-------
bool
Whether or not the Index is of the object dtype.
See Also
--------
is_boolean : Check if the Index only consists of booleans.
is_integer : Check if the Index only consists of integers.
is_floating : Check if the Index is a floating type.
is_numeric : Check if the Index only consists of numeric data.
is_categorical : Check if the Index holds categorical data.
is_interval : Check if the Index holds Interval objects.
is_mixed : Check if the Index holds data with mixed data types.
Examples
--------
>>> idx = pd.Index(["Apple", "Mango", "Watermelon"])
>>> idx.is_object()
True
>>> idx = pd.Index(["Apple", "Mango", 2.0])
>>> idx.is_object()
True
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_object()
False
>>> idx = pd.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_object()
False
"""
return is_object_dtype(self.dtype)
def is_categorical(self) -> bool:
"""
Check if the Index holds categorical data.
Returns
-------
bool
True if the Index is categorical.
See Also
--------
CategoricalIndex : Index for categorical data.
is_boolean : Check if the Index only consists of booleans.
is_integer : Check if the Index only consists of integers.
is_floating : Check if the Index is a floating type.
is_numeric : Check if the Index only consists of numeric data.
is_object : Check if the Index is of the object dtype.
is_interval : Check if the Index holds Interval objects.
is_mixed : Check if the Index holds data with mixed data types.
Examples
--------
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_categorical()
True
>>> idx = pd.Index([1, 3, 5, 7])
>>> idx.is_categorical()
False
>>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"])
>>> s
0 Peter
1 Victor
2 Elisabeth
3 Mar
dtype: object
>>> s.index.is_categorical()
False
"""
return self.inferred_type in ["categorical"]
def is_interval(self) -> bool:
"""
Check if the Index holds Interval objects.
Returns
-------
bool
Whether or not the Index holds Interval objects.
See Also
--------
IntervalIndex : Index for Interval objects.
is_boolean : Check if the Index only consists of booleans.
is_integer : Check if the Index only consists of integers.
is_floating : Check if the Index is a floating type.
is_numeric : Check if the Index only consists of numeric data.
is_object : Check if the Index is of the object dtype.
is_categorical : Check if the Index holds categorical data.
is_mixed : Check if the Index holds data with mixed data types.
Examples
--------
>>> idx = pd.Index([pd.Interval(left=0, right=5),
... pd.Interval(left=5, right=10)])
>>> idx.is_interval()
True
>>> idx = pd.Index([1, 3, 5, 7])
>>> idx.is_interval()
False
"""
return self.inferred_type in ["interval"]
def is_mixed(self) -> bool:
"""
Check if the Index holds data with mixed data types.
Returns
-------
bool
Whether or not the Index holds data with mixed data types.
See Also
--------
is_boolean : Check if the Index only consists of booleans.
is_integer : Check if the Index only consists of integers.
is_floating : Check if the Index is a floating type.
is_numeric : Check if the Index only consists of numeric data.
is_object : Check if the Index is of the object dtype.
is_categorical : Check if the Index holds categorical data.
is_interval : Check if the Index holds Interval objects.
Examples
--------
>>> idx = pd.Index(['a', np.nan, 'b'])
>>> idx.is_mixed()
True
>>> idx = pd.Index([1.0, 2.0, 3.0, 5.0])
>>> idx.is_mixed()
False
"""
return self.inferred_type in ["mixed"]
def holds_integer(self) -> bool:
"""
Whether the type is an integer type.
"""
return self.inferred_type in ["integer", "mixed-integer"]
@cache_readonly
def inferred_type(self) -> str_t:
"""
Return a string of the type inferred from the values.
"""
return lib.infer_dtype(self, skipna=False)
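# A minimal usage sketch for inferred_type, assuming ``import pandas as pd``;
# the string comes from lib.infer_dtype over the values:
# >>> pd.Index(['a', 'b']).inferred_type
# 'string'
# >>> pd.Index([1, 2, 3]).inferred_type
# 'integer'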
@cache_readonly
def is_all_dates(self) -> bool:
"""
Whether or not the index values only consist of dates.
"""
return is_datetime_array(ensure_object(self.values))
# --------------------------------------------------------------------
# Pickle Methods
def __reduce__(self):
d = dict(data=self._data)
d.update(self._get_attributes_dict())
return _new_Index, (type(self), d), None
# --------------------------------------------------------------------
# Null Handling Methods
_na_value = np.nan
"""The expected NA value to use with this index."""
@cache_readonly
def _isnan(self):
"""
Return if each value is NaN.
"""
if self._can_hold_na:
return isna(self)
else:
# shouldn't reach this condition; hasnans should be checked beforehand
values = np.empty(len(self), dtype=np.bool_)
values.fill(False)
return values
@cache_readonly
def _nan_idxs(self):
if self._can_hold_na:
return self._isnan.nonzero()[0]
else:
return np.array([], dtype=np.int64)
@cache_readonly
def hasnans(self) -> bool:
"""
Return if I have any nans; enables various perf speedups.
"""
if self._can_hold_na:
return bool(self._isnan.any())
else:
return False
def isna(self):
"""
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get
mapped to ``True`` values.
Everything else gets mapped to ``False`` values. Characters such as
empty strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
Returns
-------
numpy.ndarray
A boolean array of whether my values are NA.
See Also
--------
Index.notna : Boolean inverse of isna.
Index.dropna : Omit entries with missing values.
isna : Top-level isna.
Series.isna : Detect missing values in Series object.
Examples
--------
Show which entries in a pandas.Index are NA. The result is an
array.
>>> idx = pd.Index([5.2, 6.0, np.NaN])
>>> idx
Float64Index([5.2, 6.0, nan], dtype='float64')
>>> idx.isna()
array([False, False, True], dtype=bool)
Empty strings are not considered NA values. None is considered an NA
value.
>>> idx = pd.Index(['black', '', 'red', None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.isna()
array([False, False, False, True], dtype=bool)
For datetimes, `NaT` (Not a Time) is considered as an NA value.
>>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'),
... pd.Timestamp(''), None, pd.NaT])
>>> idx
DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]', freq=None)
>>> idx.isna()
array([False, True, True, True], dtype=bool)
"""
return self._isnan
isnull = isna
def notna(self):
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to ``True``. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False``
values.
Returns
-------
numpy.ndarray
Boolean array to indicate which entries are not NA.
See Also
--------
Index.notnull : Alias of notna.
Index.isna: Inverse of notna.
notna : Top-level notna.
Examples
--------
Show which entries in an Index are not NA. The result is an
array.
>>> idx = pd.Index([5.2, 6.0, np.NaN])
>>> idx
Float64Index([5.2, 6.0, nan], dtype='float64')
>>> idx.notna()
array([ True, True, False])
Empty strings are not considered NA values. None is considered an NA
value.
>>> idx = pd.Index(['black', '', 'red', None])
>>> idx
Index(['black', '', 'red', None], dtype='object')
>>> idx.notna()
array([ True, True, True, False])
"""
return ~self.isna()
notnull = notna
def fillna(self, value=None, downcast=None):
"""
Fill NA/NaN values with the specified value.
Parameters
----------
value : scalar
Scalar value to use to fill holes (e.g. 0).
This value cannot be a list-like.
downcast : dict, default is None
A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible).
Returns
-------
Index
See Also
--------
DataFrame.fillna : Fill NaN values of a DataFrame.
Series.fillna : Fill NaN Values of a Series.
"""
self._assert_can_do_op(value)
if self.hasnans:
result = self.putmask(self._isnan, value)
if downcast is None:
# no need to care about metadata other than name,
# because an index with NaNs cannot carry a freq
return Index(result, name=self.name)
return self._shallow_copy()
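# A minimal usage sketch for fillna(), assuming ``import pandas as pd`` and
# ``import numpy as np``; the exact repr class depends on the pandas version:
# >>> pd.Index([1.0, np.nan, 3.0]).fillna(0)
# Float64Index([1.0, 0.0, 3.0], dtype='float64')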
def dropna(self, how="any"):
"""
Return Index without NA/NaN values.
Parameters
----------
how : {'any', 'all'}, default 'any'
If the Index is a MultiIndex, drop the value when any or all levels
are NaN.
Returns
-------
Index
"""
if how not in ("any", "all"):
raise ValueError(f"invalid how option: {how}")
if self.hasnans:
return self._shallow_copy(self._values[~self._isnan])
return self._shallow_copy()
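# A minimal usage sketch for dropna(), assuming ``import pandas as pd`` and
# ``import numpy as np``; the exact repr class depends on the pandas version:
# >>> pd.Index([1.0, np.nan, 3.0]).dropna()
# Float64Index([1.0, 3.0], dtype='float64')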
# --------------------------------------------------------------------
# Uniqueness Methods
def unique(self, level=None):
"""
Return unique values in the index. Uniques are returned in order
of appearance; this does NOT sort.
Parameters
----------
level : int or str, optional, default None
Only return values from specified level (for MultiIndex).
.. versionadded:: 0.23.0
Returns
-------
Index without duplicates
See Also
--------
unique
Series.unique
"""
if level is not None:
self._validate_index_level(level)
result = super().unique()
return self._shallow_copy(result)
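# A minimal usage sketch for unique(), assuming ``import pandas as pd``;
# uniques keep first-appearance order; the exact repr class depends on version:
# >>> pd.Index([3, 3, 1, 2, 1]).unique()
# Int64Index([3, 1, 2], dtype='int64')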
def drop_duplicates(self, keep="first"):
"""
Return Index with duplicate values removed.
Parameters
----------
keep : {'first', 'last', ``False``}, default 'first'
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
Returns
-------
deduplicated : Index
See Also
--------
Series.drop_duplicates : Equivalent method on Series.
DataFrame.drop_duplicates : Equivalent method on DataFrame.
Index.duplicated : Related method on Index, indicating duplicate
Index values.
Examples
--------
Generate a pandas.Index with duplicate values.
>>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'])
The `keep` parameter controls which duplicate values are removed.
The value 'first' keeps the first occurrence for each
set of duplicated entries. The default value of keep is 'first'.
>>> idx.drop_duplicates(keep='first')
Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object')
The value 'last' keeps the last occurrence for each set of duplicated
entries.
>>> idx.drop_duplicates(keep='last')
Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object')
The value ``False`` discards all sets of duplicated entries.
>>> idx.drop_duplicates(keep=False)
Index(['cow', 'beetle', 'hippo'], dtype='object')
"""
return super().drop_duplicates(keep=keep)
def duplicated(self, keep="first"):
"""
Indicate duplicate index values.
Duplicated values are indicated as ``True`` values in the resulting
array. Either all duplicates, all except the first, or all except the
last occurrence of duplicates can be indicated.
Parameters
----------
keep : {'first', 'last', False}, default 'first'
The value or values in a set of duplicates to mark as missing.
- 'first' : Mark duplicates as ``True`` except for the first
occurrence.
- 'last' : Mark duplicates as ``True`` except for the last
occurrence.
- ``False`` : Mark all duplicates as ``True``.
Returns
-------
numpy.ndarray
See Also
--------
Series.duplicated : Equivalent method on pandas.Series.
DataFrame.duplicated : Equivalent method on pandas.DataFrame.
Index.drop_duplicates : Remove duplicate values from Index.
Examples
--------
By default, for each set of duplicated values, the first occurrence is
set to False and all others to True:
>>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama'])
>>> idx.duplicated()
array([False, False, True, False, True])
which is equivalent to
>>> idx.duplicated(keep='first')
array([False, False, True, False, True])
By using 'last', the last occurrence of each set of duplicated values
is set to False and all others to True:
>>> idx.duplicated(keep='last')
array([ True, False, True, False, False])
By setting keep to ``False``, all duplicates are True:
>>> idx.duplicated(keep=False)
array([ True, False, True, False, True])
"""
return super().duplicated(keep=keep)
def _get_unique_index(self, dropna: bool = False):
"""
Returns an index containing unique values.
Parameters
----------
dropna : bool, default False
If True, NaN values are dropped.
Returns
-------
uniques : index
"""
if self.is_unique and not dropna:
return self
values = self.values
if not self.is_unique:
values = self.unique()
if not isinstance(self, ABCMultiIndex):
# extract an array to pass to _shallow_copy
values = values._data
if dropna:
try:
if self.hasnans:
values = values[~isna(values)]
except NotImplementedError:
pass
return self._shallow_copy(values)
# --------------------------------------------------------------------
# Arithmetic & Logical Methods
def __add__(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
from pandas import Series
return Index(Series(self) + other)
def __radd__(self, other):
from pandas import Series
return Index(other + Series(self))
def __iadd__(self, other):
# alias for __add__
return self + other
def __sub__(self, other):
return Index(np.array(self) - other)
def __rsub__(self, other):
# wrap Series to ensure we pin name correctly
from pandas import Series
return Index(other - Series(self))
def __and__(self, other):
return self.intersection(other)
def __or__(self, other):
return self.union(other)
def __xor__(self, other):
return self.symmetric_difference(other)
def __nonzero__(self):
raise ValueError(
f"The truth value of a {type(self).__name__} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
)
__bool__ = __nonzero__
# --------------------------------------------------------------------
# Set Operation Methods
def _get_reconciled_name_object(self, other):
"""
If the result of a set operation will be self,
return self, unless the name changes, in which
case make a shallow copy of self.
"""
name = get_op_result_name(self, other)
if self.name != name:
return self._shallow_copy(name=name)
return self
def _union_incompatible_dtypes(self, other, sort):
"""
Casts this and other index to object dtype to allow the formation
of a union between incompatible types.
Parameters
----------
other : Index or array-like
sort : False or None, default False
Whether to sort the resulting index.
* False : do not sort the result.
* None : sort the result, except when `self` and `other` are equal
or when the values cannot be compared.
Returns
-------
Index
"""
this = self.astype(object, copy=False)
# cast to Index for when `other` is list-like
other = Index(other).astype(object, copy=False)
return Index.union(this, other, sort=sort).astype(object, copy=False)
def _is_compatible_with_other(self, other) -> bool:
"""
Check whether this and the other dtype are compatible with each other.
Meaning a union can be formed between them without needing to be cast
to dtype object.
Parameters
----------
other : Index or array-like
Returns
-------
bool
"""
return type(self) is type(other) and is_dtype_equal(self.dtype, other.dtype)
def _validate_sort_keyword(self, sort):
if sort not in [None, False]:
raise ValueError(
"The 'sort' keyword only takes the values of "
f"None or False; {sort} was passed."
)
def union(self, other, sort=None):
"""
Form the union of two Index objects.
If the Index objects are incompatible, both Index objects will be
cast to dtype('object') first.
.. versionchanged:: 0.25.0
Parameters
----------
other : Index or array-like
sort : bool or None, default None
Whether to sort the resulting Index.
* None : Sort the result, except when
1. `self` and `other` are equal.
2. `self` or `other` has length 0.
3. Some values in `self` or `other` cannot be compared.
A RuntimeWarning is issued in this case.
* False : do not sort the result.
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default value from ``True`` to ``None``
(without change in behaviour).
Returns
-------
union : Index
Examples
--------
Union matching dtypes
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.union(idx2)
Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')
Union mismatched dtypes
>>> idx1 = pd.Index(['a', 'b', 'c', 'd'])
>>> idx2 = pd.Index([1, 2, 3, 4])
>>> idx1.union(idx2)
Index(['a', 'b', 'c', 'd', 1, 2, 3, 4], dtype='object')
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
if not self._is_compatible_with_other(other):
return self._union_incompatible_dtypes(other, sort=sort)
return self._union(other, sort=sort)
def _union(self, other, sort):
"""
Specific union logic should go here. In subclasses, union behavior
should be overwritten here rather than in `self.union`.
Parameters
----------
other : Index or array-like
sort : False or None, default False
Whether to sort the resulting index.
* False : do not sort the result.
* None : sort the result, except when `self` and `other` are equal
or when the values cannot be compared.
Returns
-------
Index
"""
if not len(other) or self.equals(other):
return self._get_reconciled_name_object(other)
if not len(self):
return other._get_reconciled_name_object(self)
# TODO(EA): setops-refactor, clean all this up
lvals = self._values
rvals = other._values
if sort is None and self.is_monotonic and other.is_monotonic:
try:
result = self._outer_indexer(lvals, rvals)[0]
except TypeError:
# incomparable objects
result = list(lvals)
# worth making this faster? a very unusual case
value_set = set(lvals)
result.extend([x for x in rvals if x not in value_set])
result = Index(result)._values # do type inference here
else:
# find indexes of things in "other" that are not in "self"
if self.is_unique:
indexer = self.get_indexer(other)
indexer = (indexer == -1).nonzero()[0]
else:
indexer = algos.unique1d(self.get_indexer_non_unique(other)[1])
if len(indexer) > 0:
other_diff = algos.take_nd(rvals, indexer, allow_fill=False)
result = concat_compat((lvals, other_diff))
else:
result = lvals
if sort is None:
try:
result = algos.safe_sort(result)
except TypeError as err:
warnings.warn(
f"{err}, sort order is undefined for incomparable objects",
RuntimeWarning,
stacklevel=3,
)
# for subclasses
return self._wrap_setop_result(other, result)
def _wrap_setop_result(self, other, result):
name = get_op_result_name(self, other)
return self._shallow_copy(result, name=name)
# TODO: standardize return type of non-union setops type(self vs other)
def intersection(self, other, sort=False):
"""
Form the intersection of two Index objects.
This returns a new Index with elements common to the index and `other`.
Parameters
----------
other : Index or array-like
sort : False or None, default False
Whether to sort the resulting index.
* False : do not sort the result.
* None : sort the result, except when `self` and `other` are equal
or when the values cannot be compared.
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default from ``True`` to ``False``, to match
the behaviour of 0.23.4 and earlier.
Returns
-------
intersection : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.intersection(idx2)
Int64Index([3, 4], dtype='int64')
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other = ensure_index(other)
if self.equals(other):
return self._get_reconciled_name_object(other)
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype("O")
other = other.astype("O")
return this.intersection(other, sort=sort)
# TODO(EA): setops-refactor, clean all this up
lvals = self._values
rvals = other._values
if self.is_monotonic and other.is_monotonic:
try:
result = self._inner_indexer(lvals, rvals)[0]
except TypeError:
pass
else:
return self._wrap_setop_result(other, result)
try:
indexer = Index(rvals).get_indexer(lvals)
indexer = indexer.take((indexer != -1).nonzero()[0])
except (InvalidIndexError, IncompatibleFrequency):
# InvalidIndexError raised by get_indexer if non-unique
# IncompatibleFrequency raised by PeriodIndex.get_indexer
indexer = algos.unique1d(Index(rvals).get_indexer_non_unique(lvals)[0])
indexer = indexer[indexer != -1]
taken = other.take(indexer)
res_name = get_op_result_name(self, other)
if sort is None:
taken = algos.safe_sort(taken.values)
return self._shallow_copy(taken, name=res_name)
taken.name = res_name
return taken
def difference(self, other, sort=None):
"""
Return a new Index with elements from the index that are not in
`other`.
This is the set difference of two Index objects.
Parameters
----------
other : Index or array-like
sort : False or None, default None
Whether to sort the resulting index. By default, the
values are attempted to be sorted, but any TypeError from
incomparable elements is caught by pandas.
* None : Attempt to sort the result, but catch any TypeErrors
from comparing incomparable elements.
* False : Do not sort the result.
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default value from ``True`` to ``None``
(without change in behaviour).
Returns
-------
difference : Index
Examples
--------
>>> idx1 = pd.Index([2, 1, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.difference(idx2)
Int64Index([1, 2], dtype='int64')
>>> idx1.difference(idx2, sort=False)
Int64Index([2, 1], dtype='int64')
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
if self.equals(other):
# pass an empty np.ndarray with the appropriate dtype
return self._shallow_copy(self._data[:0])
other, result_name = self._convert_can_do_setop(other)
this = self._get_unique_index()
indexer = this.get_indexer(other)
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True)
the_diff = this.values.take(label_diff)
if sort is None:
try:
the_diff = algos.safe_sort(the_diff)
except TypeError:
pass
return this._shallow_copy(the_diff, name=result_name)
def symmetric_difference(self, other, result_name=None, sort=None):
"""
Compute the symmetric difference of two Index objects.
Parameters
----------
other : Index or array-like
result_name : str
sort : False or None, default None
Whether to sort the resulting index. By default, the
values are attempted to be sorted, but any TypeError from
incomparable elements is caught by pandas.
* None : Attempt to sort the result, but catch any TypeErrors
from comparing incomparable elements.
* False : Do not sort the result.
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default value from ``True`` to ``None``
(without change in behaviour).
Returns
-------
symmetric_difference : Index
Notes
-----
``symmetric_difference`` contains elements that appear in either
``idx1`` or ``idx2`` but not both. Equivalent to the Index created by
``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates
dropped.
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([2, 3, 4, 5])
>>> idx1.symmetric_difference(idx2)
Int64Index([1, 5], dtype='int64')
You can also use the ``^`` operator:
>>> idx1 ^ idx2
Int64Index([1, 5], dtype='int64')
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_name_update = self._convert_can_do_setop(other)
if result_name is None:
result_name = result_name_update
this = self._get_unique_index()
other = other._get_unique_index()
indexer = this.get_indexer(other)
# {this} minus {other}
common_indexer = indexer.take((indexer != -1).nonzero()[0])
left_indexer = np.setdiff1d(
np.arange(this.size), common_indexer, assume_unique=True
)
left_diff = this._values.take(left_indexer)
# {other} minus {this}
right_indexer = (indexer == -1).nonzero()[0]
right_diff = other._values.take(right_indexer)
the_diff = concat_compat([left_diff, right_diff])
if sort is None:
try:
the_diff = algos.safe_sort(the_diff)
except TypeError:
pass
attribs = self._get_attributes_dict()
attribs["name"] = result_name
if "freq" in attribs:
attribs["freq"] = None
return self._shallow_copy_with_infer(the_diff, **attribs)
def _assert_can_do_setop(self, other):
if not is_list_like(other):
raise TypeError("Input must be Index or array-like")
return True
def _convert_can_do_setop(self, other):
if not isinstance(other, Index):
other = Index(other, name=self.name)
result_name = self.name
else:
result_name = get_op_result_name(self, other)
return other, result_name
# --------------------------------------------------------------------
# Indexing Methods
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
tolerance : int or float, optional
Maximum distance from index value for inexact matches. The value of
the index at the matching location must satisfy the equation
``abs(index[loc] - key) <= tolerance``.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
loc : int if unique index, slice if monotonic index, else mask
Examples
--------
>>> unique_index = pd.Index(list('abc'))
>>> unique_index.get_loc('b')
1
>>> monotonic_index = pd.Index(list('abbc'))
>>> monotonic_index.get_loc('b')
slice(1, 3, None)
>>> non_monotonic_index = pd.Index(list('abcb'))
>>> non_monotonic_index.get_loc('b')
array([False, True, False, True])
"""
if method is None:
if tolerance is not None:
raise ValueError(
"tolerance argument only valid if using pad, "
"backfill or nearest lookups"
)
casted_key = self._maybe_cast_indexer(key)
try:
return self._engine.get_loc(casted_key)
except KeyError as err:
raise KeyError(key) from err
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, np.asarray(key))
indexer = self.get_indexer([key], method=method, tolerance=tolerance)
if indexer.ndim > 1 or indexer.size > 1:
raise TypeError("get_loc requires scalar valued input")
loc = indexer.item()
if loc == -1:
raise KeyError(key)
return loc
_index_shared_docs[
"get_indexer"
] = """
Compute indexer and mask for new index given the current index. The
indexer should be then used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : %(target_klass)s
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
limit : int, optional
Maximum number of consecutive labels in ``target`` to match for
inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
indexer : ndarray of int
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
%(raises_section)s
Examples
--------
>>> index = pd.Index(['c', 'a', 'b'])
>>> index.get_indexer(['a', 'b', 'x'])
array([ 1, 2, -1])
Notice that the return value is an array of locations in ``index``
and ``x`` is marked by -1, as it is not in ``index``.
"""
@Appender(_index_shared_docs["get_indexer"] % _index_doc_kwargs)
def get_indexer(
self, target, method=None, limit=None, tolerance=None
) -> np.ndarray:
method = missing.clean_reindex_fill_method(method)
target = ensure_index(target)
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance, target)
# Treat boolean labels passed to a numeric index as not found. Without
# this fix False and True would be treated as 0 and 1 respectively.
# (GH #16877)
if target.is_boolean() and self.is_numeric():
return ensure_platform_int(np.repeat(-1, target.size))
pself, ptarget = self._maybe_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer(
ptarget, method=method, limit=limit, tolerance=tolerance
)
if not is_dtype_equal(self.dtype, target.dtype):
this = self.astype(object)
target = target.astype(object)
return this.get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
if not self.is_unique:
raise InvalidIndexError(
"Reindexing only valid with uniquely valued Index objects"
)
if method == "pad" or method == "backfill":
indexer = self._get_fill_indexer(target, method, limit, tolerance)
elif method == "nearest":
indexer = self._get_nearest_indexer(target, limit, tolerance)
else:
if tolerance is not None:
raise ValueError(
"tolerance argument only valid if doing pad, "
"backfill or nearest reindexing"
)
if limit is not None:
raise ValueError(
"limit argument only valid if doing pad, "
"backfill or nearest reindexing"
)
indexer = self._engine.get_indexer(target._get_engine_target())
return ensure_platform_int(indexer)
def _convert_tolerance(self, tolerance, target):
# override this method on subclasses
tolerance = np.asarray(tolerance)
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError("list-like tolerance size must match target index size")
return tolerance
def _get_fill_indexer(
self, target: "Index", method: str_t, limit=None, tolerance=None
) -> np.ndarray:
target_values = target._get_engine_target()
if self.is_monotonic_increasing and target.is_monotonic_increasing:
engine_method = (
self._engine.get_pad_indexer
if method == "pad"
else self._engine.get_backfill_indexer
)
indexer = engine_method(target_values, limit)
else:
indexer = self._get_fill_indexer_searchsorted(target, method, limit)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target_values, indexer, tolerance)
return indexer
def _get_fill_indexer_searchsorted(
self, target: "Index", method: str_t, limit=None
) -> np.ndarray:
"""
Fallback pad/backfill get_indexer that works for monotonic decreasing
indexes and non-monotonic targets.
"""
if limit is not None:
raise ValueError(
f"limit argument for {repr(method)} method only well-defined "
"if index and target are monotonic"
)
side = "left" if method == "pad" else "right"
# find exact matches first (this simplifies the algorithm)
indexer = self.get_indexer(target)
nonexact = indexer == -1
indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side)
if side == "left":
# searchsorted returns "indices into a sorted array such that,
# if the corresponding elements in v were inserted before the
# indices, the order of a would be preserved".
# Thus, we need to subtract 1 to find values to the left.
indexer[nonexact] -= 1
# This also mapped not found values (values of 0 from
# np.searchsorted) to -1, which conveniently is also our
# sentinel for missing values
else:
# Mark indices to the right of the largest value as not found
indexer[indexer == len(self)] = -1
return indexer
def _get_nearest_indexer(self, target: "Index", limit, tolerance) -> np.ndarray:
"""
Get the indexer for the nearest index labels; requires an index with
values that can be subtracted from each other (e.g., not strings or
tuples).
"""
left_indexer = self.get_indexer(target, "pad", limit=limit)
right_indexer = self.get_indexer(target, "backfill", limit=limit)
target_values = target._values
left_distances = np.abs(self._values[left_indexer] - target_values)
right_distances = np.abs(self._values[right_indexer] - target_values)
op = operator.lt if self.is_monotonic_increasing else operator.le
indexer = np.where(
op(left_distances, right_distances) | (right_indexer == -1),
left_indexer,
right_indexer,
)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target_values, indexer, tolerance)
return indexer
def _filter_indexer_tolerance(
self,
target: Union["Index", np.ndarray, ExtensionArray],
indexer: np.ndarray,
tolerance,
) -> np.ndarray:
distance = abs(self._values[indexer] - target)
indexer = np.where(distance <= tolerance, indexer, -1)
return indexer
# --------------------------------------------------------------------
# Indexer Conversion Methods
def _get_partial_string_timestamp_match_key(self, key):
"""
Translate any partial string timestamp matches in key, returning the
new key.
Only relevant for MultiIndex.
"""
# GH#10331
return key
def _validate_positional_slice(self, key: slice):
"""
For positional indexing, a slice must have either int or None
for each of start, stop, and step.
"""
self._validate_indexer("positional", key.start, "iloc")
self._validate_indexer("positional", key.stop, "iloc")
self._validate_indexer("positional", key.step, "iloc")
def _convert_slice_indexer(self, key: slice, kind: str_t):
"""
Convert a slice indexer.
By definition, these are labels unless 'iloc' is passed in.
Floats are not allowed as the start, step, or stop of the slice.
Parameters
----------
key : label of the slice bound
kind : {'loc', 'getitem'}
"""
assert kind in ["loc", "getitem"], kind
# potentially cast the bounds to integers
start, stop, step = key.start, key.stop, key.step
# figure out if this is a positional indexer
def is_int(v):
return v is None or is_integer(v)
is_index_slice = is_int(start) and is_int(stop) and is_int(step)
is_positional = is_index_slice and not (
self.is_integer() or self.is_categorical()
)
if kind == "getitem":
"""
called from the getitem slicers, validate that we are in fact
integers
"""
if self.is_integer() or is_index_slice:
self._validate_indexer("slice", key.start, "getitem")
self._validate_indexer("slice", key.stop, "getitem")
self._validate_indexer("slice", key.step, "getitem")
return key
# convert the slice to an indexer here
# if we are mixed and have integers
if is_positional and self.is_mixed():
try:
# Validate start & stop
if start is not None:
self.get_loc(start)
if stop is not None:
self.get_loc(stop)
is_positional = False
except KeyError:
pass
if com.is_null_slice(key):
# It doesn't matter if we are positional or label based
indexer = key
elif is_positional:
if kind == "loc":
# GH#16121, GH#24612, GH#31810
warnings.warn(
"Slicing a positional slice with .loc is not supported, "
"and will raise TypeError in a future version. "
"Use .loc with labels or .iloc with positions instead.",
FutureWarning,
stacklevel=6,
)
indexer = key
else:
indexer = self.slice_indexer(start, stop, step, kind=kind)
return indexer
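    # Usage sketch (illustrative): on a non-integer index, an all-integer
    # slice is treated positionally, while label bounds are resolved to
    # positions via ``slice_indexer``.
    # >>> idx = pd.Index(list("abcd"))
    # >>> idx[1:3]
    # Index(['b', 'c'], dtype='object')
    # >>> idx.slice_indexer("b", "c")
    # slice(1, 3, None)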
def _convert_listlike_indexer(self, keyarr):
"""
Parameters
----------
keyarr : list-like
Indexer to convert.
Returns
-------
indexer : numpy.ndarray or None
Return an ndarray or None if cannot convert.
keyarr : numpy.ndarray
Return tuple-safe keys.
"""
if isinstance(keyarr, Index):
keyarr = self._convert_index_indexer(keyarr)
else:
keyarr = self._convert_arr_indexer(keyarr)
indexer = self._convert_list_indexer(keyarr)
return indexer, keyarr
def _convert_arr_indexer(self, keyarr):
"""
Convert an array-like indexer to the appropriate dtype.
Parameters
----------
keyarr : array-like
Indexer to convert.
Returns
-------
converted_keyarr : array-like
"""
keyarr = com.asarray_tuplesafe(keyarr)
return keyarr
def _convert_index_indexer(self, keyarr):
"""
Convert an Index indexer to the appropriate dtype.
Parameters
----------
keyarr : Index (or sub-class)
Indexer to convert.
Returns
-------
converted_keyarr : Index (or sub-class)
"""
return keyarr
def _convert_list_indexer(self, keyarr):
"""
Convert a list-like indexer to the appropriate dtype.
Parameters
----------
keyarr : Index (or sub-class)
Indexer to convert.
Returns
-------
positional indexer or None
"""
return None
def _invalid_indexer(self, form: str_t, key):
"""
Consistent invalid indexer message.
"""
raise TypeError(
f"cannot do {form} indexing on {type(self).__name__} with these "
f"indexers [{key}] of type {type(key).__name__}"
)
# --------------------------------------------------------------------
# Reindex Methods
def _can_reindex(self, indexer):
"""
Check if we are allowing reindexing with this particular indexer.
Parameters
----------
indexer : an integer indexer
Raises
------
        ValueError if it is a duplicate axis
"""
# trying to reindex on an axis with duplicates
if not self.is_unique and len(indexer):
raise ValueError("cannot reindex from a duplicate axis")
def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
"""
Create index with target's values (move/add/delete values
as necessary).
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index.
indexer : np.ndarray or None
Indices of output values in original index.
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, "name")
# GH7774: preserve dtype/tz if target is empty and not an Index.
target = _ensure_has_len(target) # target may be an iterator
if not isinstance(target, Index) and len(target) == 0:
if isinstance(self, ABCRangeIndex):
values = range(0)
else:
values = self._data[:0] # appropriately-dtyped empty array
target = self._simple_new(values, name=self.name)
else:
target = ensure_index(target)
if level is not None:
if method is not None:
raise TypeError("Fill method not supported if level passed")
_, indexer, _ = self._join_level(
target, level, how="right", return_indexers=True
)
else:
if self.equals(target):
indexer = None
else:
# check is_overlapping for IntervalIndex compat
if self.is_unique and not getattr(self, "is_overlapping", False):
indexer = self.get_indexer(
target, method=method, limit=limit, tolerance=tolerance
)
else:
if method is not None or limit is not None:
raise ValueError(
"cannot reindex a non-unique index "
"with a method or limit"
)
indexer, missing = self.get_indexer_non_unique(target)
if preserve_names and target.nlevels == 1 and target.name != self.name:
target = target.copy()
target.name = self.name
return target, indexer
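    # Usage sketch (illustrative): labels missing from the original index get
    # an indexer entry of -1.
    # >>> idx = pd.Index(['a', 'b', 'c'])
    # >>> idx.reindex(['a', 'c', 'd'])
    # (Index(['a', 'c', 'd'], dtype='object'), array([ 0,  2, -1]))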
def _reindex_non_unique(self, target):
"""
Create a new index with target's values (move/add/delete values as
necessary) use with non-unique Index and a possibly non-unique target.
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index.
indexer : np.ndarray or None
Indices of output values in original index.
"""
target = ensure_index(target)
indexer, missing = self.get_indexer_non_unique(target)
check = indexer != -1
new_labels = self.take(indexer[check])
new_indexer = None
if len(missing):
length = np.arange(len(indexer))
missing = ensure_platform_int(missing)
missing_labels = target.take(missing)
missing_indexer = ensure_int64(length[~check])
cur_labels = self.take(indexer[check]).values
cur_indexer = ensure_int64(length[check])
new_labels = np.empty(tuple([len(indexer)]), dtype=object)
new_labels[cur_indexer] = cur_labels
new_labels[missing_indexer] = missing_labels
# a unique indexer
if target.is_unique:
# see GH5553, make sure we use the right indexer
new_indexer = np.arange(len(indexer))
new_indexer[cur_indexer] = np.arange(len(cur_labels))
new_indexer[missing_indexer] = -1
# we have a non_unique selector, need to use the original
# indexer here
else:
# need to retake to have the same size as the indexer
indexer[~check] = -1
# reset the new indexer to account for the new size
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[~check] = -1
new_index = self._shallow_copy_with_infer(new_labels)
return new_index, indexer, new_indexer
# --------------------------------------------------------------------
# Join Methods
def join(self, other, how="left", level=None, return_indexers=False, sort=False):
"""
Compute join_index and indexers to conform data
structures to the new index.
Parameters
----------
other : Index
how : {'left', 'right', 'inner', 'outer'}
level : int or level name, default None
return_indexers : bool, default False
sort : bool, default False
Sort the join keys lexicographically in the result Index. If False,
the order of the join keys depends on the join type (how keyword).
Returns
-------
join_index, (left_indexer, right_indexer)
"""
other = ensure_index(other)
self_is_mi = isinstance(self, ABCMultiIndex)
other_is_mi = isinstance(other, ABCMultiIndex)
# try to figure out the join level
# GH3662
if level is None and (self_is_mi or other_is_mi):
# have the same levels/names so a simple join
if self.names == other.names:
pass
else:
return self._join_multi(other, how=how, return_indexers=return_indexers)
# join on the level
if level is not None and (self_is_mi or other_is_mi):
return self._join_level(
other, level, how=how, return_indexers=return_indexers
)
if len(other) == 0 and how in ("left", "outer"):
join_index = self._shallow_copy()
if return_indexers:
rindexer = np.repeat(-1, len(join_index))
return join_index, None, rindexer
else:
return join_index
if len(self) == 0 and how in ("right", "outer"):
join_index = other._shallow_copy()
if return_indexers:
lindexer = np.repeat(-1, len(join_index))
return join_index, lindexer, None
else:
return join_index
if self._join_precedence < other._join_precedence:
how = {"right": "left", "left": "right"}.get(how, how)
result = other.join(
self, how=how, level=level, return_indexers=return_indexers
)
if return_indexers:
x, y, z = result
result = x, z, y
return result
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype("O")
other = other.astype("O")
return this.join(other, how=how, return_indexers=return_indexers)
_validate_join_method(how)
if not self.is_unique and not other.is_unique:
return self._join_non_unique(
other, how=how, return_indexers=return_indexers
)
elif not self.is_unique or not other.is_unique:
if self.is_monotonic and other.is_monotonic:
return self._join_monotonic(
other, how=how, return_indexers=return_indexers
)
else:
return self._join_non_unique(
other, how=how, return_indexers=return_indexers
)
elif self.is_monotonic and other.is_monotonic:
try:
return self._join_monotonic(
other, how=how, return_indexers=return_indexers
)
except TypeError:
pass
if how == "left":
join_index = self
elif how == "right":
join_index = other
elif how == "inner":
# TODO: sort=False here for backwards compat. It may
# be better to use the sort parameter passed into join
join_index = self.intersection(other, sort=False)
elif how == "outer":
# TODO: sort=True here for backwards compat. It may
# be better to use the sort parameter passed into join
join_index = self.union(other)
if sort:
join_index = join_index.sort_values()
if return_indexers:
if join_index is self:
lindexer = None
else:
lindexer = self.get_indexer(join_index)
if join_index is other:
rindexer = None
else:
rindexer = other.get_indexer(join_index)
return join_index, lindexer, rindexer
else:
return join_index
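    # Usage sketch (illustrative, output shown for an Int64Index):
    # >>> left = pd.Index([1, 2, 3])
    # >>> right = pd.Index([2, 3, 4])
    # >>> left.join(right, how="inner")
    # Int64Index([2, 3], dtype='int64')
    # >>> left.join(right, how="outer", return_indexers=True)
    # (Int64Index([1, 2, 3, 4], dtype='int64'),
    #  array([ 0,  1,  2, -1]), array([-1,  0,  1,  2]))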
def _join_multi(self, other, how, return_indexers=True):
from pandas.core.indexes.multi import MultiIndex
from pandas.core.reshape.merge import _restore_dropped_levels_multijoin
# figure out join names
self_names = set(com.not_none(*self.names))
other_names = set(com.not_none(*other.names))
overlap = self_names & other_names
# need at least 1 in common
if not overlap:
raise ValueError("cannot join with no overlapping index names")
self_is_mi = isinstance(self, ABCMultiIndex)
other_is_mi = isinstance(other, ABCMultiIndex)
if self_is_mi and other_is_mi:
# Drop the non-matching levels from left and right respectively
ldrop_names = list(self_names - overlap)
rdrop_names = list(other_names - overlap)
# if only the order differs
if not len(ldrop_names + rdrop_names):
self_jnlevels = self
other_jnlevels = other.reorder_levels(self.names)
else:
self_jnlevels = self.droplevel(ldrop_names)
other_jnlevels = other.droplevel(rdrop_names)
# Join left and right
# Join on same leveled multi-index frames is supported
join_idx, lidx, ridx = self_jnlevels.join(
other_jnlevels, how, return_indexers=True
)
# Restore the dropped levels
# Returned index level order is
# common levels, ldrop_names, rdrop_names
dropped_names = ldrop_names + rdrop_names
levels, codes, names = _restore_dropped_levels_multijoin(
self, other, dropped_names, join_idx, lidx, ridx
)
# Re-create the multi-index
multi_join_idx = MultiIndex(
levels=levels, codes=codes, names=names, verify_integrity=False
)
multi_join_idx = multi_join_idx.remove_unused_levels()
return multi_join_idx, lidx, ridx
jl = list(overlap)[0]
# Case where only one index is multi
# make the indices into mi's that match
flip_order = False
if self_is_mi:
self, other = other, self
flip_order = True
# flip if join method is right or left
how = {"right": "left", "left": "right"}.get(how, how)
level = other.names.index(jl)
result = self._join_level(
other, level, how=how, return_indexers=return_indexers
)
if flip_order:
if isinstance(result, tuple):
return result[0], result[2], result[1]
return result
def _join_non_unique(self, other, how="left", return_indexers=False):
from pandas.core.reshape.merge import _get_join_indexers
# We only get here if dtypes match
assert self.dtype == other.dtype
if is_extension_array_dtype(self.dtype):
lvalues = self._data._values_for_argsort()
rvalues = other._data._values_for_argsort()
else:
lvalues = self._values
rvalues = other._values
left_idx, right_idx = _get_join_indexers(
[lvalues], [rvalues], how=how, sort=True
)
left_idx = ensure_platform_int(left_idx)
right_idx = ensure_platform_int(right_idx)
join_index = np.asarray(lvalues.take(left_idx))
mask = left_idx == -1
np.putmask(join_index, mask, rvalues.take(right_idx))
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
return join_index, left_idx, right_idx
else:
return join_index
def _join_level(
self, other, level, how="left", return_indexers=False, keep_order=True
):
"""
The join method *only* affects the level of the resulting
MultiIndex. Otherwise it just exactly aligns the Index data to the
labels of the level in the MultiIndex.
        If ``keep_order == True``, the order of the data indexed by the
MultiIndex will not be changed; otherwise, it will tie out
with `other`.
"""
from pandas.core.indexes.multi import MultiIndex
def _get_leaf_sorter(labels):
"""
            Returns sorter for the innermost level while preserving the
order of higher levels.
"""
if labels[0].size == 0:
return np.empty(0, dtype="int64")
if len(labels) == 1:
lab = ensure_int64(labels[0])
sorter, _ = libalgos.groupsort_indexer(lab, 1 + lab.max())
return sorter
# find indexers of beginning of each set of
# same-key labels w.r.t all but last level
tic = labels[0][:-1] != labels[0][1:]
for lab in labels[1:-1]:
tic |= lab[:-1] != lab[1:]
starts = np.hstack(([True], tic, [True])).nonzero()[0]
lab = ensure_int64(labels[-1])
return lib.get_level_sorter(lab, ensure_int64(starts))
if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
raise TypeError("Join on level between two MultiIndex objects is ambiguous")
left, right = self, other
flip_order = not isinstance(self, MultiIndex)
if flip_order:
left, right = right, left
how = {"right": "left", "left": "right"}.get(how, how)
level = left._get_level_number(level)
old_level = left.levels[level]
if not right.is_unique:
raise NotImplementedError(
"Index._join_level on non-unique index is not implemented"
)
new_level, left_lev_indexer, right_lev_indexer = old_level.join(
right, how=how, return_indexers=True
)
if left_lev_indexer is None:
if keep_order or len(left) == 0:
left_indexer = None
join_index = left
else: # sort the leaves
left_indexer = _get_leaf_sorter(left.codes[: level + 1])
join_index = left[left_indexer]
else:
left_lev_indexer = ensure_int64(left_lev_indexer)
rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level))
new_lev_codes = algos.take_nd(
rev_indexer, left.codes[level], allow_fill=False
)
new_codes = list(left.codes)
new_codes[level] = new_lev_codes
new_levels = list(left.levels)
new_levels[level] = new_level
if keep_order: # just drop missing values. o.w. keep order
left_indexer = np.arange(len(left), dtype=np.intp)
mask = new_lev_codes != -1
if not mask.all():
new_codes = [lab[mask] for lab in new_codes]
left_indexer = left_indexer[mask]
else: # tie out the order with other
if level == 0: # outer most level, take the fast route
ngroups = 1 + new_lev_codes.max()
left_indexer, counts = libalgos.groupsort_indexer(
new_lev_codes, ngroups
)
# missing values are placed first; drop them!
left_indexer = left_indexer[counts[0] :]
new_codes = [lab[left_indexer] for lab in new_codes]
else: # sort the leaves
mask = new_lev_codes != -1
mask_all = mask.all()
if not mask_all:
new_codes = [lab[mask] for lab in new_codes]
left_indexer = _get_leaf_sorter(new_codes[: level + 1])
new_codes = [lab[left_indexer] for lab in new_codes]
# left_indexers are w.r.t masked frame.
# reverse to original frame!
if not mask_all:
left_indexer = mask.nonzero()[0][left_indexer]
join_index = MultiIndex(
levels=new_levels,
codes=new_codes,
names=left.names,
verify_integrity=False,
)
if right_lev_indexer is not None:
right_indexer = algos.take_nd(
right_lev_indexer, join_index.codes[level], allow_fill=False
)
else:
right_indexer = join_index.codes[level]
if flip_order:
left_indexer, right_indexer = right_indexer, left_indexer
if return_indexers:
left_indexer = (
None if left_indexer is None else ensure_platform_int(left_indexer)
)
right_indexer = (
None if right_indexer is None else ensure_platform_int(right_indexer)
)
return join_index, left_indexer, right_indexer
else:
return join_index
def _join_monotonic(self, other, how="left", return_indexers=False):
# We only get here with matching dtypes
assert other.dtype == self.dtype
if self.equals(other):
ret_index = other if how == "right" else self
if return_indexers:
return ret_index, None, None
else:
return ret_index
if is_extension_array_dtype(self.dtype):
sv = self._data._values_for_argsort()
ov = other._data._values_for_argsort()
else:
sv = self._values
ov = other._values
if self.is_unique and other.is_unique:
# We can perform much better than the general case
if how == "left":
join_index = self
lidx = None
ridx = self._left_indexer_unique(sv, ov)
elif how == "right":
join_index = other
lidx = self._left_indexer_unique(ov, sv)
ridx = None
elif how == "inner":
join_index, lidx, ridx = self._inner_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
elif how == "outer":
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
else:
if how == "left":
join_index, lidx, ridx = self._left_indexer(sv, ov)
elif how == "right":
join_index, ridx, lidx = self._left_indexer(ov, sv)
elif how == "inner":
join_index, lidx, ridx = self._inner_indexer(sv, ov)
elif how == "outer":
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
lidx = None if lidx is None else ensure_platform_int(lidx)
ridx = None if ridx is None else ensure_platform_int(ridx)
return join_index, lidx, ridx
else:
return join_index
def _wrap_joined_index(self, joined, other):
name = get_op_result_name(self, other)
return Index(joined, name=name)
# --------------------------------------------------------------------
# Uncategorized Methods
@property
def values(self) -> np.ndarray:
"""
Return an array representing the data in the Index.
.. warning::
We recommend using :attr:`Index.array` or
:meth:`Index.to_numpy`, depending on whether you need
a reference to the underlying data or a NumPy array.
Returns
-------
array: numpy.ndarray or ExtensionArray
See Also
--------
Index.array : Reference to the underlying data.
Index.to_numpy : A NumPy array representing the underlying data.
"""
return self._data.view(np.ndarray)
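    # Usage sketch (illustrative):
    # >>> pd.Index([1, 2, 3]).values
    # array([1, 2, 3])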
@cache_readonly
@Appender(IndexOpsMixin.array.__doc__) # type: ignore
def array(self) -> ExtensionArray:
array = self._data
if isinstance(array, np.ndarray):
from pandas.core.arrays.numpy_ import PandasArray
array = PandasArray(array)
return array
@property
def _values(self) -> Union[ExtensionArray, np.ndarray]:
"""
The best array representation.
This is an ndarray or ExtensionArray.
        ``_values`` are consistent between ``Series`` and ``Index``.
It may differ from the public '.values' method.
index | values | _values |
----------------- | --------------- | ------------- |
Index | ndarray | ndarray |
CategoricalIndex | Categorical | Categorical |
DatetimeIndex | ndarray[M8ns] | DatetimeArray |
DatetimeIndex[tz] | ndarray[M8ns] | DatetimeArray |
PeriodIndex | ndarray[object] | PeriodArray |
IntervalIndex | IntervalArray | IntervalArray |
See Also
--------
values
"""
return self._data
def _get_engine_target(self) -> np.ndarray:
"""
Get the ndarray that we can pass to the IndexEngine constructor.
"""
return self._values
@Appender(IndexOpsMixin.memory_usage.__doc__)
def memory_usage(self, deep: bool = False) -> int:
result = super().memory_usage(deep=deep)
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
def where(self, cond, other=None):
"""
Replace values where the condition is False.
The replacement is taken from other.
Parameters
----------
cond : bool array-like with the same length as self
Condition to select the values on.
other : scalar, or array-like, default None
Replacement if the condition is False.
Returns
-------
pandas.Index
A copy of self with values replaced from other
where the condition is False.
See Also
--------
Series.where : Same method for Series.
DataFrame.where : Same method for DataFrame.
Examples
--------
>>> idx = pd.Index(['car', 'bike', 'train', 'tractor'])
>>> idx
Index(['car', 'bike', 'train', 'tractor'], dtype='object')
>>> idx.where(idx.isin(['car', 'train']), 'other')
Index(['car', 'other', 'train', 'other'], dtype='object')
"""
if other is None:
other = self._na_value
dtype = self.dtype
values = self.values
if is_bool(other) or is_bool_dtype(other):
# bools force casting
values = values.astype(object)
dtype = None
values = np.where(cond, values, other)
if self._is_numeric_dtype and np.any(isna(values)):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return self._shallow_copy_with_infer(values, dtype=dtype)
# construction helpers
@classmethod
def _scalar_data_error(cls, data):
# We return the TypeError so that we can raise it from the constructor
# in order to keep mypy happy
return TypeError(
f"{cls.__name__}(...) must be called with a collection of some "
f"kind, {repr(data)} was passed"
)
@classmethod
def _string_data_error(cls, data):
raise TypeError(
"String dtype not supported, you may need "
"to explicitly cast to a numeric type"
)
def _coerce_scalar_to_index(self, item):
"""
        Coerce a scalar to a type compatible with our index type.
Parameters
----------
item : scalar item to coerce
"""
dtype = self.dtype
if self._is_numeric_dtype and isna(item):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return Index([item], dtype=dtype, **self._get_attributes_dict())
def _to_safe_for_reshape(self):
"""
Convert to object if we are a categorical.
"""
return self
def _convert_for_op(self, value):
"""
Convert value to be insertable to ndarray.
"""
return value
def _assert_can_do_op(self, value):
"""
Check value is valid for scalar op.
"""
if not is_scalar(value):
raise TypeError(f"'value' must be a scalar, passed: {type(value).__name__}")
@property
def _has_complex_internals(self) -> bool:
"""
Indicates if an index is not directly backed by a numpy array
"""
# used to avoid libreduction code paths, which raise or require conversion
return False
def _is_memory_usage_qualified(self) -> bool:
"""
Return a boolean if we need a qualified .info display.
"""
return self.is_object()
def is_type_compatible(self, kind) -> bool:
"""
Whether the index type is compatible with the provided type.
"""
return kind == self.inferred_type
def __contains__(self, key: Any) -> bool:
"""
Return a boolean indicating whether the provided key is in the index.
Parameters
----------
key : label
The key to check if it is present in the index.
Returns
-------
bool
Whether the key search is in the index.
Raises
------
TypeError
If the key is not hashable.
See Also
--------
Index.isin : Returns an ndarray of boolean dtype indicating whether the
list-like key is in the index.
Examples
--------
>>> idx = pd.Index([1, 2, 3, 4])
>>> idx
Int64Index([1, 2, 3, 4], dtype='int64')
>>> 2 in idx
True
>>> 6 in idx
False
"""
hash(key)
try:
return key in self._engine
except (OverflowError, TypeError, ValueError):
return False
def __hash__(self):
raise TypeError(f"unhashable type: {repr(type(self).__name__)}")
def __setitem__(self, key, value):
raise TypeError("Index does not support mutable operations")
def __getitem__(self, key):
"""
Override numpy.ndarray's __getitem__ method to work as desired.
This function adds lists and Series as valid boolean indexers
(ndarrays only supports ndarray with dtype=bool).
If resulting ndim != 1, plain ndarray is returned instead of
corresponding `Index` subclass.
"""
# There's no custom logic to be implemented in __getslice__, so it's
# not overloaded intentionally.
getitem = self._data.__getitem__
promote = self._shallow_copy
if is_scalar(key):
key = com.cast_scalar_indexer(key)
return getitem(key)
if isinstance(key, slice):
# This case is separated from the conditional above to avoid
# pessimization of basic indexing.
return promote(getitem(key))
if com.is_bool_indexer(key):
key = np.asarray(key, dtype=bool)
result = getitem(key)
if not is_scalar(result):
if np.ndim(result) > 1:
deprecate_ndim_indexing(result)
return result
return promote(result)
else:
return result
def _can_hold_identifiers_and_holds_name(self, name) -> bool:
"""
Faster check for ``name in self`` when we know `name` is a Python
identifier (e.g. in NDFrame.__getattr__, which hits this to support
. key lookup). For indexes that can't hold identifiers (everything
but object & categorical) we just return False.
https://github.com/pandas-dev/pandas/issues/19764
"""
if self.is_object() or self.is_categorical():
return name in self
return False
def append(self, other):
"""
Append a collection of Index options together.
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if not isinstance(obj, Index):
raise TypeError("all inputs must be Index")
names = {obj.name for obj in to_concat}
name = None if len(names) > 1 else self.name
return self._concat(to_concat, name)
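    # Usage sketch (illustrative):
    # >>> pd.Index([1, 2]).append(pd.Index([3, 4]))
    # Int64Index([1, 2, 3, 4], dtype='int64')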
def _concat(self, to_concat, name):
typs = _concat.get_dtype_kinds(to_concat)
if len(typs) == 1:
return self._concat_same_dtype(to_concat, name=name)
return Index._concat_same_dtype(self, to_concat, name=name)
def _concat_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class.
"""
# must be overridden in specific classes
klasses = (
ABCDatetimeIndex,
ABCTimedeltaIndex,
ABCPeriodIndex,
ExtensionArray,
ABCIntervalIndex,
)
to_concat = [
x.astype(object) if isinstance(x, klasses) else x for x in to_concat
]
self = to_concat[0]
attribs = self._get_attributes_dict()
attribs["name"] = name
to_concat = [x._values if isinstance(x, Index) else x for x in to_concat]
return self._shallow_copy_with_infer(np.concatenate(to_concat), **attribs)
def putmask(self, mask, value):
"""
Return a new Index of the values set with the mask.
Returns
-------
Index
See Also
--------
numpy.ndarray.putmask
"""
values = self.values.copy()
try:
np.putmask(values, mask, self._convert_for_op(value))
if is_period_dtype(self.dtype):
# .values cast to object, so we need to cast back
values = type(self)(values)._data
return self._shallow_copy(values)
except (ValueError, TypeError) as err:
if is_object_dtype(self):
raise err
# coerces to object
return self.astype(object).putmask(mask, value)
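    # Usage sketch (illustrative):
    # >>> idx = pd.Index([1, 2, 3, 4])
    # >>> idx.putmask(idx > 2, 99)
    # Int64Index([1, 2, 99, 99], dtype='int64')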
def equals(self, other) -> bool:
"""
Determine if two Index objects contain the same elements.
Returns
-------
bool
True if "other" is an Index and it has the same elements as calling
index; False otherwise.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if is_object_dtype(self.dtype) and not is_object_dtype(other.dtype):
# if other is not object, use other's logic for coercion
return other.equals(self)
if isinstance(other, ABCMultiIndex):
# d-level MultiIndex can equal d-tuple Index
return other.equals(self)
if is_extension_array_dtype(other.dtype):
# All EA-backed Index subclasses override equals
return other.equals(self)
return array_equivalent(self._values, other._values)
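    # Usage sketch (illustrative): ``equals`` compares elements only, not
    # names or other attributes.
    # >>> pd.Index(['a', 'b', 'c']).equals(pd.Index(['a', 'b', 'c']))
    # True
    # >>> pd.Index(['a', 'b', 'c']).equals(pd.Index(['a', 'c', 'b']))
    # False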
def identical(self, other) -> bool:
"""
Similar to equals, but check that other comparable attributes are
also equal.
Returns
-------
bool
If two Index objects have equal elements and same type True,
otherwise False.
"""
return (
self.equals(other)
and all(
(
getattr(self, c, None) == getattr(other, c, None)
for c in self._comparables
)
)
and type(self) == type(other)
)
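    # Usage sketch (illustrative): unlike ``equals``, ``identical`` also
    # compares attributes such as ``name``.
    # >>> pd.Index([1, 2], name="x").equals(pd.Index([1, 2], name="y"))
    # True
    # >>> pd.Index([1, 2], name="x").identical(pd.Index([1, 2], name="y"))
    # False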
def asof(self, label):
"""
Return the label from the index, or, if not present, the previous one.
Assuming that the index is sorted, return the passed index label if it
is in the index, or return the previous index label if the passed one
is not in the index.
Parameters
----------
label : object
The label up to which the method returns the latest index label.
Returns
-------
object
The passed label if it is in the index. The previous label if the
passed label is not in the sorted index or `NaN` if there is no
such label.
See Also
--------
Series.asof : Return the latest value in a Series up to the
passed index.
merge_asof : Perform an asof merge (similar to left join but it
matches on nearest key rather than equal key).
Index.get_loc : An `asof` is a thin wrapper around `get_loc`
with method='pad'.
Examples
--------
`Index.asof` returns the latest index label up to the passed label.
>>> idx = pd.Index(['2013-12-31', '2014-01-02', '2014-01-03'])
>>> idx.asof('2014-01-01')
'2013-12-31'
If the label is in the index, the method returns the passed label.
>>> idx.asof('2014-01-02')
'2014-01-02'
If all of the labels in the index are later than the passed label,
NaN is returned.
>>> idx.asof('1999-01-02')
nan
If the index is not sorted, an error is raised.
>>> idx_not_sorted = pd.Index(['2013-12-31', '2015-01-02',
... '2014-01-03'])
>>> idx_not_sorted.asof('2013-12-31')
Traceback (most recent call last):
ValueError: index must be monotonic increasing or decreasing
"""
try:
loc = self.get_loc(label, method="pad")
except KeyError:
return self._na_value
else:
if isinstance(loc, slice):
loc = loc.indices(len(self))[-1]
return self[loc]
def asof_locs(self, where, mask):
"""
Find the locations (indices) of the labels from the index for
every entry in the `where` argument.
As in the `asof` function, if the label (a particular entry in
`where`) is not in the index, the latest index label up to the
passed label is chosen and its index returned.
If all of the labels in the index are later than a label in `where`,
-1 is returned.
`mask` is used to ignore NA values in the index during calculation.
Parameters
----------
where : Index
An Index consisting of an array of timestamps.
mask : array-like
Array of booleans denoting where values in the original
data are not NA.
Returns
-------
numpy.ndarray
An array of locations (indices) of the labels from the Index
which correspond to the return values of the `asof` function
for every element in `where`.
"""
locs = self.values[mask].searchsorted(where.values, side="right")
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where.values < self.values[first])] = -1
return result
def sort_values(self, return_indexer: bool = False, ascending: bool = True):
"""
Return a sorted copy of the index.
Return a sorted copy of the index, and optionally return the indices
that sorted the index itself.
Parameters
----------
return_indexer : bool, default False
Should the indices that would sort the index be returned.
ascending : bool, default True
Should the index values be sorted in an ascending order.
Returns
-------
sorted_index : pandas.Index
Sorted copy of the index.
indexer : numpy.ndarray, optional
The indices that the index itself was sorted by.
See Also
--------
Series.sort_values : Sort values of a Series.
DataFrame.sort_values : Sort values in a DataFrame.
Examples
--------
>>> idx = pd.Index([10, 100, 1, 1000])
>>> idx
Int64Index([10, 100, 1, 1000], dtype='int64')
Sort values in ascending order (default behavior).
>>> idx.sort_values()
Int64Index([1, 10, 100, 1000], dtype='int64')
Sort values in descending order, and also get the indices `idx` was
sorted by.
>>> idx.sort_values(ascending=False, return_indexer=True)
(Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2]))
"""
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
if return_indexer:
return sorted_index, _as
else:
return sorted_index
def sort(self, *args, **kwargs):
"""
Use sort_values instead.
"""
raise TypeError("cannot sort an Index object in-place, use sort_values instead")
def shift(self, periods=1, freq=None):
"""
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int, default 1
Number of periods (or increments) to shift by,
can be positive or negative.
freq : pandas.DateOffset, pandas.Timedelta or str, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
        Offset aliases are valid strings, e.g., 'D', 'W', 'M', etc.
Returns
-------
pandas.Index
Shifted index.
See Also
--------
Series.shift : Shift values of Series.
Notes
-----
This method is only implemented for datetime-like index classes,
i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex.
Examples
--------
Put the first 5 month starts of 2011 into an index.
>>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS')
>>> month_starts
DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01',
'2011-05-01'],
dtype='datetime64[ns]', freq='MS')
Shift the index by 10 days.
>>> month_starts.shift(10, freq='D')
DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11',
'2011-05-11'],
dtype='datetime64[ns]', freq=None)
The default value of `freq` is the `freq` attribute of the index,
which is 'MS' (month start) in this example.
>>> month_starts.shift(10)
DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01',
'2012-03-01'],
dtype='datetime64[ns]', freq='MS')
"""
raise NotImplementedError(f"Not supported for type {type(self).__name__}")
def argsort(self, *args, **kwargs) -> np.ndarray:
"""
Return the integer indices that would sort the index.
Parameters
----------
*args
Passed to `numpy.ndarray.argsort`.
**kwargs
Passed to `numpy.ndarray.argsort`.
Returns
-------
numpy.ndarray
Integer indices that would sort the index if used as
an indexer.
See Also
--------
numpy.argsort : Similar method for NumPy arrays.
Index.sort_values : Return sorted copy of Index.
Examples
--------
>>> idx = pd.Index(['b', 'a', 'd', 'c'])
>>> idx
Index(['b', 'a', 'd', 'c'], dtype='object')
>>> order = idx.argsort()
>>> order
array([1, 0, 3, 2])
>>> idx[order]
Index(['a', 'b', 'c', 'd'], dtype='object')
"""
result = self.asi8
if result is None:
result = np.array(self)
return result.argsort(*args, **kwargs)
def get_value(self, series: "Series", key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing.
Returns
-------
scalar or Series
"""
if not is_scalar(key):
# if key is not a scalar, directly raise an error (the code below
# would convert to numpy arrays and raise later any way) - GH29926
raise InvalidIndexError(key)
try:
# GH 20882, 21257
# First try to convert the key to a location
# If that fails, raise a KeyError if an integer
# index, otherwise, see if key is an integer, and
# try that
loc = self.get_loc(key)
except KeyError:
if not self._should_fallback_to_positional():
raise
elif is_integer(key):
# If the Index cannot hold integer, then this is unambiguously
# a locational lookup.
loc = key
else:
raise
return self._get_values_for_loc(series, loc, key)
def _should_fallback_to_positional(self) -> bool:
"""
If an integer key is not found, should we fall back to positional indexing?
"""
if len(self) > 0 and (self.holds_integer() or self.is_boolean()):
return False
return True
def _get_values_for_loc(self, series: "Series", loc, key):
"""
Do a positional lookup on the given Series, returning either a scalar
or a Series.
Assumes that `series.index is self`
key is included for MultiIndex compat.
"""
if is_integer(loc):
return series._values[loc]
return series.iloc[loc]
def set_value(self, arr, key, value):
"""
Fast lookup of value from 1-dimensional ndarray.
.. deprecated:: 1.0
Notes
-----
Only use this if you know what you're doing.
"""
warnings.warn(
(
"The 'set_value' method is deprecated, and "
"will be removed in a future version."
),
FutureWarning,
stacklevel=2,
)
loc = self._engine.get_loc(key)
validate_numeric_casting(arr.dtype, value)
arr[loc] = value
_index_shared_docs[
"get_indexer_non_unique"
] = """
Compute indexer and mask for new index given the current index. The
        indexer should then be used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : %(target_klass)s
Returns
-------
indexer : ndarray of int
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
missing : ndarray of int
An indexer into the target of the values not found.
These correspond to the -1 in the indexer array.
"""
@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = ensure_index(target)
pself, ptarget = self._maybe_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer_non_unique(ptarget)
if is_categorical_dtype(target.dtype):
tgt_values = np.asarray(target)
else:
tgt_values = target._get_engine_target()
indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
return ensure_platform_int(indexer), missing
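    # Usage sketch (illustrative, output shown schematically):
    # >>> pd.Index(['a', 'b', 'b']).get_indexer_non_unique(['b', 'x'])
    # (array([ 1,  2, -1]), array([1]))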
def get_indexer_for(self, target, **kwargs):
"""
Guaranteed return of an indexer even when non-unique.
This dispatches to get_indexer or get_indexer_non_unique
as appropriate.
Returns
-------
numpy.ndarray
List of indices.
"""
if self.is_unique:
return self.get_indexer(target, **kwargs)
indexer, _ = self.get_indexer_non_unique(target, **kwargs)
return indexer
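    # Usage sketch (illustrative):
    # >>> pd.Index(['a', 'b', 'b']).get_indexer_for(['b'])
    # array([1, 2])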
def _maybe_promote(self, other):
# A hack, but it works
if self.inferred_type == "date" and isinstance(other, ABCDatetimeIndex):
return type(other)(self), other
elif self.inferred_type == "boolean":
if not is_object_dtype(self.dtype):
return self.astype("object"), other.astype("object")
return self, other
def groupby(self, values) -> PrettyDict[Hashable, np.ndarray]:
"""
Group the index labels by a given array of values.
Parameters
----------
values : array
Values used to determine the groups.
Returns
-------
dict
{group name -> group labels}
"""
# TODO: if we are a MultiIndex, we can do better
        # than converting to tuples
if isinstance(values, ABCMultiIndex):
values = values.values
values = ensure_categorical(values)
result = values._reverse_indexer()
# map to the label
result = {k: self.take(v) for k, v in result.items()}
return PrettyDict(result)
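    # Usage sketch (illustrative, output shown schematically):
    # >>> idx = pd.Index(['a', 'b', 'c', 'd'])
    # >>> idx.groupby(np.array([1, 2, 1, 2]))
    # {1: Index(['a', 'c'], dtype='object'), 2: Index(['b', 'd'], dtype='object')}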
def map(self, mapper, na_action=None):
"""
Map values using input correspondence (a dict, Series, or function).
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mapping correspondence.
Returns
-------
applied : Union[Index, MultiIndex], inferred
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
"""
from pandas.core.indexes.multi import MultiIndex
new_values = super()._map_values(mapper, na_action=na_action)
attributes = self._get_attributes_dict()
# we can return a MultiIndex
if new_values.size and isinstance(new_values[0], tuple):
if isinstance(self, MultiIndex):
names = self.names
elif attributes.get("name"):
names = [attributes.get("name")] * len(new_values[0])
else:
names = None
return MultiIndex.from_tuples(new_values, names=names)
attributes["copy"] = False
if not new_values.size:
# empty
attributes["dtype"] = self.dtype
return Index(new_values, **attributes)
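    # Usage sketch (illustrative):
    # >>> pd.Index([1, 2, 3]).map(lambda x: x * 10)
    # Int64Index([10, 20, 30], dtype='int64')
    # >>> pd.Index(['a', 'b']).map({'a': 'x', 'b': 'y'})
    # Index(['x', 'y'], dtype='object')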
# TODO: De-duplicate with map, xref GH#32349
def _transform_index(self, func, level=None) -> "Index":
"""
Apply function to all values found in index.
This includes transforming multiindex entries separately.
Only apply function to one level of the MultiIndex if level is specified.
"""
if isinstance(self, ABCMultiIndex):
if level is not None:
items = [
tuple(func(y) if i == level else y for i, y in enumerate(x))
for x in self
]
else:
items = [tuple(func(y) for y in x) for x in self]
return type(self).from_tuples(items, names=self.names)
else:
items = [func(x) for x in self]
return Index(items, name=self.name, tupleize_cols=False)
def isin(self, values, level=None):
"""
Return a boolean array where the index values are in `values`.
Compute boolean array of whether each index value is found in the
passed set of values. The length of the returned boolean array matches
the length of the index.
Parameters
----------
values : set or list-like
Sought values.
level : str or int, optional
Name or position of the index level to use (if the index is a
`MultiIndex`).
Returns
-------
is_contained : ndarray
NumPy array of boolean values.
See Also
--------
Series.isin : Same for Series.
DataFrame.isin : Same method for DataFrames.
Notes
-----
In the case of `MultiIndex` you must either specify `values` as a
list-like object containing tuples that are the same length as the
number of levels, or specify `level`. Otherwise it will raise a
``ValueError``.
If `level` is specified:
- if it is the name of one *and only one* index level, use that level;
- otherwise it should be a number indicating level position.
Examples
--------
>>> idx = pd.Index([1,2,3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
        Check whether each index value is in a list of values.
>>> idx.isin([1, 4])
array([ True, False, False])
>>> midx = pd.MultiIndex.from_arrays([[1,2,3],
... ['red', 'blue', 'green']],
... names=('number', 'color'))
>>> midx
MultiIndex(levels=[[1, 2, 3], ['blue', 'green', 'red']],
codes=[[0, 1, 2], [2, 0, 1]],
names=['number', 'color'])
Check whether the strings in the 'color' level of the MultiIndex
are in a list of colors.
>>> midx.isin(['red', 'orange', 'yellow'], level='color')
array([ True, False, False])
To check across the levels of a MultiIndex, pass a list of tuples:
>>> midx.isin([(1, 'red'), (3, 'red')])
array([ True, False, False])
For a DatetimeIndex, string values in `values` are converted to
Timestamps.
>>> dates = ['2000-03-11', '2000-03-12', '2000-03-13']
>>> dti = pd.to_datetime(dates)
>>> dti
DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'],
dtype='datetime64[ns]', freq=None)
>>> dti.isin(['2000-03-11'])
array([ True, False, False])
"""
if level is not None:
self._validate_index_level(level)
return algos.isin(self, values)
def _get_string_slice(self, key: str_t, use_lhs: bool = True, use_rhs: bool = True):
# this is for partial string indexing,
# overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex
raise NotImplementedError
def slice_indexer(self, start=None, end=None, step=None, kind=None):
"""
For an ordered or unique index, compute the slice indexer for input
labels and step.
Parameters
----------
start : label, default None
If None, defaults to the beginning.
end : label, default None
If None, defaults to the end.
step : int, default None
kind : str, default None
Returns
-------
indexer : slice
Raises
------
KeyError : If key does not exist, or key is not unique and index is
not ordered.
Notes
-----
        This function assumes that the data is sorted, so use at your own peril.
Examples
--------
This is a method on all index types. For example you can do:
>>> idx = pd.Index(list('abcd'))
>>> idx.slice_indexer(start='b', end='c')
slice(1, 3)
>>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')])
>>> idx.slice_indexer(start='b', end=('c', 'g'))
slice(1, 3)
"""
start_slice, end_slice = self.slice_locs(start, end, step=step, kind=kind)
# return a slice
if not is_scalar(start_slice):
raise AssertionError("Start slice bound is non-scalar")
if not is_scalar(end_slice):
raise AssertionError("End slice bound is non-scalar")
return slice(start_slice, end_slice, step)
def _maybe_cast_indexer(self, key):
"""
If we have a float key and are not a floating index, then try to cast
to an int if equivalent.
"""
if not self.is_floating():
return com.cast_scalar_indexer(key)
return key
def _validate_indexer(self, form: str_t, key, kind: str_t):
"""
        If we are a positional indexer, validate that we have an
        appropriately typed bound: the key must be an integer or None.
"""
assert kind in ["getitem", "iloc"]
if key is None:
pass
elif is_integer(key):
pass
else:
self._invalid_indexer(form, key)
def _maybe_cast_slice_bound(self, label, side: str_t, kind):
"""
This function should be overloaded in subclasses that allow non-trivial
casting on label-slice bounds, e.g. datetime-like indices allowing
strings containing formatted datetimes.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'loc', 'getitem'} or None
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
assert kind in ["loc", "getitem", None]
# We are a plain index here (sub-class override this method if they
# wish to have special treatment for floats/ints, e.g. Float64Index and
# datetimelike Indexes
# reject them
if is_float(label):
self._invalid_indexer("slice", label)
# we are trying to find integer bounds on a non-integer based index
# this is rejected (generally .loc gets you here)
elif is_integer(label):
self._invalid_indexer("slice", label)
return label
def _searchsorted_monotonic(self, label, side="left"):
if self.is_monotonic_increasing:
return self.searchsorted(label, side=side)
elif self.is_monotonic_decreasing:
# np.searchsorted expects ascending sort order, have to reverse
# everything for it to work (element ordering, search side and
# resulting value).
pos = self[::-1].searchsorted(
label, side="right" if side == "left" else "left"
)
return len(self) - pos
raise ValueError("index must be monotonic increasing or decreasing")
def get_slice_bound(self, label, side: str_t, kind) -> int:
"""
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'loc', 'getitem'} or None
Returns
-------
int
Index of label.
"""
assert kind in ["loc", "getitem", None]
if side not in ("left", "right"):
raise ValueError(
"Invalid value for side kwarg, must be either "
f"'left' or 'right': {side}"
)
original_label = label
# For datetime indices label may be a string that has to be converted
# to datetime boundary according to its resolution.
label = self._maybe_cast_slice_bound(label, side, kind)
# we need to look up the label
try:
slc = self.get_loc(label)
except KeyError as err:
try:
return self._searchsorted_monotonic(label, side)
except ValueError:
# raise the original KeyError
raise err
if isinstance(slc, np.ndarray):
# get_loc may return a boolean array or an array of indices, which
# is OK as long as they are representable by a slice.
if is_bool_dtype(slc):
slc = lib.maybe_booleans_to_slice(slc.view("u1"))
else:
slc = lib.maybe_indices_to_slice(slc.astype("i8"), len(self))
if isinstance(slc, np.ndarray):
raise KeyError(
f"Cannot get {side} slice bound for non-unique "
f"label: {repr(original_label)}"
)
if isinstance(slc, slice):
if side == "left":
return slc.start
else:
return slc.stop
else:
if side == "right":
return slc + 1
else:
return slc
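    # Usage sketch (illustrative): with duplicated labels the bound is the
    # edge of the run of matches.
    # >>> idx = pd.Index(['a', 'b', 'b', 'd'])
    # >>> idx.get_slice_bound('b', 'left', 'loc')
    # 1
    # >>> idx.get_slice_bound('b', 'right', 'loc')
    # 3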
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
Compute slice locations for input labels.
Parameters
----------
start : label, default None
If None, defaults to the beginning.
end : label, default None
If None, defaults to the end.
step : int, defaults None
If None, defaults to 1.
kind : {'loc', 'getitem'} or None
Returns
-------
start, end : int
See Also
--------
Index.get_loc : Get location for a single label.
Notes
-----
This method only works if the index is monotonic or unique.
Examples
--------
>>> idx = pd.Index(list('abcd'))
>>> idx.slice_locs(start='b', end='c')
(1, 3)
"""
inc = step is None or step >= 0
if not inc:
# If it's a reverse slice, temporarily swap bounds.
start, end = end, start
# GH 16785: If start and end happen to be date strings with UTC offsets
# attempt to parse and check that the offsets are the same
if isinstance(start, (str, datetime)) and isinstance(end, (str, datetime)):
try:
ts_start = Timestamp(start)
ts_end = Timestamp(end)
except (ValueError, TypeError):
pass
else:
if not tz_compare(ts_start.tzinfo, ts_end.tzinfo):
raise ValueError("Both dates must have the same UTC offset")
start_slice = None
if start is not None:
start_slice = self.get_slice_bound(start, "left", kind)
if start_slice is None:
start_slice = 0
end_slice = None
if end is not None:
end_slice = self.get_slice_bound(end, "right", kind)
if end_slice is None:
end_slice = len(self)
if not inc:
# Bounds at this moment are swapped, swap them back and shift by 1.
#
# slice_locs('B', 'A', step=-1): s='B', e='A'
#
# s='A' e='B'
# AFTER SWAP: | |
# v ------------------> V
# -----------------------------------
# | | |A|A|A|A| | | | | |B|B| | | | |
# -----------------------------------
# ^ <------------------ ^
# SHOULD BE: | |
# end=s-1 start=e-1
#
end_slice, start_slice = start_slice - 1, end_slice - 1
# i == -1 triggers ``len(self) + i`` selection that points to the
# last element, not before-the-first one, subtracting len(self)
# compensates that.
if end_slice == -1:
end_slice -= len(self)
if start_slice == -1:
start_slice -= len(self)
return start_slice, end_slice
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted.
Parameters
----------
loc : int or list of int
Location of item(-s) which will be deleted.
Use a list of locations to delete more than one value at the same time.
Returns
-------
Index
New Index with passed location(-s) deleted.
See Also
--------
        numpy.delete : Delete rows and columns from a NumPy array (ndarray).
Examples
--------
>>> idx = pd.Index(['a', 'b', 'c'])
>>> idx.delete(1)
Index(['a', 'c'], dtype='object')
>>> idx = pd.Index(['a', 'b', 'c'])
>>> idx.delete([0, 2])
Index(['b'], dtype='object')
"""
return self._shallow_copy(np.delete(self._data, loc))
def insert(self, loc: int, item):
"""
Make new Index inserting new item at location.
        Follows Python ``list.insert`` semantics for negative values.
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
"""
# Note: this method is overridden by all ExtensionIndex subclasses,
# so self is never backed by an EA.
arr = np.asarray(self)
item = self._coerce_scalar_to_index(item)._values
idx = np.concatenate((arr[:loc], item, arr[loc:]))
return self._shallow_copy_with_infer(idx)
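    # Usage sketch (illustrative):
    # >>> pd.Index(['a', 'b', 'c']).insert(1, 'x')
    # Index(['a', 'x', 'b', 'c'], dtype='object')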
def drop(self, labels, errors: str_t = "raise"):
"""
Make new Index with passed list of labels deleted.
Parameters
----------
labels : array-like
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
dropped : Index
Raises
------
KeyError
If not all of the labels are found in the selected axis
"""
arr_dtype = "object" if self.dtype == "object" else None
labels = com.index_labels_to_array(labels, dtype=arr_dtype)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
if errors != "ignore":
raise KeyError(f"{labels[mask]} not found in axis")
indexer = indexer[~mask]
return self.delete(indexer)
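    # Usage sketch (illustrative):
    # >>> pd.Index(['a', 'b', 'c']).drop(['b'])
    # Index(['a', 'c'], dtype='object')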
# --------------------------------------------------------------------
# Generated Arithmetic, Comparison, and Unary Methods
@classmethod
def _add_comparison_methods(cls):
"""
Add in comparison methods.
"""
cls.__eq__ = _make_comparison_op(operator.eq, cls)
cls.__ne__ = _make_comparison_op(operator.ne, cls)
cls.__lt__ = _make_comparison_op(operator.lt, cls)
cls.__gt__ = _make_comparison_op(operator.gt, cls)
cls.__le__ = _make_comparison_op(operator.le, cls)
cls.__ge__ = _make_comparison_op(operator.ge, cls)
@classmethod
def _add_numeric_methods_add_sub_disabled(cls):
"""
Add in the numeric add/sub methods to disable.
"""
cls.__add__ = make_invalid_op("__add__")
cls.__radd__ = make_invalid_op("__radd__")
cls.__iadd__ = make_invalid_op("__iadd__")
cls.__sub__ = make_invalid_op("__sub__")
cls.__rsub__ = make_invalid_op("__rsub__")
cls.__isub__ = make_invalid_op("__isub__")
@classmethod
def _add_numeric_methods_disabled(cls):
"""
Add in numeric methods to disable other than add/sub.
"""
cls.__pow__ = make_invalid_op("__pow__")
cls.__rpow__ = make_invalid_op("__rpow__")
cls.__mul__ = make_invalid_op("__mul__")
cls.__rmul__ = make_invalid_op("__rmul__")
cls.__floordiv__ = make_invalid_op("__floordiv__")
cls.__rfloordiv__ = make_invalid_op("__rfloordiv__")
cls.__truediv__ = make_invalid_op("__truediv__")
cls.__rtruediv__ = make_invalid_op("__rtruediv__")
cls.__mod__ = make_invalid_op("__mod__")
cls.__divmod__ = make_invalid_op("__divmod__")
cls.__neg__ = make_invalid_op("__neg__")
cls.__pos__ = make_invalid_op("__pos__")
cls.__abs__ = make_invalid_op("__abs__")
cls.__inv__ = make_invalid_op("__inv__")
@classmethod
def _add_numeric_methods_binary(cls):
"""
Add in numeric methods.
"""
cls.__add__ = _make_arithmetic_op(operator.add, cls)
cls.__radd__ = _make_arithmetic_op(ops.radd, cls)
cls.__sub__ = _make_arithmetic_op(operator.sub, cls)
cls.__rsub__ = _make_arithmetic_op(ops.rsub, cls)
cls.__rpow__ = _make_arithmetic_op(ops.rpow, cls)
cls.__pow__ = _make_arithmetic_op(operator.pow, cls)
cls.__truediv__ = _make_arithmetic_op(operator.truediv, cls)
cls.__rtruediv__ = _make_arithmetic_op(ops.rtruediv, cls)
# TODO: rmod? rdivmod?
cls.__mod__ = _make_arithmetic_op(operator.mod, cls)
cls.__floordiv__ = _make_arithmetic_op(operator.floordiv, cls)
cls.__rfloordiv__ = _make_arithmetic_op(ops.rfloordiv, cls)
cls.__divmod__ = _make_arithmetic_op(divmod, cls)
cls.__mul__ = _make_arithmetic_op(operator.mul, cls)
cls.__rmul__ = _make_arithmetic_op(ops.rmul, cls)
@classmethod
def _add_numeric_methods_unary(cls):
"""
Add in numeric unary methods.
"""
def _make_evaluate_unary(op, opstr):
def _evaluate_numeric_unary(self):
attrs = self._get_attributes_dict()
return Index(op(self.values), **attrs)
_evaluate_numeric_unary.__name__ = opstr
return _evaluate_numeric_unary
cls.__neg__ = _make_evaluate_unary(operator.neg, "__neg__")
cls.__pos__ = _make_evaluate_unary(operator.pos, "__pos__")
cls.__abs__ = _make_evaluate_unary(np.abs, "__abs__")
cls.__inv__ = _make_evaluate_unary(lambda x: -x, "__inv__")
@classmethod
def _add_numeric_methods(cls):
cls._add_numeric_methods_unary()
cls._add_numeric_methods_binary()
@classmethod
def _add_logical_methods(cls):
"""
Add in logical methods.
"""
_doc = """
%(desc)s
Parameters
----------
*args
These parameters will be passed to numpy.%(outname)s.
**kwargs
These parameters will be passed to numpy.%(outname)s.
Returns
-------
%(outname)s : bool or array_like (if axis is specified)
A single element array_like may be converted to bool."""
_index_shared_docs["index_all"] = dedent(
"""
See Also
--------
Index.any : Return whether any element in an Index is True.
Series.any : Return whether any element in a Series is True.
Series.all : Return whether all elements in a Series are True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to True because these are not equal to zero.
Examples
--------
**all**
True, because nonzero integers are considered True.
>>> pd.Index([1, 2, 3]).all()
True
False, because ``0`` is considered False.
>>> pd.Index([0, 1, 2]).all()
False
**any**
True, because ``1`` is considered True.
>>> pd.Index([0, 0, 1]).any()
True
False, because ``0`` is considered False.
>>> pd.Index([0, 0, 0]).any()
False
"""
)
_index_shared_docs["index_any"] = dedent(
"""
See Also
--------
Index.all : Return whether all elements are True.
Series.all : Return whether all elements are True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to True because these are not equal to zero.
Examples
--------
>>> index = pd.Index([0, 1, 2])
>>> index.any()
True
>>> index = pd.Index([0, 0, 0])
>>> index.any()
False
"""
)
def _make_logical_function(name, desc, f):
@Substitution(outname=name, desc=desc)
@Appender(_index_shared_docs["index_" + name])
@Appender(_doc)
def logical_func(self, *args, **kwargs):
result = f(self.values)
if (
isinstance(result, (np.ndarray, ABCSeries, Index))
and result.ndim == 0
):
# return NumPy type
return result.dtype.type(result.item())
else: # pragma: no cover
return result
logical_func.__name__ = name
return logical_func
cls.all = _make_logical_function(
"all", "Return whether all elements are True.", np.all
)
cls.any = _make_logical_function(
"any", "Return whether any element is True.", np.any
)
@classmethod
def _add_logical_methods_disabled(cls):
"""
Add in logical methods to disable.
"""
cls.all = make_invalid_op("all")
cls.any = make_invalid_op("any")
@property
def shape(self):
"""
Return a tuple of the shape of the underlying data.
"""
# not using "(len(self), )" to return "correct" shape if the values
# consists of a >1 D array (see GH-27775)
# overridden in MultiIndex.shape to avoid materializing the values
return self._values.shape
Index._add_numeric_methods_disabled()
Index._add_logical_methods()
Index._add_comparison_methods()
def ensure_index_from_sequences(sequences, names=None):
"""
Construct an index from sequences of data.
A single sequence returns an Index. Many sequences returns a
MultiIndex.
Parameters
----------
sequences : sequence of sequences
names : sequence of str
Returns
-------
index : Index or MultiIndex
Examples
--------
>>> ensure_index_from_sequences([[1, 2, 3]], names=['name'])
Int64Index([1, 2, 3], dtype='int64', name='name')
>>> ensure_index_from_sequences([['a', 'a'], ['a', 'b']],
names=['L1', 'L2'])
MultiIndex([('a', 'a'),
('a', 'b')],
names=['L1', 'L2'])
See Also
--------
ensure_index
"""
from pandas.core.indexes.multi import MultiIndex
if len(sequences) == 1:
if names is not None:
names = names[0]
return Index(sequences[0], name=names)
else:
return MultiIndex.from_arrays(sequences, names=names)
def ensure_index(index_like, copy=False):
"""
Ensure that we have an index from some index-like object.
Parameters
----------
index : sequence
An Index or other sequence
copy : bool
Returns
-------
index : Index or MultiIndex
Examples
--------
>>> ensure_index(['a', 'b'])
Index(['a', 'b'], dtype='object')
>>> ensure_index([('a', 'a'), ('b', 'c')])
Index([('a', 'a'), ('b', 'c')], dtype='object')
>>> ensure_index([['a', 'a'], ['b', 'c']])
MultiIndex([('a', 'b'),
('a', 'c')],
dtype='object')
See Also
--------
ensure_index_from_sequences
"""
if isinstance(index_like, Index):
if copy:
index_like = index_like.copy()
return index_like
if hasattr(index_like, "name"):
return Index(index_like, name=index_like.name, copy=copy)
if is_iterator(index_like):
index_like = list(index_like)
# must check for exactly list here because of strict type
# check in clean_index_list
if isinstance(index_like, list):
if type(index_like) != list:
index_like = list(index_like)
converted, all_arrays = lib.clean_index_list(index_like)
if len(converted) > 0 and all_arrays:
from pandas.core.indexes.multi import MultiIndex
return MultiIndex.from_arrays(converted)
else:
index_like = converted
else:
# clean_index_list does the equivalent of copying
# so only need to do this if not list instance
if copy:
from copy import copy
index_like = copy(index_like)
return Index(index_like)
def _ensure_has_len(seq):
"""
If seq is an iterator, put its values into a list.
"""
try:
len(seq)
except TypeError:
return list(seq)
else:
return seq
def _trim_front(strings):
"""
    Trims leading spaces shared by all strings in the list.
"""
trimmed = strings
while len(strings) > 0 and all(x[0] == " " for x in trimmed):
trimmed = [x[1:] for x in trimmed]
return trimmed
def _validate_join_method(method):
if method not in ["left", "right", "inner", "outer"]:
raise ValueError(f"do not recognize join method {method}")
def default_index(n):
from pandas.core.indexes.range import RangeIndex
return RangeIndex(0, n, name=None)
def maybe_extract_name(name, obj, cls) -> Label:
"""
If no name is passed, then extract it from data, validating hashability.
"""
if name is None and isinstance(obj, (Index, ABCSeries)):
# Note we don't just check for "name" attribute since that would
# pick up e.g. dtype.name
name = obj.name
# GH#29069
if not is_hashable(name):
raise TypeError(f"{cls.__name__}.name must be a hashable type")
return name
def _maybe_cast_with_dtype(data: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray:
"""
If a dtype is passed, cast to the closest matching dtype that is supported
by Index.
Parameters
----------
data : np.ndarray
dtype : np.dtype
copy : bool
Returns
-------
np.ndarray
"""
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
# GH 11836
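    # Hedged behaviour sketch of the rule above (inputs assumed):
    #   _maybe_cast_with_dtype(np.array([1.0, 2.0]), np.dtype("int64"), False)
    #       -> array([1, 2])          # floats that are exactly integers are cast
    #   _maybe_cast_with_dtype(np.array(["0", "1"]), np.dtype("int64"), False)
    #       -> returned unchanged     # string-like data is not coerced to int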
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data, skipna=False)
if inferred == "integer":
data = maybe_cast_to_integer_array(data, dtype, copy=copy)
elif inferred in ["floating", "mixed-integer-float"]:
if isna(data).any():
raise ValueError("cannot convert float NaN to integer")
if inferred == "mixed-integer-float":
data = maybe_cast_to_integer_array(data, dtype)
# If we are actually all equal to integers,
# then coerce to integer.
try:
data = _try_convert_to_int_array(data, copy, dtype)
except ValueError:
data = np.array(data, dtype=np.float64, copy=copy)
elif inferred == "string":
pass
else:
data = data.astype(dtype)
elif is_float_dtype(dtype):
inferred = lib.infer_dtype(data, skipna=False)
if inferred == "string":
pass
else:
data = data.astype(dtype)
else:
data = np.array(data, dtype=dtype, copy=copy)
return data
def _maybe_cast_data_without_dtype(subarr):
"""
If we have an arraylike input but no passed dtype, try to infer
a supported dtype.
Parameters
----------
subarr : np.ndarray, Index, or Series
Returns
-------
converted : np.ndarray or ExtensionArray
dtype : np.dtype or ExtensionDtype
"""
# Runtime import needed bc IntervalArray imports Index
from pandas.core.arrays import (
IntervalArray,
PeriodArray,
DatetimeArray,
TimedeltaArray,
)
inferred = lib.infer_dtype(subarr, skipna=False)
if inferred == "integer":
try:
data = _try_convert_to_int_array(subarr, False, None)
return data, data.dtype
except ValueError:
pass
return subarr, object
elif inferred in ["floating", "mixed-integer-float", "integer-na"]:
# TODO: Returns IntegerArray for integer-na case in the future
return subarr, np.float64
elif inferred == "interval":
try:
data = IntervalArray._from_sequence(subarr, copy=False)
return data, data.dtype
except ValueError:
# GH27172: mixed closed Intervals --> object dtype
pass
elif inferred == "boolean":
# don't support boolean explicitly ATM
pass
elif inferred != "string":
if inferred.startswith("datetime"):
try:
data = DatetimeArray._from_sequence(subarr, copy=False)
return data, data.dtype
except (ValueError, OutOfBoundsDatetime):
# GH 27011
# If we have mixed timezones, just send it
# down the base constructor
pass
elif inferred.startswith("timedelta"):
data = TimedeltaArray._from_sequence(subarr, copy=False)
return data, data.dtype
elif inferred == "period":
try:
data = PeriodArray._from_sequence(subarr)
return data, data.dtype
except IncompatibleFrequency:
pass
return subarr, subarr.dtype
def _try_convert_to_int_array(
data: np.ndarray, copy: bool, dtype: np.dtype
) -> np.ndarray:
"""
Attempt to convert an array of data into an integer array.
Parameters
----------
data : The data to convert.
copy : bool
Whether to copy the data or not.
dtype : np.dtype
Returns
-------
int_array : data converted to either an ndarray[int64] or ndarray[uint64]
Raises
------
ValueError if the conversion was not successful.
"""
if not is_unsigned_integer_dtype(dtype):
# skip int64 conversion attempt if uint-like dtype is passed, as
# this could return Int64Index when UInt64Index is what's desired
try:
res = data.astype("i8", copy=False)
if (res == data).all():
return res # TODO: might still need to copy
except (OverflowError, TypeError, ValueError):
pass
# Conversion to int64 failed (possibly due to overflow) or was skipped,
# so let's try now with uint64.
try:
res = data.astype("u8", copy=False)
if (res == data).all():
return res # TODO: might still need to copy
except (OverflowError, TypeError, ValueError):
pass
raise ValueError
def _maybe_asobject(dtype, klass, data, copy: bool, name: Label, **kwargs):
"""
If an object dtype was specified, create the non-object Index
and then convert it to object.
Parameters
----------
dtype : np.dtype, ExtensionDtype, str
klass : Index subclass
data : list-like
copy : bool
name : hashable
**kwargs
Returns
-------
Index
Notes
-----
We assume that calling .astype(object) on this klass will make a copy.
"""
# GH#23524 passing `dtype=object` to DatetimeIndex is invalid,
    # will raise in the case where `data` is already tz-aware. So
# we leave it out of this step and cast to object-dtype after
# the DatetimeIndex construction.
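    # Hedged usage sketch (arguments assumed): with dtype=object and
    # klass=DatetimeIndex, the datetime index is built first and then cast:
    #   _maybe_asobject(np.dtype(object), DatetimeIndex, data, False, None)
    #       -> DatetimeIndex(data, copy=False).astype(object)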
if is_dtype_equal(_o_dtype, dtype):
# Note we can pass copy=False because the .astype below
# will always make a copy
index = klass(data, copy=False, name=name, **kwargs)
return index.astype(object)
return klass(data, dtype=dtype, copy=copy, name=name, **kwargs)
| 32.307839
| 88
| 0.560541
|
2e32cb41a530fd7b7174d641693f1f163ac6db27
| 1,067
|
py
|
Python
|
reseaut/apps/profiles/migrations/0001_initial.py
|
koladev32/Beta
|
30a889f0e62dbb2bf518c1bba78ecc3913783ba6
|
[
"Apache-2.0"
] | null | null | null |
reseaut/apps/profiles/migrations/0001_initial.py
|
koladev32/Beta
|
30a889f0e62dbb2bf518c1bba78ecc3913783ba6
|
[
"Apache-2.0"
] | 3
|
2021-10-06T05:49:36.000Z
|
2022-02-18T12:57:20.000Z
|
reseaut/apps/profiles/migrations/0001_initial.py
|
koladev32/Beta
|
30a889f0e62dbb2bf518c1bba78ecc3913783ba6
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2.3 on 2019-07-28 04:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('bio', models.TextField(blank=True)),
('image', models.URLField(blank=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-created_at', '-updated_at'],
'abstract': False,
},
),
]
| 32.333333
| 121
| 0.592315
|
a7fc464bd6bb7cf321f953ebda332ca0907557e3
| 595
|
py
|
Python
|
scripts/data/nuc/pasteandrotate.py
|
abccdabfgc/simdjson
|
4c132012e39c43a4b23f0db1edda0ce134c59cdb
|
[
"Apache-2.0"
] | null | null | null |
scripts/data/nuc/pasteandrotate.py
|
abccdabfgc/simdjson
|
4c132012e39c43a4b23f0db1edda0ce134c59cdb
|
[
"Apache-2.0"
] | null | null | null |
scripts/data/nuc/pasteandrotate.py
|
abccdabfgc/simdjson
|
4c132012e39c43a4b23f0db1edda0ce134c59cdb
|
[
"Apache-2.0"
] | null | null | null |
import os
import csv
try: import pandas as pd
except ImportError:
import pip
pip.main(['install', '--user', 'pandas'])
import pandas as pd
def getdata(filename):
df = pd.read_csv(filename, delim_whitespace=True)
return (df["gb_per_s"].tolist())
ourdir=os.path.dirname(os.path.realpath(__file__))
answer = []
for file in os.listdir(ourdir):
if file.endswith(".table"):
fullpath = os.path.join(ourdir, file)
answer.append([file[:-11]]+getdata(fullpath))
print("#simdjson RapidJSON sajson")
answer.sort()
for l in answer:
print("\t".join(map(str,l)))
| 24.791667
| 53
| 0.672269
|
a11f8cf525b5946f65c505f7c288fd8f9e161812
| 4,486
|
py
|
Python
|
PYats/Genie/005_pyATS-Netbox_tests/utils/tests.py
|
CalvinDawgz/Python-Projects
|
cc5d1e93bcbe124c1864a49cc3dfbca7db520928
|
[
"MIT"
] | 1
|
2021-07-10T09:22:55.000Z
|
2021-07-10T09:22:55.000Z
|
PYats/Genie/005_pyATS-Netbox_tests/utils/tests.py
|
CalvinDawgz/Python-Projects
|
cc5d1e93bcbe124c1864a49cc3dfbca7db520928
|
[
"MIT"
] | null | null | null |
PYats/Genie/005_pyATS-Netbox_tests/utils/tests.py
|
CalvinDawgz/Python-Projects
|
cc5d1e93bcbe124c1864a49cc3dfbca7db520928
|
[
"MIT"
] | null | null | null |
def verify_interface_enabled(netbox_interfaces, pyats_interfaces):
"""Verifies whether the current interface state matches what's in NetBox
Args:
        netbox_interfaces: Interfaces for a given device in NetBox
pyats_interfaces: Current interfaces on a device
Returns:
results: Dictionary containing list of passed interfaces and failed interfaces
"""
results = {
"PASS": [],
"FAILED": [],
}
for interface in netbox_interfaces:
interface_exists = interface.name in pyats_interfaces.keys()
        if (interface_exists):
            # Look up the interface state only after confirming the interface
            # exists; otherwise a missing interface raises a KeyError before the
            # "MISSING from device" branch below can run.
            pyats_interface_enabled = pyats_interfaces[interface.name]['enabled']
            pyats_interface_oper_status = pyats_interfaces[interface.name]['oper_status']
if (interface.enabled):
if (pyats_interface_enabled):
if (pyats_interface_oper_status == "up"):
print(f"✅ {interface.name} found in correct UP/UP state")
results['PASS'].append(interface)
else:
print(f"❌ {interface.name} found in incorrect UP/DOWN state")
results['FAILED'].append(interface)
elif (not pyats_interfaces[interface.name]['enabled']):
print(f"❌ {interface.name} found in incorrect DOWN/DOWN state")
results['FAILED'].append(interface)
elif (not interface.enabled):
if (pyats_interface_enabled):
if (pyats_interface_oper_status == "up"):
print(f"❌ {interface.name} found in incorrect UP/UP state")
results['FAILED'].append(interface)
else:
print(f"❌ {interface.name} found in incorrect UP/DOWN state")
results['FAILED'].append(interface)
elif (not pyats_interfaces[interface.name]['enabled']):
print(f"✅ {interface.name} found in correct DOWN/DOWN state")
results['PASS'].append(interface)
else:
print(f"❌ {interface.name} MISSING from device")
results["FAILED"].append(interface)
return results
def verify_interface_description(netbox_interfaces, pyats_interfaces):
"""Verifies whether the current interface description matches what's in NetBox
Args:
        netbox_interfaces: Interfaces for a given device in NetBox
pyats_interfaces: Current interfaces on a device
Returns:
results: Dictionary containing list of passed interfaces and failed interfaces
"""
results = {
"PASS": [],
"FAILED": [],
}
for interface in netbox_interfaces:
interface_exists = interface.name in pyats_interfaces.keys()
interface_description_exists = len(interface.description) > 0
if (interface_exists):
if (interface_description_exists):
if ('description' in pyats_interfaces[interface.name].keys()):
if (pyats_interfaces[interface.name]['description'] == interface.description):
print(f"✅ {interface.name} description is CORRECT")
results['PASS'].append(interface)
elif (not pyats_interfaces[interface.name]['description'] == interface.description):
print(f"❌ {interface.name} description is INCORRECT and should be '{interface.description}'")
results["FAILED"].append(interface)
else:
print(f"❌ {interface.name} has NO description set. Should be '{interface.description}'")
results["FAILED"].append(interface)
else:
if ('description' not in pyats_interfaces[interface.name].keys()):
print(f"✅ {interface.name} has NO description set")
results['PASS'].append(interface)
else:
print(f"""❌ {interface.name} incorrectly has description '{pyats_interfaces[interface.name]['description']}' on switch""")
results["FAILED"].append(interface)
else:
print(f"❌ {interface.name} MISSING from device")
results["FAILED"].append(interface)
return results
| 47.221053
| 146
| 0.570218
|
31e79dfb0e80ad4cae96930c5af559507cdbee07
| 11,835
|
py
|
Python
|
models/encoder_inception_v4.py
|
yangsuhui/Attention_ocr_recognition
|
360aac324573092f5afea924d6548fa2ddf6c4e9
|
[
"MIT"
] | 3
|
2020-11-06T08:16:06.000Z
|
2021-09-14T09:27:45.000Z
|
models/encoder_inception_v4.py
|
yangsuhui/Attention_ocr_recognition
|
360aac324573092f5afea924d6548fa2ddf6c4e9
|
[
"MIT"
] | null | null | null |
models/encoder_inception_v4.py
|
yangsuhui/Attention_ocr_recognition
|
360aac324573092f5afea924d6548fa2ddf6c4e9
|
[
"MIT"
] | 1
|
2021-06-20T18:52:06.000Z
|
2021-06-20T18:52:06.000Z
|
from __future__ import print_function, division, absolute_import
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
import os
import sys
__all__ = ['InceptionV4', 'inceptionv4']
pretrained_settings = {
'inceptionv4': {
'imagenet': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionv4-8e4777a0.pth',
'input_space': 'RGB',
'input_size': [3, 299, 299],
'input_range': [0, 1],
'mean': [0.5, 0.5, 0.5],
'std': [0.5, 0.5, 0.5],
'num_classes': 1000
},
'imagenet+background': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionv4-8e4777a0.pth',
'input_space': 'RGB',
'input_size': [3, 299, 299],
'input_range': [0, 1],
'mean': [0.5, 0.5, 0.5],
'std': [0.5, 0.5, 0.5],
'num_classes': 1001
}
}
}
class BasicConv2d(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_planes, out_planes,
kernel_size=kernel_size, stride=stride,
padding=padding, bias=False) # verify bias false
self.bn = nn.BatchNorm2d(out_planes,
eps=0.001, # value found in tensorflow
momentum=0.1, # default pytorch value
affine=True)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class Mixed_3a(nn.Module):
def __init__(self):
super(Mixed_3a, self).__init__()
self.maxpool = nn.MaxPool2d(3, stride=2)
self.conv = BasicConv2d(64, 96, kernel_size=3, stride=2)
def forward(self, x):
x0 = self.maxpool(x)
x1 = self.conv(x)
out = torch.cat((x0, x1), 1)
return out
class Mixed_4a(nn.Module):
def __init__(self):
super(Mixed_4a, self).__init__()
self.branch0 = nn.Sequential(
BasicConv2d(160, 64, kernel_size=1, stride=1),
BasicConv2d(64, 96, kernel_size=3, stride=1)
)
self.branch1 = nn.Sequential(
BasicConv2d(160, 64, kernel_size=1, stride=1),
BasicConv2d(64, 64, kernel_size=(1,7), stride=1, padding=(0,3)),
BasicConv2d(64, 64, kernel_size=(7,1), stride=1, padding=(3,0)),
BasicConv2d(64, 96, kernel_size=(3,3), stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
out = torch.cat((x0, x1), 1)
return out
class Mixed_5a(nn.Module):
def __init__(self):
super(Mixed_5a, self).__init__()
self.conv = BasicConv2d(192, 192, kernel_size=3, stride=2)
self.maxpool = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.conv(x)
x1 = self.maxpool(x)
out = torch.cat((x0, x1), 1)
return out
class Inception_A(nn.Module):
def __init__(self):
super(Inception_A, self).__init__()
self.branch0 = BasicConv2d(384, 96, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(384, 64, kernel_size=1, stride=1),
BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1)
)
self.branch2 = nn.Sequential(
BasicConv2d(384, 64, kernel_size=1, stride=1),
BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1),
BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1)
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
BasicConv2d(384, 96, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class Reduction_A(nn.Module):
def __init__(self):
super(Reduction_A, self).__init__()
self.branch0 = BasicConv2d(384, 384, kernel_size=3, stride=2)
self.branch1 = nn.Sequential(
BasicConv2d(384, 192, kernel_size=1, stride=1),
BasicConv2d(192, 224, kernel_size=3, stride=1, padding=1),
BasicConv2d(224, 256, kernel_size=3, stride=2)
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class Inception_B(nn.Module):
def __init__(self):
super(Inception_B, self).__init__()
self.branch0 = BasicConv2d(1024, 384, kernel_size=1, stride=1)
self.branch1 = nn.Sequential(
BasicConv2d(1024, 192, kernel_size=1, stride=1),
BasicConv2d(192, 224, kernel_size=(1,7), stride=1, padding=(0,3)),
BasicConv2d(224, 256, kernel_size=(7,1), stride=1, padding=(3,0))
)
self.branch2 = nn.Sequential(
BasicConv2d(1024, 192, kernel_size=1, stride=1),
BasicConv2d(192, 192, kernel_size=(7,1), stride=1, padding=(3,0)),
BasicConv2d(192, 224, kernel_size=(1,7), stride=1, padding=(0,3)),
BasicConv2d(224, 224, kernel_size=(7,1), stride=1, padding=(3,0)),
BasicConv2d(224, 256, kernel_size=(1,7), stride=1, padding=(0,3))
)
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
BasicConv2d(1024, 128, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class Reduction_B(nn.Module):
def __init__(self):
super(Reduction_B, self).__init__()
self.branch0 = nn.Sequential(
BasicConv2d(1024, 192, kernel_size=1, stride=1),
BasicConv2d(192, 192, kernel_size=3, stride=2)
)
self.branch1 = nn.Sequential(
BasicConv2d(1024, 256, kernel_size=1, stride=1),
BasicConv2d(256, 256, kernel_size=(1,7), stride=1, padding=(0,3)),
BasicConv2d(256, 320, kernel_size=(7,1), stride=1, padding=(3,0)),
BasicConv2d(320, 320, kernel_size=3, stride=2)
)
self.branch2 = nn.MaxPool2d(3, stride=2)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
out = torch.cat((x0, x1, x2), 1)
return out
class Inception_C(nn.Module):
def __init__(self):
super(Inception_C, self).__init__()
self.branch0 = BasicConv2d(1536, 256, kernel_size=1, stride=1)
self.branch1_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1)
self.branch1_1a = BasicConv2d(384, 256, kernel_size=(1,3), stride=1, padding=(0,1))
self.branch1_1b = BasicConv2d(384, 256, kernel_size=(3,1), stride=1, padding=(1,0))
self.branch2_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1)
self.branch2_1 = BasicConv2d(384, 448, kernel_size=(3,1), stride=1, padding=(1,0))
self.branch2_2 = BasicConv2d(448, 512, kernel_size=(1,3), stride=1, padding=(0,1))
self.branch2_3a = BasicConv2d(512, 256, kernel_size=(1,3), stride=1, padding=(0,1))
self.branch2_3b = BasicConv2d(512, 256, kernel_size=(3,1), stride=1, padding=(1,0))
self.branch3 = nn.Sequential(
nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
BasicConv2d(1536, 256, kernel_size=1, stride=1)
)
def forward(self, x):
x0 = self.branch0(x)
x1_0 = self.branch1_0(x)
x1_1a = self.branch1_1a(x1_0)
x1_1b = self.branch1_1b(x1_0)
x1 = torch.cat((x1_1a, x1_1b), 1)
x2_0 = self.branch2_0(x)
x2_1 = self.branch2_1(x2_0)
x2_2 = self.branch2_2(x2_1)
x2_3a = self.branch2_3a(x2_2)
x2_3b = self.branch2_3b(x2_2)
x2 = torch.cat((x2_3a, x2_3b), 1)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
return out
class InceptionV4(nn.Module):
def __init__(self, num_classes=1001):
super(InceptionV4, self).__init__()
        # Special attributes
self.input_space = None
self.input_size = (299, 299, 3)
self.mean = None
self.std = None
# Modules
self.features = nn.Sequential(
BasicConv2d(3, 32, kernel_size=3, stride=2),
BasicConv2d(32, 32, kernel_size=3, stride=1),
BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1),
Mixed_3a(),
Mixed_4a(),
Mixed_5a(),
Inception_A(),
Inception_A(),
Inception_A(),
Inception_A(),
Reduction_A(), # Mixed_6a
Inception_B(),
Inception_B(),
Inception_B(),
Inception_B(),
Inception_B(),
Inception_B(),
Inception_B(),
Reduction_B(), # Mixed_7a
Inception_C(),
Inception_C(),
Inception_C()
)
self.last_linear = nn.Linear(1536, num_classes)
def logits(self, features):
#Allows image of any size to be processed
adaptiveAvgPoolWidth = features.shape[2]
x = F.avg_pool2d(features, kernel_size=adaptiveAvgPoolWidth)
x = x.view(x.size(0), -1)
x = self.last_linear(x)
return x
def forward(self, input):
x = self.features(input)
x = self.logits(x)
return x
def inceptionv4(num_classes=1000, pretrained='imagenet'):
if pretrained:
settings = pretrained_settings['inceptionv4'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
        # both 'imagenet' & 'imagenet+background' are loaded from the same parameters
model = InceptionV4(num_classes=1001)
model.load_state_dict(model_zoo.load_url(settings['url']))
if pretrained == 'imagenet':
new_last_linear = nn.Linear(1536, 1000)
new_last_linear.weight.data = model.last_linear.weight.data[1:]
new_last_linear.bias.data = model.last_linear.bias.data[1:]
model.last_linear = new_last_linear
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
else:
model = InceptionV4(num_classes=num_classes)
return model
'''
TEST
Run this code with:
```
cd $HOME/pretrained-models.pytorch
python -m pretrainedmodels.inceptionv4
```
'''
if __name__ == '__main__':
assert inceptionv4(num_classes=10, pretrained=None)
print('success')
assert inceptionv4(num_classes=1000, pretrained='imagenet')
print('success')
assert inceptionv4(num_classes=1001, pretrained='imagenet+background')
print('success')
# fail
assert inceptionv4(num_classes=1001, pretrained='imagenet')
| 33.058659
| 95
| 0.56147
|
ad48ca65a99ee596edd8c14143e57055eacbce6f
| 245
|
py
|
Python
|
Chapter 09/Chap09_Example9.20.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
Chapter 09/Chap09_Example9.20.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
Chapter 09/Chap09_Example9.20.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
with open('readinput.txt') as myfile1:
print(myfile1.tell()) # T1
print("-----------")
print(myfile1.read(4)) # T2
print(myfile1.tell()) # T3
print("-----------")
print(myfile1.read()) # T4
print(myfile1.tell()) # T5
| 27.222222
| 38
| 0.530612
|
d7cf740d603957e0a915500703af07c95bb8f3a7
| 4,925
|
py
|
Python
|
app/api/helpers/constants.py
|
lunyamwis/laylinks-bend
|
04ff9ebb5da482e5b2642a89654a5b5f0128eaaa
|
[
"MIT"
] | null | null | null |
app/api/helpers/constants.py
|
lunyamwis/laylinks-bend
|
04ff9ebb5da482e5b2642a89654a5b5f0128eaaa
|
[
"MIT"
] | null | null | null |
app/api/helpers/constants.py
|
lunyamwis/laylinks-bend
|
04ff9ebb5da482e5b2642a89654a5b5f0128eaaa
|
[
"MIT"
] | null | null | null |
USER_REQUIRED_FIELD = ['first_name', 'last_name',
'username', 'email', 'password', 'phone_number']
EMPLOYEE_REQUIRED_FIELD = ['first_name', 'last_name', "email"]
INDIVIDUAL_CLIENT_REQUIRED_FIELD = ['first_name', 'last_name', 'gender']
BLOG_REQUIRED_FIELD = ['title','post','summary']
CORPORATE_CLIENT_REQUIRED_FIELD = ['name', ]
RECEIPT_REQUIRED_FIELD = ["transaction_date",
"amount_figures", "amount_words", "payment_mode"]
CONTACT_PERSON_REQUIRED_FIELD = ['name', 'position', 'phone_number', 'email',
'gender', 'service_line', ]
ADDITIONAL_PREMIUMS_REQUIRED_FIELDS = ['premium', 'minimum_amount']
INSURANCE_CO_REQUIRED_FIELD = ['name', 'email', 'mobile_number', 'contact_person',
'postal_address', ]
MOTOR_POLICY_REQUIRED_FIELD = ['policy_no', 'transaction_date', 'start_date', 'policy_commission_rate',
'end_date', 'vehicles', 'insurance_class', 'minimum_premium_amount',
'transaction_type', 'premium_type', 'vehicles', 'insurance_company']
PROFESSIONAL_INDEMNITY_REQUIRED_FIELD = ['policy_no', 'transaction_date', 'start_date',
'end_date', 'sum_insured', 'premium_type',
'total_premium', 'commission_rate', 'commission_amount', 'specialty_class',
'transaction_type', 'premium_type', 'insurance_company']
INDIVIDUAL_MEDICAL_REQUIRED_FIELD = ['policy_no', 'transaction_date', 'start_date', 'debit_note_no',
'end_date', 'premium_type', 'commission_rate', 'medical_insurance',
'transaction_type', 'premium_type', 'insurance_company']
GROUP_MEDICAL_REQUIRED_FIELD = ['policy_no', 'transaction_date', 'start_date', 'debit_note_no',
'end_date', 'premium_type', 'commission_rate', 'medical_insurances',
'transaction_type', 'premium_type', 'insurance_company']
DOMESTIC_PACKAGE_REQUIRED_FIELD = ['policy_no', 'transaction_date', 'start_date', 'debit_note_no',
'end_date', 'premium_type', 'commission_rate', 'package_details',
'transaction_type', 'premium_type', 'insurance_company']
PERSONAL_ACC_REQUIRED_FIELD = ['policy_no', 'transaction_date', 'start_date', 'debit_note_no',
'end_date', 'premium_type', 'commission_rate', 'benefit_limits',
'transaction_type', 'premium_type', 'insurance_company']
PACKAGE_DETAILS_REQUIRED_FIELD = ['buildings', 'contents', 'all_risks',
'work_man_injury', 'owner_liability', 'occupiers_liability']
FIRE_DETAILS_REQUIRED_FIELD = ['name', 'description', 'value']
FIRE_POLICY_REQUIRED_FIELD = ['policy_no', 'transaction_date', 'start_date', 'debit_note_no',
'end_date', 'premium_type', 'commission_rate', 'properties',
'transaction_type', 'premium_type', 'insurance_company']
WIBA_POLICY_REQUIRED_FIELD = ['policy_no', 'transaction_date', 'start_date', 'debit_note_no',
'end_date', 'premium_type', 'commission_rate', 'no_of_staff',
'transaction_type', 'premium_type', 'insurance_company',
'estimate_annual_earning']
TRAVEL_REQUIRED_FIELD = ['policy_no', 'transaction_date', 'start_date', 'debit_note_no',
'end_date', 'premium_type', 'commission_rate', 'travel_details',
'transaction_type', 'premium_type', 'insurance_company']
TRAVEL_DETAILS_REQUIRED_FIELD = ['option', 'passport_no', 'date_of_travel', 'next_of_kin',
'countries_of_travel', 'modes_of_travel', 'reasons_of_travel']
SEND_MESSAGE_REQUIRED_FIELD = []
MEDICAL_INS_REQUIRED_FIELD = ['inpatient_limit', 'outpatient_limit', 'family_size']
MOTOR_VEHICLE_REQUIRED_FIELD = ['registration_no', 'make', 'model', 'body', 'color', 'year_of_manufacture',
'chassis_no', 'engine_no', 'seating_capacity', 'cc', 'tonnage']
SUCCESS_ACTION = "{} successfully"
MAIL_SUBJECT = 'Activate your account at {}'
SIGNUP_SUCCESS = "{0} account created successfully. {0} should check their email for confirmation"
AGENCY_SIGNUP_SUCCESS = "Agency and admin accounts created successfully. Please check the admin and agency emails for confirmation"
USER_MAIL_BODY_MSG = "Thanks for choosing {}"
ADMIN_MAIL_BODY_MSG = "Thank you for registering {}. You are now the admin"
ACCOUNT_ACTIVATION_MSG = "To activate your account, please click the link below"
AGENCY_MAIL_BODY_MSG = "This is the insurance agency activation email "
GENDER_OPTIONS = {"M": "Male", "F": "Female", "O": "Prefer not to disclose"}
| 80.737705
| 130
| 0.637766
|
be7ec4dcd7225878b0e3d112be94236ddc52f45d
| 10,402
|
py
|
Python
|
sahara/context.py
|
esikachev/sahara-backup
|
a470fa6aec5f1009d41d82fabc1e5d64874aa213
|
[
"Apache-2.0"
] | null | null | null |
sahara/context.py
|
esikachev/sahara-backup
|
a470fa6aec5f1009d41d82fabc1e5d64874aa213
|
[
"Apache-2.0"
] | null | null | null |
sahara/context.py
|
esikachev/sahara-backup
|
a470fa6aec5f1009d41d82fabc1e5d64874aa213
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import eventlet
from eventlet.green import threading
from eventlet.green import time
from eventlet import greenpool
from eventlet import semaphore
from oslo_config import cfg
from oslo_context import context
from oslo_log import log as logging
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.i18n import _LW
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class Context(context.RequestContext):
def __init__(self,
user_id=None,
tenant_id=None,
auth_token=None,
service_catalog=None,
username=None,
tenant_name=None,
roles=None,
is_admin=None,
remote_semaphore=None,
auth_uri=None,
resource_uuid=None,
current_instance_info=None,
request_id=None,
overwrite=True,
**kwargs):
if kwargs:
LOG.warning(_LW('Arguments dropped when creating context: '
'{args}').format(args=kwargs))
super(Context, self).__init__(auth_token=auth_token,
user=user_id,
tenant=tenant_id,
is_admin=is_admin,
resource_uuid=resource_uuid,
request_id=request_id)
self.service_catalog = service_catalog
self.username = username
self.tenant_name = tenant_name
self.remote_semaphore = remote_semaphore or semaphore.Semaphore(
CONF.cluster_remote_threshold)
self.roles = roles
if auth_uri:
self.auth_uri = auth_uri
else:
self.auth_uri = _get_auth_uri()
if overwrite or not hasattr(context._request_store, 'context'):
self.update_store()
if current_instance_info is not None:
self.current_instance_info = current_instance_info
else:
self.current_instance_info = InstanceInfo()
def clone(self):
return Context(
self.user_id,
self.tenant_id,
self.auth_token,
self.service_catalog,
self.username,
self.tenant_name,
self.roles,
self.is_admin,
self.remote_semaphore,
self.auth_uri,
self.resource_uuid,
self.current_instance_info,
self.request_id,
overwrite=False)
def to_dict(self):
return {
'user_id': self.user_id,
'tenant_id': self.tenant_id,
'auth_token': self.auth_token,
'service_catalog': self.service_catalog,
'username': self.username,
'tenant_name': self.tenant_name,
'user_name': self.username,
'project_name': self.tenant_name,
'is_admin': self.is_admin,
'roles': self.roles,
'auth_uri': self.auth_uri,
'resource_uuid': self.resource_uuid,
'request_id': self.request_id,
}
def is_auth_capable(self):
return (self.service_catalog and self.auth_token and self.tenant and
self.user_id)
# NOTE(adrienverge): The Context class uses the 'user' and 'tenant'
# properties internally (inherited from oslo_context), but Sahara code
# often uses 'user_id' and 'tenant_id'.
@property
def user_id(self):
return self.user
@user_id.setter
def user_id(self, value):
self.user = value
@property
def tenant_id(self):
return self.tenant
@tenant_id.setter
def tenant_id(self, value):
self.tenant = value
def get_admin_context():
return Context(is_admin=True)
_CTX_STORE = threading.local()
_CTX_KEY = 'current_ctx'
def has_ctx():
return hasattr(_CTX_STORE, _CTX_KEY)
def ctx():
if not has_ctx():
raise ex.IncorrectStateError(_("Context isn't available here"))
return getattr(_CTX_STORE, _CTX_KEY)
def current():
return ctx()
def set_ctx(new_ctx):
if not new_ctx and has_ctx():
delattr(_CTX_STORE, _CTX_KEY)
if hasattr(context._request_store, 'context'):
delattr(context._request_store, 'context')
if new_ctx:
setattr(_CTX_STORE, _CTX_KEY, new_ctx)
setattr(context._request_store, 'context', new_ctx)
def _get_auth_uri():
if CONF.keystone_authtoken.auth_uri is not None:
auth_uri = CONF.keystone_authtoken.auth_uri
else:
if CONF.keystone_authtoken.identity_uri is not None:
identity_uri = CONF.keystone_authtoken.identity_uri
else:
host = CONF.keystone_authtoken.auth_host
port = CONF.keystone_authtoken.auth_port
protocol = CONF.keystone_authtoken.auth_protocol
identity_uri = '%s://%s:%s' % (protocol, host, port)
if CONF.use_identity_api_v3 is False:
auth_version = 'v2.0'
else:
auth_version = 'v3'
auth_uri = '%s/%s' % (identity_uri, auth_version)
return auth_uri
def _wrapper(ctx, thread_description, thread_group, func, *args, **kwargs):
try:
set_ctx(ctx)
func(*args, **kwargs)
except BaseException as e:
LOG.debug(
"Thread {thread} failed with exception: {exception}".format(
thread=thread_description, exception=e))
if thread_group and not thread_group.exc:
thread_group.exc = e
thread_group.failed_thread = thread_description
finally:
if thread_group:
thread_group._on_thread_exit()
set_ctx(None)
def spawn(thread_description, func, *args, **kwargs):
eventlet.spawn(_wrapper, current().clone(), thread_description,
None, func, *args, **kwargs)
class ThreadGroup(object):
"""ThreadGroup object.
    It is advised to use ThreadGroup as a context manager instead
    of instantiating it and calling _wait() manually. The __exit__()
    guarantees to exit only after all child threads are done, even if
    the spawning code has thrown an exception.
"""
def __init__(self, thread_pool_size=1000):
self.tg = greenpool.GreenPool(size=thread_pool_size)
self.exc = None
self.failed_thread = None
self.threads = 0
self.cv = threading.Condition()
def spawn(self, thread_description, func, *args, **kwargs):
self.tg.spawn(_wrapper, current().clone(), thread_description,
self, func, *args, **kwargs)
with self.cv:
self.threads += 1
def _on_thread_exit(self):
with self.cv:
self.threads -= 1
if self.threads == 0:
self.cv.notifyAll()
# NOTE(dmitryme): A little rationale on why we reimplemented wait():
    # * Eventlet's GreenPool.wait() can hang
# * Oslo's ThreadGroup.wait() can exit before all threads are done
#
def _wait(self):
"""Using of _wait() method.
It is preferred to use the class as a context manager and do not
use _wait() directly, see class docstring for an explanation.
"""
with self.cv:
while self.threads > 0:
self.cv.wait()
if self.exc:
raise ex.ThreadException(self.failed_thread, self.exc)
def __enter__(self):
return self
def __exit__(self, *ex):
if not any(ex):
self._wait()
else:
            # If the spawning code threw an exception, it has higher priority
            # for us than one thrown inside a child thread (if any)
try:
self._wait()
except Exception:
# that will make __exit__ throw original exception
pass
def sleep(seconds=0):
time.sleep(seconds)
class InstanceInfo(object):
def __init__(self, cluster_id=None, instance_id=None, instance_name=None,
node_group_id=None, step_type=None, step_id=None):
self.cluster_id = cluster_id
self.instance_id = instance_id
self.instance_name = instance_name
self.node_group_id = node_group_id
self.step_type = step_type
self.step_id = step_id
def set_step_type(step_type):
current().current_instance_info.step_type = step_type
class InstanceInfoManager(object):
def __init__(self, instance_info):
self.prev_instance_info = current().current_instance_info
if not instance_info.step_type:
instance_info.step_type = self.prev_instance_info.step_type
if not instance_info.step_id:
instance_info.step_id = self.prev_instance_info.step_id
current().current_instance_info = instance_info
def __enter__(self):
pass
def __exit__(self, *args):
current().current_instance_info = self.prev_instance_info
def set_current_cluster_id(cluster_id):
current().resource_uuid = 'none, cluster: %s' % cluster_id
def set_current_job_execution_id(je_id):
current().resource_uuid = 'none, job_execution: %s' % je_id
class SetCurrentInstanceId(object):
def __init__(self, instance_id):
ctx = current()
self.prev_uuid = ctx.resource_uuid
if ctx.resource_uuid:
ctx.resource_uuid = ctx.resource_uuid.replace('none', instance_id)
context.get_current().resource_uuid = ctx.resource_uuid
def __enter__(self):
pass
def __exit__(self, *ex):
current().resource_uuid = self.prev_uuid
context.get_current().resource_uuid = self.prev_uuid
def set_current_instance_id(instance_id):
return SetCurrentInstanceId(instance_id)
| 30.775148
| 78
| 0.622092
|
38f542cdd53fae9f7d670316f1b796ca1e9fa17a
| 241
|
py
|
Python
|
draugr/drawers/mpl_drawers/spectral/__init__.py
|
cnHeider/draugr
|
b95e0bb1fa5efa581bfb28ff604f296ed2e6b7d6
|
[
"Apache-2.0"
] | 3
|
2019-09-27T08:04:59.000Z
|
2020-12-02T06:14:45.000Z
|
draugr/drawers/mpl_drawers/spectral/__init__.py
|
cnHeider/draugr
|
b95e0bb1fa5efa581bfb28ff604f296ed2e6b7d6
|
[
"Apache-2.0"
] | 64
|
2019-09-27T08:03:42.000Z
|
2022-03-28T15:07:30.000Z
|
draugr/drawers/mpl_drawers/spectral/__init__.py
|
cnHeider/draugr
|
b95e0bb1fa5efa581bfb28ff604f296ed2e6b7d6
|
[
"Apache-2.0"
] | 1
|
2020-10-01T00:18:57.000Z
|
2020-10-01T00:18:57.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
Created on 03/04/2020
"""
from .fast_fourier_transform import *
from .fast_fourier_transform_spectrogram import *
| 20.083333
| 49
| 0.659751
|
e5d95e711a7485f9534860fa422255585c8fe0ba
| 2,510
|
py
|
Python
|
geography/management/commands/bootstrap_geom_hi.py
|
The-Politico/politico-civic-geography
|
032b3ee773b50b65cfe672f230dda772df0f89e0
|
[
"MIT"
] | 1
|
2018-07-31T13:54:38.000Z
|
2018-07-31T13:54:38.000Z
|
geography/management/commands/bootstrap_geom_hi.py
|
The-Politico/politico-civic-geography
|
032b3ee773b50b65cfe672f230dda772df0f89e0
|
[
"MIT"
] | 7
|
2018-04-18T20:57:40.000Z
|
2021-06-10T20:50:40.000Z
|
geography/management/commands/bootstrap_geom_hi.py
|
The-Politico/politico-civic-geography
|
032b3ee773b50b65cfe672f230dda772df0f89e0
|
[
"MIT"
] | 1
|
2018-12-15T20:17:52.000Z
|
2018-12-15T20:17:52.000Z
|
import os
import geojson
import shapefile
from django.core.management.base import BaseCommand
from geography.models import Division, DivisionLevel, Geometry
from geography.utils.lookups import county_lookup
from .bootstrap._arguments import ArgumentsMethods
from .bootstrap._attributes import Attributes
from .bootstrap._toposimplify import Toposimplify
SHP_SLUG = "clipped_hi"
STATE_LEVEL = DivisionLevel.STATE
class Command(Toposimplify, ArgumentsMethods, Attributes, BaseCommand):
def get_county_shp(self, fips):
cmd_path = os.path.dirname(os.path.realpath(__file__))
SHAPEFILE_PATH = os.path.join(cmd_path, "../../bin/hi")
shape = shapefile.Reader(
os.path.join(SHAPEFILE_PATH, "{}.shp".format(SHP_SLUG))
)
fields = shape.fields[1:]
field_names = [f[0] for f in fields]
county_records = [
shp
for shp in shape.shapeRecords()
if dict(zip(field_names, shp.record))["STATEFP"] == fips
]
features = []
for shp in county_records:
rec = dict(zip(field_names, shp.record))
geometry = shp.shape.__geo_interface__
geodata = {
"type": "Feature",
"geometry": geometry,
"properties": {
"state": rec["STATEFP"],
"county": rec["COUNTYFP"],
"name": county_lookup[rec["STATEFP"]].get(
rec["COUNTYFP"], rec["NAME"]
),
},
}
features.append(geodata)
threshold = (
self.THRESHOLDS["nation"]
if fips == "00"
else self.THRESHOLDS["county"]
)
return self.toposimplify(
geojson.FeatureCollection(features), threshold
)
def handle(self, *args, **options):
self.set_attributes()
self.parse_arguments(options)
state = Division.objects.get(code=15, level__name=DivisionLevel.STATE)
geojson, created = Geometry.objects.update_or_create(
division=state,
subdivision_level=self.COUNTY_LEVEL,
simplification=0.075,
source="https://www2.census.gov/geo/tiger/GENZ2016/shp/cb_2016_us_state_500k.zip", # noqa
series="2016",
defaults={"topojson": self.get_county_shp("15")},
)
# TODO: District geometries
self.stdout.write(self.style.SUCCESS("All done! 🏁"))
| 33.026316
| 102
| 0.59004
|
e306f300b5a6114510052fbb3d04393324526000
| 837
|
gyp
|
Python
|
binding.gyp
|
SlimIO/Nixfs
|
20e198c96231485197ff60161f3c71c70cbc01d1
|
[
"MIT"
] | null | null | null |
binding.gyp
|
SlimIO/Nixfs
|
20e198c96231485197ff60161f3c71c70cbc01d1
|
[
"MIT"
] | 115
|
2019-10-01T17:28:19.000Z
|
2021-08-02T04:46:26.000Z
|
binding.gyp
|
SlimIO/Nixfs
|
20e198c96231485197ff60161f3c71c70cbc01d1
|
[
"MIT"
] | null | null | null |
{
"targets": [
{
"target_name": "nixfs",
"sources": [
"nixfs.cpp"
],
"include_dirs": [
"include",
"<!@(node -p \"require('node-addon-api').include\")"
],
"dependencies": [
"<!(node -p \"require('node-addon-api').gyp\")"
],
"cflags!": [ "-fno-exceptions" ],
"cflags_cc!": [ "-fno-exceptions" ],
"xcode_settings": {
"GCC_ENABLE_CPP_EXCEPTIONS": "YES",
"CLANG_CXX_LIBRARY": "libc++",
"MACOSX_DEPLOYMENT_TARGET": "10.7"
},
"msvs_settings": {
"VCCLCompilerTool": {
"ExceptionHandling": 1
}
}
}
]
}
| 27.9
| 68
| 0.364397
|
80607331a1ffa3f85ff0e958a0bc12aa4904daab
| 16,738
|
py
|
Python
|
compiler/pgates/pnand4.py
|
im-world/OpenRAM
|
f66aac3264598eeae31225c62b6a4af52412d407
|
[
"BSD-3-Clause"
] | 335
|
2018-03-13T21:05:22.000Z
|
2022-03-30T07:53:25.000Z
|
compiler/pgates/pnand4.py
|
im-world/OpenRAM
|
f66aac3264598eeae31225c62b6a4af52412d407
|
[
"BSD-3-Clause"
] | 87
|
2018-03-06T00:55:51.000Z
|
2022-03-30T19:38:29.000Z
|
compiler/pgates/pnand4.py
|
im-world/OpenRAM
|
f66aac3264598eeae31225c62b6a4af52412d407
|
[
"BSD-3-Clause"
] | 95
|
2018-03-14T16:22:55.000Z
|
2022-03-24T00:34:37.000Z
|
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2021 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import pgate
import debug
from tech import drc, parameter, spice
from vector import vector
import logical_effort
from sram_factory import factory
import contact
from tech import cell_properties as cell_props
class pnand4(pgate.pgate):
"""
This module generates gds of a parametrically sized 4-input nand.
    This module uses ptx to generate a 4-input nand within a certain height.
"""
def __init__(self, name, size=1, height=None, add_wells=True):
""" Creates a cell for a simple 3 input nand """
debug.info(2,
"creating pnand4 structure {0} with size of {1}".format(name,
size))
self.add_comment("size: {}".format(size))
        # We have trouble pitch matching 3x sizes to the bitcell...
# If we relax this, we could size this better.
self.size = size
self.nmos_size = 2 * size
self.pmos_size = parameter["beta"] * size
self.nmos_width = self.nmos_size * drc("minwidth_tx")
self.pmos_width = self.pmos_size * drc("minwidth_tx")
# FIXME: Allow these to be sized
debug.check(size == 1,
"Size 1 pnand4 is only supported now.")
self.tx_mults = 1
if cell_props.ptx.bin_spice_models:
self.nmos_width = self.nearest_bin("nmos", self.nmos_width)
self.pmos_width = self.nearest_bin("pmos", self.pmos_width)
# Creates the netlist and layout
super().__init__(name, height, add_wells)
def add_pins(self):
""" Adds pins for spice netlist """
pin_list = ["A", "B", "C", "D", "Z", "vdd", "gnd"]
dir_list = ["INPUT", "INPUT", "INPUT", "INPUT", "OUTPUT", "POWER", "GROUND"]
self.add_pin_list(pin_list, dir_list)
def create_netlist(self):
self.add_pins()
self.add_ptx()
self.create_ptx()
def create_layout(self):
""" Calls all functions related to the generation of the layout """
self.setup_layout_constants()
self.place_ptx()
if self.add_wells:
self.add_well_contacts()
self.route_inputs()
self.route_output()
self.determine_width()
self.route_supply_rails()
self.connect_rails()
self.extend_wells()
self.add_boundary()
def add_ptx(self):
""" Create the PMOS and NMOS transistors. """
self.nmos_center = factory.create(module_type="ptx",
width=self.nmos_width,
mults=self.tx_mults,
tx_type="nmos",
add_source_contact="active",
add_drain_contact="active")
self.add_mod(self.nmos_center)
self.nmos_right = factory.create(module_type="ptx",
width=self.nmos_width,
mults=self.tx_mults,
tx_type="nmos",
add_source_contact="active",
add_drain_contact=self.route_layer)
self.add_mod(self.nmos_right)
self.nmos_left = factory.create(module_type="ptx",
width=self.nmos_width,
mults=self.tx_mults,
tx_type="nmos",
add_source_contact=self.route_layer,
add_drain_contact="active")
self.add_mod(self.nmos_left)
self.pmos_left = factory.create(module_type="ptx",
width=self.pmos_width,
mults=self.tx_mults,
tx_type="pmos",
add_source_contact=self.route_layer,
add_drain_contact=self.route_layer)
self.add_mod(self.pmos_left)
self.pmos_center = factory.create(module_type="ptx",
width=self.pmos_width,
mults=self.tx_mults,
tx_type="pmos",
add_source_contact=self.route_layer,
add_drain_contact=self.route_layer)
self.add_mod(self.pmos_center)
self.pmos_right = factory.create(module_type="ptx",
width=self.pmos_width,
mults=self.tx_mults,
tx_type="pmos",
add_source_contact=self.route_layer,
add_drain_contact=self.route_layer)
self.add_mod(self.pmos_right)
def setup_layout_constants(self):
""" Pre-compute some handy layout parameters. """
# Compute the overlap of the source and drain pins
self.ptx_offset = self.pmos_left.get_pin("D").center() - self.pmos_left.get_pin("S").center()
# This is the extra space needed to ensure DRC rules
# to the active contacts
nmos = factory.create(module_type="ptx", tx_type="nmos")
def create_ptx(self):
"""
Create the PMOS and NMOS in the netlist.
"""
self.pmos1_inst = self.add_inst(name="pnand4_pmos1",
mod=self.pmos_left)
self.connect_inst(["vdd", "A", "Z", "vdd"])
self.pmos2_inst = self.add_inst(name="pnand4_pmos2",
mod=self.pmos_center)
self.connect_inst(["Z", "B", "vdd", "vdd"])
self.pmos3_inst = self.add_inst(name="pnand4_pmos3",
mod=self.pmos_center)
self.connect_inst(["Z", "C", "vdd", "vdd"])
self.pmos4_inst = self.add_inst(name="pnand4_pmos4",
mod=self.pmos_right)
self.connect_inst(["Z", "D", "vdd", "vdd"])
self.nmos1_inst = self.add_inst(name="pnand4_nmos1",
mod=self.nmos_left)
self.connect_inst(["Z", "D", "net1", "gnd"])
self.nmos2_inst = self.add_inst(name="pnand4_nmos2",
mod=self.nmos_center)
self.connect_inst(["net1", "C", "net2", "gnd"])
self.nmos3_inst = self.add_inst(name="pnand4_nmos3",
mod=self.nmos_center)
self.connect_inst(["net2", "B", "net3", "gnd"])
self.nmos4_inst = self.add_inst(name="pnand4_nmos4",
mod=self.nmos_right)
self.connect_inst(["net3", "A", "gnd", "gnd"])
def place_ptx(self):
"""
Place the PMOS and NMOS in the layout at the upper-most
and lowest position to provide maximum routing in channel
"""
pmos1_pos = vector(self.pmos_left.active_offset.x,
self.height - self.pmos_left.active_height - self.top_bottom_space)
self.pmos1_inst.place(pmos1_pos)
pmos2_pos = pmos1_pos + self.ptx_offset
self.pmos2_inst.place(pmos2_pos)
pmos3_pos = pmos2_pos + self.ptx_offset
self.pmos3_inst.place(pmos3_pos)
self.pmos4_pos = pmos3_pos + self.ptx_offset
self.pmos4_inst.place(self.pmos4_pos)
nmos1_pos = vector(self.pmos_left.active_offset.x,
self.top_bottom_space)
self.nmos1_inst.place(nmos1_pos)
nmos2_pos = nmos1_pos + self.ptx_offset
self.nmos2_inst.place(nmos2_pos)
nmos3_pos = nmos2_pos + self.ptx_offset
self.nmos3_inst.place(nmos3_pos)
self.nmos4_pos = nmos3_pos + self.ptx_offset
self.nmos4_inst.place(self.nmos4_pos)
def add_well_contacts(self):
""" Add n/p well taps to the layout and connect to supplies """
self.add_nwell_contact(self.pmos_right,
self.pmos4_pos + vector(self.m1_pitch, 0))
self.add_pwell_contact(self.nmos_right,
self.nmos4_pos + vector(self.m1_pitch, 0))
def connect_rails(self):
""" Connect the nmos and pmos to its respective power rails """
self.connect_pin_to_rail(self.nmos1_inst, "S", "gnd")
self.connect_pin_to_rail(self.pmos1_inst, "S", "vdd")
self.connect_pin_to_rail(self.pmos2_inst, "D", "vdd")
self.connect_pin_to_rail(self.pmos4_inst, "D", "vdd")
def route_inputs(self):
""" Route the A and B and C inputs """
# We can use this pitch because the contacts and overlap won't be adjacent
pmos_drain_bottom = self.pmos1_inst.get_pin("D").by()
self.output_yoffset = pmos_drain_bottom - 0.5 * self.route_layer_width - self.route_layer_space
bottom_pin = self.nmos1_inst.get_pin("D")
# active contact metal to poly contact metal spacing
active_contact_to_poly_contact = bottom_pin.uy() + self.m1_space + 0.5 * contact.poly_contact.second_layer_height
# active diffusion to poly contact spacing
# doesn't use nmos uy because that is calculated using offset + poly height
active_top = self.nmos1_inst.by() + self.nmos1_inst.mod.active_height
active_to_poly_contact = active_top + self.poly_to_active + 0.5 * contact.poly_contact.first_layer_height
active_to_poly_contact2 = active_top + self.poly_contact_to_gate + 0.5 * self.route_layer_width
self.inputA_yoffset = max(active_contact_to_poly_contact,
active_to_poly_contact,
active_to_poly_contact2)
apin = self.route_input_gate(self.pmos1_inst,
self.nmos1_inst,
self.inputA_yoffset,
"A",
position="left")
self.inputB_yoffset = self.inputA_yoffset + self.m1_pitch
bpin = self.route_input_gate(self.pmos2_inst,
self.nmos2_inst,
self.inputB_yoffset,
"B",
position="center")
self.inputC_yoffset = self.inputB_yoffset + self.m1_pitch
cpin = self.route_input_gate(self.pmos3_inst,
self.nmos3_inst,
self.inputC_yoffset,
"C",
position="right")
self.inputD_yoffset = self.inputC_yoffset + self.m1_pitch
dpin = self.route_input_gate(self.pmos4_inst,
self.nmos4_inst,
self.inputD_yoffset,
"D",
position="right")
if cell_props.pgate.add_implants:
self.add_enclosure([apin, bpin, cpin, dpin], "npc", drc("npc_enclose_poly"))
def route_output(self):
""" Route the Z output """
# PMOS1 drain
pmos1_pin = self.pmos1_inst.get_pin("D")
# PMOS3 drain
pmos3_pin = self.pmos3_inst.get_pin("D")
# NMOS3 drain
nmos4_pin = self.nmos4_inst.get_pin("D")
out_offset = vector(nmos4_pin.cx() + self.route_layer_pitch,
self.output_yoffset)
# Go up to metal2 for ease on all output pins
# self.add_via_center(layers=self.m1_stack,
# offset=pmos1_pin.center(),
# directions=("V", "V"))
# self.add_via_center(layers=self.m1_stack,
# offset=pmos3_pin.center(),
# directions=("V", "V"))
# self.add_via_center(layers=self.m1_stack,
# offset=nmos3_pin.center(),
# directions=("V", "V"))
# # Route in the A input track (top track)
# mid_offset = vector(nmos3_pin.center().x, self.inputA_yoffset)
# self.add_path("m1", [pmos1_pin.center(), mid_offset, nmos3_pin.uc()])
# This extends the output to the edge of the cell
# self.add_via_center(layers=self.m1_stack,
# offset=mid_offset)
top_left_pin_offset = pmos1_pin.center()
top_right_pin_offset = pmos3_pin.center()
bottom_pin_offset = nmos4_pin.center()
# PMOS1 to output
self.add_path(self.route_layer, [top_left_pin_offset,
vector(top_left_pin_offset.x, out_offset.y),
out_offset])
# PMOS4 to output
self.add_path(self.route_layer, [top_right_pin_offset,
vector(top_right_pin_offset.x, out_offset.y),
out_offset])
# NMOS4 to output
mid2_offset = vector(out_offset.x, bottom_pin_offset.y)
self.add_path(self.route_layer,
[bottom_pin_offset, mid2_offset],
width=nmos4_pin.height())
mid3_offset = vector(out_offset.x, nmos4_pin.by())
self.add_path(self.route_layer, [mid3_offset, out_offset])
self.add_layout_pin_rect_center(text="Z",
layer=self.route_layer,
offset=out_offset)
def analytical_power(self, corner, load):
"""Returns dynamic and leakage power. Results in nW"""
c_eff = self.calculate_effective_capacitance(load)
freq = spice["default_event_frequency"]
power_dyn = self.calc_dynamic_power(corner, c_eff, freq)
power_leak = spice["nand4_leakage"]
total_power = self.return_power(power_dyn, power_leak)
return total_power
def calculate_effective_capacitance(self, load):
"""Computes effective capacitance. Results in fF"""
c_load = load
# In fF
c_para = spice["min_tx_drain_c"] * (self.nmos_size / parameter["min_tx_size"])
transition_prob = 0.1094
return transition_prob * (c_load + c_para)
def input_load(self):
"""Return the relative input capacitance of a single input"""
return self.nmos_size + self.pmos_size
def get_stage_effort(self, cout, inp_is_rise=True):
"""
Returns an object representing the parameters for delay in tau units.
Optional is_rise refers to the input direction rise/fall.
Input inverted by this stage.
"""
parasitic_delay = 3
return logical_effort.logical_effort(self.name,
self.size,
self.input_load(),
cout,
parasitic_delay,
not inp_is_rise)
def build_graph(self, graph, inst_name, port_nets):
"""
Adds edges based on inputs/outputs.
Overrides base class function.
"""
self.add_graph_edges(graph, port_nets)
def get_on_resistance(self):
"""On resistance of pnand, defined by stacked NMOS"""
is_nchannel = True
stack = 4
is_cell = False
return self.tr_r_on(self.nmos_width, is_nchannel, stack, is_cell)
def get_input_capacitance(self):
"""Input cap of input, passes width of gates to gate cap function"""
return self.gate_c(self.nmos_width+self.pmos_width)
def get_intrinsic_capacitance(self):
"""Get the drain capacitances of the TXs in the gate."""
nmos_stack = 4
nmos_drain_c = self.drain_c_(self.nmos_width*self.tx_mults,
nmos_stack,
self.tx_mults)
pmos_drain_c = self.drain_c_(self.pmos_width*self.tx_mults,
1,
self.tx_mults)
return nmos_drain_c + pmos_drain_c
| 42.590331
| 121
| 0.539252
|
6efe3cf78e53cb52649e15717cd3d21439a353f2
| 15,463
|
py
|
Python
|
config_custom_ruleset_lambda_fucntions/AMI_NOT_PUBLIC_CHECK/AMI_NOT_PUBLIC_CHECK.py
|
aditya-/DevSecOps_Cloud_Automation_AWS
|
1b0c1f4a20f17648595819b3e31370840a56db6a
|
[
"MIT"
] | 3
|
2019-09-02T14:24:49.000Z
|
2022-02-23T16:22:12.000Z
|
config_custom_ruleset_lambda_fucntions/AMI_NOT_PUBLIC_CHECK/AMI_NOT_PUBLIC_CHECK.py
|
aditya-/DevSecOps_Cloud_Automation_AWS
|
1b0c1f4a20f17648595819b3e31370840a56db6a
|
[
"MIT"
] | null | null | null |
config_custom_ruleset_lambda_fucntions/AMI_NOT_PUBLIC_CHECK/AMI_NOT_PUBLIC_CHECK.py
|
aditya-/DevSecOps_Cloud_Automation_AWS
|
1b0c1f4a20f17648595819b3e31370840a56db6a
|
[
"MIT"
] | 1
|
2019-10-14T19:38:48.000Z
|
2019-10-14T19:38:48.000Z
|
'''
#####################################
## Gherkin ##
#####################################
Rule Name:
AMI_NOT_PUBLIC_CHECK
Description:
Check whether the Amazon Machine Images are not publicly accessible. The rule is NON_COMPLIANT if one or more Amazon Machine Images are publicly accessible.
Trigger:
Periodic
Reports on:
AWS::::Account
Rule Parameters:
None
Scenarios:
Scenario: 1
Given: No AMIs with "is-public" parameter set to True
Then: Return COMPLIANT
Scenario: 2
Given: One or more AMIs with is-public parameter set to True
Then: Return NON_COMPLIANT with Annotation containing AMI IDs
'''
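# Hedged illustration of the evaluation built for Scenario 2 above
# (account id and AMI id are made-up values):
#   {"ComplianceResourceType": "AWS::::Account",
#    "ComplianceResourceId": "111122223333",
#    "ComplianceType": "NON_COMPLIANT",
#    "Annotation": "Public Amazon Machine Image Id: ami-0123456789abcdef0",
#    "OrderingTimestamp": "<notificationCreationTime from the invoking event>"}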
import json
import sys
import datetime
import boto3
import botocore
try:
import liblogging
except ImportError:
pass
# Define the default resource to report to Config Rules
DEFAULT_RESOURCE_TYPE = 'AWS::::Account'
# Set to True to get the lambda to assume the Role attached on the Config Service (useful for cross-account).
ASSUME_ROLE_MODE = False
# Other parameters (no change needed)
CONFIG_ROLE_TIMEOUT_SECONDS = 900
# Generates a list of image ids for the public images
def generate_image_id_list(images, event):
image_ids = []
for image in images:
image_ids.append(image['ImageId'])
return image_ids
def build_annotation(annotation_string):
if len(annotation_string) > 256:
return annotation_string[:244] + " [truncated]"
return annotation_string
def evaluate_compliance(event, configuration_item, valid_rule_parameters):
ec2_client = get_client('ec2', event)
public_ami_result = ec2_client.describe_images(
Filters=[
{
'Name': 'is-public',
'Values': ['true']
}
],
Owners=[event['accountId']]
)
# If the list of public AMIs is not empty, generate a non-compliant response
if public_ami_result['Images']:
evaluations = []
evaluations.append(
build_evaluation(
event['accountId'],
'NON_COMPLIANT',
event,
annotation='Public Amazon Machine Image Id: {}'.format(",".join([image_id for image_id in generate_image_id_list(public_ami_result['Images'], event)]))
)
)
return evaluations
return build_evaluation(event['accountId'], "COMPLIANT", event)
def evaluate_parameters(rule_parameters):
valid_rule_parameters = rule_parameters
return valid_rule_parameters
####################
# Helper Functions #
####################
# Build an error to be displayed in the logs when the parameter is invalid.
def build_parameters_value_error_response(ex):
return build_error_response(internal_error_message="Parameter value is invalid",
internal_error_details="An ValueError was raised during the validation of the Parameter value",
customer_error_code="InvalidParameterValueException",
customer_error_message=str(ex))
# This gets the client after assuming the Config service role
# either in the same AWS account or cross-account.
def get_client(service, event):
if not ASSUME_ROLE_MODE:
return boto3.client(service)
credentials = get_assume_role_credentials(event["executionRoleArn"])
return boto3.client(service, aws_access_key_id=credentials['AccessKeyId'],
aws_secret_access_key=credentials['SecretAccessKey'],
aws_session_token=credentials['SessionToken']
)
# This generates an evaluation for AWS Config.
def build_evaluation(resource_id, compliance_type, event, resource_type=DEFAULT_RESOURCE_TYPE, annotation=None):
eval_cc = {}
if annotation:
eval_cc['Annotation'] = build_annotation(annotation)
eval_cc['ComplianceResourceType'] = resource_type
eval_cc['ComplianceResourceId'] = resource_id
eval_cc['ComplianceType'] = compliance_type
eval_cc['OrderingTimestamp'] = str(json.loads(event['invokingEvent'])['notificationCreationTime'])
return eval_cc
def build_evaluation_from_config_item(configuration_item, compliance_type, annotation=None):
eval_ci = {}
if annotation:
eval_ci['Annotation'] = build_annotation(annotation)
eval_ci['ComplianceResourceType'] = configuration_item['resourceType']
eval_ci['ComplianceResourceId'] = configuration_item['resourceId']
eval_ci['ComplianceType'] = compliance_type
eval_ci['OrderingTimestamp'] = configuration_item['configurationItemCaptureTime']
return eval_ci
####################
# Boilerplate Code #
####################
# Helper function used to validate input
def check_defined(reference, reference_name):
if not reference:
raise Exception('Error: ', reference_name, 'is not defined')
return reference
# Check whether the message is OversizedConfigurationItemChangeNotification or not
def is_oversized_changed_notification(message_type):
check_defined(message_type, 'messageType')
return message_type == 'OversizedConfigurationItemChangeNotification'
# Check whether the message is a ScheduledNotification or not.
def is_scheduled_notification(message_type):
check_defined(message_type, 'messageType')
return message_type == 'ScheduledNotification'
# Get configurationItem using getResourceConfigHistory API
# in case of OversizedConfigurationItemChangeNotification
def get_configuration(resource_type, resource_id, configuration_capture_time):
result = AWS_CONFIG_CLIENT.get_resource_config_history(
resourceType=resource_type,
resourceId=resource_id,
laterTime=configuration_capture_time,
limit=1)
configuration_item = result['configurationItems'][0]
return convert_api_configuration(configuration_item)
# Convert from the API model to the original invocation model
def convert_api_configuration(configuration_item):
for k, v in configuration_item.items():
if isinstance(v, datetime.datetime):
configuration_item[k] = str(v)
configuration_item['awsAccountId'] = configuration_item['accountId']
configuration_item['ARN'] = configuration_item['arn']
configuration_item['configurationStateMd5Hash'] = configuration_item['configurationItemMD5Hash']
configuration_item['configurationItemVersion'] = configuration_item['version']
configuration_item['configuration'] = json.loads(configuration_item['configuration'])
if 'relationships' in configuration_item:
for i in range(len(configuration_item['relationships'])):
configuration_item['relationships'][i]['name'] = configuration_item['relationships'][i]['relationshipName']
return configuration_item
# Based on the message type, get the configuration item
# either from the configurationItem object in the invoking event
# or via the getResourceConfigHistory API in the get_configuration function.
def get_configuration_item(invoking_event):
check_defined(invoking_event, 'invokingEvent')
if is_oversized_changed_notification(invoking_event['messageType']):
configuration_item_summary = check_defined(invoking_event['configuration_item_summary'], 'configurationItemSummary')
return get_configuration(configuration_item_summary['resourceType'], configuration_item_summary['resourceId'], configuration_item_summary['configurationItemCaptureTime'])
if is_scheduled_notification(invoking_event['messageType']):
return None
return check_defined(invoking_event['configurationItem'], 'configurationItem')
# Check whether the resource has been deleted. If it has, then the evaluation is unnecessary.
def is_applicable(configuration_item, event):
try:
check_defined(configuration_item, 'configurationItem')
check_defined(event, 'event')
except Exception:
return True
status = configuration_item['configurationItemStatus']
event_left_scope = event['eventLeftScope']
if status == 'ResourceDeleted':
print("Resource Deleted, setting Compliance Status to NOT_APPLICABLE.")
return status in ('OK', 'ResourceDiscovered') and not event_left_scope
def get_assume_role_credentials(role_arn):
sts_client = boto3.client('sts')
try:
assume_role_response = sts_client.assume_role(RoleArn=role_arn,
RoleSessionName="configLambdaExecution",
DurationSeconds=CONFIG_ROLE_TIMEOUT_SECONDS)
if 'liblogging' in sys.modules:
liblogging.logSession(role_arn, assume_role_response)
return assume_role_response['Credentials']
except botocore.exceptions.ClientError as ex:
# Scrub error message for any internal account info leaks
print(str(ex))
if 'AccessDenied' in ex.response['Error']['Code']:
ex.response['Error']['Message'] = "AWS Config does not have permission to assume the IAM role."
else:
ex.response['Error']['Message'] = "InternalError"
ex.response['Error']['Code'] = "InternalError"
raise ex
# This removes older evaluations (usually useful for periodic rules not reporting on AWS::::Account).
def clean_up_old_evaluations(latest_evaluations, event):
cleaned_evaluations = []
old_eval = AWS_CONFIG_CLIENT.get_compliance_details_by_config_rule(
ConfigRuleName=event['configRuleName'],
ComplianceTypes=['COMPLIANT', 'NON_COMPLIANT'],
Limit=100)
old_eval_list = []
while True:
for old_result in old_eval['EvaluationResults']:
old_eval_list.append(old_result)
if 'NextToken' in old_eval:
next_token = old_eval['NextToken']
old_eval = AWS_CONFIG_CLIENT.get_compliance_details_by_config_rule(
ConfigRuleName=event['configRuleName'],
ComplianceTypes=['COMPLIANT', 'NON_COMPLIANT'],
Limit=100,
NextToken=next_token)
else:
break
for old_eval in old_eval_list:
old_resource_id = old_eval['EvaluationResultIdentifier']['EvaluationResultQualifier']['ResourceId']
newer_founded = False
for latest_eval in latest_evaluations:
if old_resource_id == latest_eval['ComplianceResourceId']:
newer_founded = True
if not newer_founded:
cleaned_evaluations.append(build_evaluation(old_resource_id, "NOT_APPLICABLE", event))
return cleaned_evaluations + latest_evaluations
def lambda_handler(event, context):
if 'liblogging' in sys.modules:
liblogging.logEvent(event)
global AWS_CONFIG_CLIENT
#print(event)
check_defined(event, 'event')
invoking_event = json.loads(event['invokingEvent'])
rule_parameters = {}
if 'ruleParameters' in event:
rule_parameters = json.loads(event['ruleParameters'])
try:
valid_rule_parameters = evaluate_parameters(rule_parameters)
except ValueError as ex:
return build_parameters_value_error_response(ex)
try:
AWS_CONFIG_CLIENT = get_client('config', event)
if invoking_event['messageType'] in ['ConfigurationItemChangeNotification', 'ScheduledNotification', 'OversizedConfigurationItemChangeNotification']:
configuration_item = get_configuration_item(invoking_event)
if is_applicable(configuration_item, event):
compliance_result = evaluate_compliance(event, configuration_item, valid_rule_parameters)
else:
compliance_result = "NOT_APPLICABLE"
else:
return build_internal_error_response('Unexpected message type', str(invoking_event))
except botocore.exceptions.ClientError as ex:
if is_internal_error(ex):
return build_internal_error_response("Unexpected error while completing API request", str(ex))
return build_error_response("Customer error while making API request", str(ex), ex.response['Error']['Code'], ex.response['Error']['Message'])
except ValueError as ex:
return build_internal_error_response(str(ex), str(ex))
evaluations = []
latest_evaluations = []
if not compliance_result:
latest_evaluations.append(build_evaluation(event['accountId'], "NOT_APPLICABLE", event, resource_type='AWS::::Account'))
evaluations = clean_up_old_evaluations(latest_evaluations, event)
elif isinstance(compliance_result, str):
if configuration_item:
evaluations.append(build_evaluation_from_config_item(configuration_item, compliance_result))
else:
evaluations.append(build_evaluation(event['accountId'], compliance_result, event, resource_type=DEFAULT_RESOURCE_TYPE))
elif isinstance(compliance_result, list):
for evaluation in compliance_result:
missing_fields = False
for field in ('ComplianceResourceType', 'ComplianceResourceId', 'ComplianceType', 'OrderingTimestamp'):
if field not in evaluation:
print("Missing " + field + " from custom evaluation.")
missing_fields = True
if not missing_fields:
latest_evaluations.append(evaluation)
evaluations = clean_up_old_evaluations(latest_evaluations, event)
elif isinstance(compliance_result, dict):
missing_fields = False
for field in ('ComplianceResourceType', 'ComplianceResourceId', 'ComplianceType', 'OrderingTimestamp'):
if field not in compliance_result:
print("Missing " + field + " from custom evaluation.")
missing_fields = True
if not missing_fields:
evaluations.append(compliance_result)
else:
evaluations.append(build_evaluation_from_config_item(configuration_item, 'NOT_APPLICABLE'))
# Put together the request that reports the evaluation status
result_token = event['resultToken']
test_mode = False
if result_token == 'TESTMODE':
# Used solely for RDK test to skip actual put_evaluation API call
test_mode = True
# Invoke the Config API to report the result of the evaluation
evaluation_copy = []
evaluation_copy = evaluations[:]
while evaluation_copy:
AWS_CONFIG_CLIENT.put_evaluations(Evaluations=evaluation_copy[:100], ResultToken=result_token, TestMode=test_mode)
del evaluation_copy[:100]
# Used solely for RDK test to be able to test Lambda function
return evaluations
def is_internal_error(exception):
return ((not isinstance(exception, botocore.exceptions.ClientError)) or exception.response['Error']['Code'].startswith('5')
or 'InternalError' in exception.response['Error']['Code'] or 'ServiceError' in exception.response['Error']['Code'])
def build_internal_error_response(internal_error_message, internal_error_details=None):
return build_error_response(internal_error_message, internal_error_details, 'InternalError', 'InternalError')
def build_error_response(internal_error_message, internal_error_details=None, customer_error_code=None, customer_error_message=None):
error_response = {
'internalErrorMessage': internal_error_message,
'internalErrorDetails': internal_error_details,
'customerErrorMessage': customer_error_message,
'customerErrorCode': customer_error_code
}
print(error_response)
return error_response
| 42.833795
| 178
| 0.706267
|
b9970fbcb4163a4c8ec99dca7fd36bd2a6e95059
| 438
|
py
|
Python
|
python/rabbit/Scripts/easy_install-script.py
|
kindrabbit/programming
|
2c9b7e24e33ecc174c2efb51727b3886ebc00acf
|
[
"Apache-2.0"
] | 1
|
2021-01-24T02:07:34.000Z
|
2021-01-24T02:07:34.000Z
|
python/rabbit/Scripts/easy_install-script.py
|
kindrabbit/programming
|
2c9b7e24e33ecc174c2efb51727b3886ebc00acf
|
[
"Apache-2.0"
] | null | null | null |
python/rabbit/Scripts/easy_install-script.py
|
kindrabbit/programming
|
2c9b7e24e33ecc174c2efb51727b3886ebc00acf
|
[
"Apache-2.0"
] | null | null | null |
#!E:\thomas_program\python\rabbit\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| 33.692308
| 83
| 0.694064
|
ddc2ee85ae38761d55a9f35e1161725df1c47108
| 1,196
|
py
|
Python
|
hello_network.py
|
xlsong2002/test
|
ddaacb2669637b5355d69cd9c788cc8fe261df45
|
[
"MIT"
] | 2
|
2021-02-16T19:19:01.000Z
|
2021-05-07T04:50:43.000Z
|
hello_network.py
|
xlsong2002/test
|
ddaacb2669637b5355d69cd9c788cc8fe261df45
|
[
"MIT"
] | null | null | null |
hello_network.py
|
xlsong2002/test
|
ddaacb2669637b5355d69cd9c788cc8fe261df45
|
[
"MIT"
] | 15
|
2019-03-26T08:55:38.000Z
|
2022-02-19T21:10:46.000Z
|
#!/usr/bin/env python
"""Simple hello world (network) script.
Script Dependencies:
none
Copyright (c) 2018 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
print("Hello Network!")
| 39.866667
| 78
| 0.79097
|
fd4f84f3ec61fbebc171abbca59803df686df1a0
| 1,666
|
py
|
Python
|
tools/picker/blades/console/write.py
|
laurent-malvert/lemona
|
0b8b6364932c1e8865b3cdfd0f475cbff7584ab2
|
[
"X11",
"MIT"
] | null | null | null |
tools/picker/blades/console/write.py
|
laurent-malvert/lemona
|
0b8b6364932c1e8865b3cdfd0f475cbff7584ab2
|
[
"X11",
"MIT"
] | null | null | null |
tools/picker/blades/console/write.py
|
laurent-malvert/lemona
|
0b8b6364932c1e8865b3cdfd0f475cbff7584ab2
|
[
"X11",
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
##
## This file is part of Lemona.
## Copyright (C) 2008 Kenfe-Mickaël Laventure
##
## The contents of this file are subject to the terms of either the
## GNU General Public License Version 2 ("GPL") or the MIT License
## (collectively, the "License"). You may not use this file except in
## compliance with the License. You can obtain a copy of the License
## at http://www.opensource.org/licenses/gpl-2.0.php and
## http://www.opensource.org/licenses/mit-license.php or GPL.LICENSE
## and MIT.LICENSE. See the License for the specific language
## governing permissions and limitations under the License.
##
from struct import unpack_from
from blades import console
sysnr = { "2.6.26.3" : 4 }
sysname = "__NR_write"
def ProcessIn(z):
# some sanity check
if z.argnr != 3 or z.extnr != 0:
raise NameError("Invalid '%s' Zest (In)" % sysname, z)
#fd & count (i.e. buffer size)
print " %s | %s " % ("FD".center(37), "BUFFER SIZE".center(37))
print "-" * 80
fd = unpack_from("i", z.args[0])[0]
count = unpack_from("i", z.args[1])[0]
print " %s | %s " % (str(fd).center(37), str(count).center(37))
print "-" * 80
if count > 0:
console.PrintBuffer("BUFFER", z.args[2], z.argsz[2])
def ProcessOut(z):
# some sanity check
if z.argnr != 1 or z.extnr != 0:
raise NameError("Invalid '%s' Zest (Out)" % sysname, zest)
ret = unpack_from("i", z.args[0])[0]
print " %s | %s " % ("RETURNED".center(17), str(ret).center(58))
print "-" * 80
def Process(zest):
if zest.inout == True:
ProcessIn(zest)
else:
ProcessOut(zest)
| 33.32
| 69
| 0.617647
|
a97e7ab2a971714f69c593ec404449aca01062ab
| 7,276
|
py
|
Python
|
splunk-custom.py
|
ashbyca/rsc-admin
|
7af545930ecce054befe2b076a36ef45d3077119
|
[
"MIT"
] | null | null | null |
splunk-custom.py
|
ashbyca/rsc-admin
|
7af545930ecce054befe2b076a36ef45d3077119
|
[
"MIT"
] | 1
|
2018-12-19T17:19:51.000Z
|
2018-12-19T17:20:09.000Z
|
splunk-custom.py
|
ashbyca/misc-scripts
|
7af545930ecce054befe2b076a36ef45d3077119
|
[
"MIT"
] | null | null | null |
############## CONFIGURATION ########################################################################
'''
The purpose of this custom Python code is to create PTR / TRAP alerts from Splunk alerts.
Step 1: Upload this code to Main Menu -> Scripts -> ETL Scripts
Step 2: Create a PTR Scripted Listener Event Source
Step 2a: Select the script uploaded in Step 1 -> SAVE
Step 2b: Note the POST URL exposed after SAVE; you will need this URL for the next step
Step 3: Configure Splunk to send alerts to the PTR using the POST URL from Step 2b.
'''
############## jyun@proofpoint.com ##################################################################
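# Added illustration (not in the original script): a trimmed example of the JSON body
# Splunk's webhook alert action POSTs to the PTR listener URL from Step 2b. The field
# values are placeholders; parse_alert() below only relies on "search_name",
# "results_link" and "result".
#
# {
#   "search_name": "Geographically Improbable Access",
#   "results_link": "https://splunk.example.com/app/search/...",
#   "result": {"src": "203.0.113.10", "user": "jdoe"}
# }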
import requests
import json
import datetime
import json_sdk
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def create_alert(alert_description,alert_severity,search_name,attacker_ip=None,user_account=None):
alert=json_sdk.Alert()
alert.set_description(alert_description)
alert.set_severity(alert_severity)
alert.set_threat_info(json_sdk.ThreatInfo(
name=search_name,
threat_type="Splunk Alert",
occurred_at=datetime.datetime.utcnow().isoformat()+"Z"
))
if attacker_ip!=None:
alert.set_attacker(json_sdk.Attacker(
ip_address=attacker_ip
))
alert.add_custom_field('Attacker IP',attacker_ip)
if user_account!=None:
alert.set_target(json_sdk.Target(
user=user_account
))
alert.add_custom_field('User',user_account)
alert.set_detector(json_sdk.Detector(
product='SIEM',
vendor='Splunk'
))
alert.add_custom_field('Summary',search_name)
return alert
def parse_alert():
postdata1=ptr.DEVICE_ALERT_DATA
print(postdata1)
postdata=json.loads(postdata1)
try:
typealert = postdata["search_name"]
print("typealert: ", typealert)
if typealert == "CloudTrail Alert: Unauthorized Actions":
results_link=postdata["results_link"]
threat_description=postdata["result"]
alert_description=typealert+"\n"+results_link+"\n "+ json.dumps(threat_description,indent=4)
alert_severity="high"
result=create_alert(alert_description=alert_description,alert_severity=alert_severity,search_name=typealert)
return result
elif typealert == "Geographically Improbable Access":
results_link=postdata["results_link"]
threat_description=postdata["result"]
alert_description=typealert+"\n"+results_link+"\n "+ json.dumps(threat_description,indent=4)
alert_severity="high"
attacker_ip=postdata["result"]["src"]
result=create_alert(alert_description=alert_description,alert_severity=alert_severity,search_name=typealert,attacker_ip=attacker_ip)
return result
elif typealert == "Suspected Network Scanning":
results_link=postdata["results_link"]
threat_description=postdata["result"]
alert_description=typealert+"\n"+results_link+"\n "+ json.dumps(threat_description,indent=4)
alert_severity="low"
attacker_ip=postdata["result"]["src_ip"]
result=create_alert(alert_description=alert_description,alert_severity=alert_severity,search_name=typealert,attacker_ip=attacker_ip)
return result
elif typealert == "Locked Out Accounts":
results_link=postdata["results_link"]
threat_description=postdata["result"]
alert_description=typealert+"\n"+results_link+"\n "+ json.dumps(threat_description,indent=4)
alert_severity="low"
user_account=postdata["result"]["User Account"]
result=create_alert(alert_description=alert_description,alert_severity=alert_severity,search_name=typealert,user_account=user_account)
return result
elif typealert == "Okta Detected IP Threat":
results_link=postdata["results_link"]
threat_description=postdata["result"]
alert_description=typealert+"\n"+results_link+"\n "+ json.dumps(threat_description,indent=4)
alert_severity="low"
attacker_ip=postdata["result"]["src_ip"]
result=create_alert(alert_description=alert_description,alert_severity=alert_severity,search_name=typealert,attacker_ip=attacker_ip)
return result
elif typealert == "CloudTrail Alert: IAM: Create/Delete/Update Access Keys":
results_link=postdata["results_link"]
threat_description=postdata["result"]
alert_description=typealert+"\n"+results_link+"\n "+ json.dumps(threat_description,indent=4)
alert_severity="high"
result=create_alert(alert_description=alert_description,alert_severity=alert_severity,search_name=typealert)
return result
elif typealert == "CloudTrail Alert: Security Groups: Create/Delete Groups":
results_link=postdata["results_link"]
threat_description=postdata["result"]
alert_description=typealert+"\n"+results_link+"\n "+ json.dumps(threat_description,indent=4)
alert_severity="high"
result=create_alert(alert_description=alert_description,alert_severity=alert_severity,search_name=typealert)
return result
elif typealert == "CloudTrail Alert: IAM: Create/Delete Roles":
results_link=postdata["results_link"]
threat_description=postdata["result"]
alert_description=typealert+"\n"+results_link+"\n "+ json.dumps(threat_description,indent=4)
alert_severity="high"
result=create_alert(alert_description=alert_description,alert_severity=alert_severity,search_name=typealert)
return result
elif typealert == "CloudTrail Alert: Key Pairs: Create/Delete/Import Key Pairs":
results_link=postdata["results_link"]
threat_description=postdata["result"]
alert_description=typealert+"\n"+results_link+"\n "+ json.dumps(threat_description,indent=4)
alert_severity="high"
result=create_alert(alert_description=alert_description,alert_severity=alert_severity,search_name=typealert)
return result
elif typealert == "High Number of KTG Requests":
results_link=postdata["results_link"]
threat_description=postdata["result"]
alert_description=typealert+"\n"+results_link+"\n "+ json.dumps(threat_description,indent=4)
alert_severity="high"
result=create_alert(alert_description=alert_description,alert_severity=alert_severity,search_name=typealert)
return result
else:
alert_description="NO ALERT MATCH...... "
alert_severity="low"
result=create_alert(alert_description=typealert+"\n"+alert_description,alert_severity=alert_severity,search_name=typealert)
return result
except Exception as e:
print("Exception:", e)
print("NO action defined...ignoring")
return
| 48.506667
| 146
| 0.669874
|
a2bb2f1f961189d04343533e8367c3b4dc753b2d
| 17,600
|
py
|
Python
|
neutron/tests/fullstack/resources/environment.py
|
tankertyp/openstack-learning
|
d729672663f170d0138ecf23b3c23df225c1b1b8
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/fullstack/resources/environment.py
|
tankertyp/openstack-learning
|
d729672663f170d0138ecf23b3c23df225c1b1b8
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/fullstack/resources/environment.py
|
tankertyp/openstack-learning
|
d729672663f170d0138ecf23b3c23df225c1b1b8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from neutron_lib import constants
from neutronclient.common import exceptions as nc_exc
from oslo_config import cfg
from neutron.agent.linux import ip_lib
from neutron.common import utils as common_utils
from neutron.plugins.ml2.drivers.linuxbridge.agent import \
linuxbridge_neutron_agent as lb_agent
from neutron.tests.common.exclusive_resources import ip_address
from neutron.tests.common.exclusive_resources import ip_network
from neutron.tests.common import net_helpers
from neutron.tests.fullstack.resources import config
from neutron.tests.fullstack.resources import process
class EnvironmentDescription(object):
"""A set of characteristics of an environment setup.
Does the setup, as a whole, support tunneling? How about l2pop?
"""
def __init__(self, network_type='vxlan', l2_pop=True, qos=False,
mech_drivers='openvswitch,linuxbridge',
service_plugins='router', arp_responder=False,
agent_down_time=75, router_scheduler=None,
global_mtu=constants.DEFAULT_NETWORK_MTU,
debug_iptables=False, log=False, report_bandwidths=False,
has_placement=False, placement_port=None,
dhcp_scheduler_class=None, ml2_extension_drivers=None):
self.network_type = network_type
self.l2_pop = l2_pop
self.qos = qos
self.log = log
self.network_range = None
self.mech_drivers = mech_drivers
self.arp_responder = arp_responder
self.agent_down_time = agent_down_time
self.router_scheduler = router_scheduler
self.global_mtu = global_mtu
self.service_plugins = service_plugins
self.debug_iptables = debug_iptables
self.report_bandwidths = report_bandwidths
self.has_placement = has_placement
self.placement_port = placement_port
self.dhcp_scheduler_class = dhcp_scheduler_class
if self.qos:
self.service_plugins += ',qos'
if self.log:
self.service_plugins += ',log'
self.ml2_extension_drivers = ml2_extension_drivers
@property
def tunneling_enabled(self):
return self.network_type in ('vxlan', 'gre')
class HostDescription(object):
"""A set of characteristics of an environment Host.
What agents should the host spawn? What mode should each agent operate
under?
"""
def __init__(self, l3_agent=False, dhcp_agent=False,
l2_agent_type=constants.AGENT_TYPE_OVS,
firewall_driver='noop', availability_zone=None,
l3_agent_mode=None,
l3_agent_extensions=None):
self.l2_agent_type = l2_agent_type
self.l3_agent = l3_agent
self.dhcp_agent = dhcp_agent
self.firewall_driver = firewall_driver
self.availability_zone = availability_zone
self.l3_agent_mode = l3_agent_mode
self.l3_agent_extensions = l3_agent_extensions
class Host(fixtures.Fixture):
"""The Host class models a physical host running agents, all reporting with
the same hostname.
OpenStack installers or administrators connect compute nodes to the
physical tenant network by connecting the provider bridges to their
respective physical NICs. Or, if using tunneling, by configuring an
IP address on the appropriate physical NIC. The Host class does the same
with the connect_* methods.
TODO(amuller): Add start/stop/restart methods that will start/stop/restart
all of the agents on this host. Add a kill method that stops all agents
and disconnects the host from other hosts.
"""
def __init__(self, env_desc, host_desc, test_name,
neutron_config, central_bridge):
self.env_desc = env_desc
self.host_desc = host_desc
self.test_name = test_name
self.neutron_config = neutron_config
self.central_bridge = central_bridge
self.host_namespace = None
self.agents = {}
# we need to cache already created "per network" bridges if linuxbridge
# agent is used on host:
self.network_bridges = {}
def _setUp(self):
self.local_ip = self.allocate_local_ip()
if self.host_desc.l2_agent_type == constants.AGENT_TYPE_OVS:
self.setup_host_with_ovs_agent()
elif self.host_desc.l2_agent_type == constants.AGENT_TYPE_NIC_SWITCH:
self.setup_host_with_sriov_agent()
elif self.host_desc.l2_agent_type == constants.AGENT_TYPE_LINUXBRIDGE:
self.setup_host_with_linuxbridge_agent()
if self.host_desc.l3_agent:
self.l3_agent = self.useFixture(
process.L3AgentFixture(
self.env_desc, self.host_desc,
self.test_name,
self.neutron_config,
self.l3_agent_cfg_fixture))
if self.host_desc.dhcp_agent:
self.dhcp_agent = self.useFixture(
process.DhcpAgentFixture(
self.env_desc, self.host_desc,
self.test_name,
self.neutron_config,
self.dhcp_agent_cfg_fixture,
namespace=self.host_namespace))
def setup_host_with_ovs_agent(self):
agent_cfg_fixture = config.OVSConfigFixture(
self.env_desc, self.host_desc, self.neutron_config.temp_dir,
self.local_ip, test_name=self.test_name)
self.useFixture(agent_cfg_fixture)
self.br_phys = self.useFixture(
net_helpers.OVSBridgeFixture(
agent_cfg_fixture.get_br_phys_name())).bridge
if self.env_desc.tunneling_enabled:
self.useFixture(
net_helpers.OVSBridgeFixture(
agent_cfg_fixture.get_br_tun_name())).bridge
self.connect_to_central_network_via_tunneling()
else:
self.connect_to_central_network_via_vlans(self.br_phys)
self.ovs_agent = self.useFixture(
process.OVSAgentFixture(
self.env_desc, self.host_desc,
self.test_name, self.neutron_config, agent_cfg_fixture))
if self.host_desc.l3_agent:
self.l3_agent_cfg_fixture = self.useFixture(
config.L3ConfigFixture(
self.env_desc, self.host_desc,
self.neutron_config.temp_dir,
self.ovs_agent.agent_cfg_fixture.get_br_int_name()))
if self.host_desc.dhcp_agent:
self.dhcp_agent_cfg_fixture = self.useFixture(
config.DhcpConfigFixture(
self.env_desc, self.host_desc,
self.neutron_config.temp_dir,
self.ovs_agent.agent_cfg_fixture.get_br_int_name()))
def setup_host_with_sriov_agent(self):
agent_cfg_fixture = config.SRIOVConfigFixture(
self.env_desc, self.host_desc, self.neutron_config.temp_dir,
self.local_ip)
self.useFixture(agent_cfg_fixture)
self.sriov_agent = self.useFixture(
process.SRIOVAgentFixture(
self.env_desc, self.host_desc,
self.test_name, self.neutron_config, agent_cfg_fixture))
def setup_host_with_linuxbridge_agent(self):
# First we need to provide connectivity for agent to prepare proper
# bridge mappings in agent's config:
self.host_namespace = self.useFixture(
net_helpers.NamespaceFixture(prefix="host-")
).name
self.connect_namespace_to_control_network()
agent_cfg_fixture = config.LinuxBridgeConfigFixture(
self.env_desc, self.host_desc,
self.neutron_config.temp_dir,
self.local_ip,
physical_device_name=self.host_port.name
)
self.useFixture(agent_cfg_fixture)
self.linuxbridge_agent = self.useFixture(
process.LinuxBridgeAgentFixture(
self.env_desc, self.host_desc,
self.test_name, self.neutron_config, agent_cfg_fixture,
namespace=self.host_namespace
)
)
if self.host_desc.l3_agent:
self.l3_agent_cfg_fixture = self.useFixture(
config.L3ConfigFixture(
self.env_desc, self.host_desc,
self.neutron_config.temp_dir))
if self.host_desc.dhcp_agent:
self.dhcp_agent_cfg_fixture = self.useFixture(
config.DhcpConfigFixture(
self.env_desc, self.host_desc,
self.neutron_config.temp_dir))
def _connect_ovs_port(self, cidr_address):
ovs_device = self.useFixture(
net_helpers.OVSPortFixture(
bridge=self.central_bridge,
namespace=self.host_namespace)).port
# NOTE: This sets an IP address on the host's root namespace
# which is cleaned up when the device is deleted.
ovs_device.addr.add(cidr_address)
return ovs_device
def connect_namespace_to_control_network(self):
self.host_port = self._connect_ovs_port(
common_utils.ip_to_cidr(self.local_ip, 24)
)
self.host_port.link.set_up()
def connect_to_central_network_via_tunneling(self):
veth_1, veth_2 = self.useFixture(
net_helpers.VethFixture()).ports
# NOTE: This sets an IP address on the host's root namespace
# which is cleaned up when the device is deleted.
veth_1.addr.add(common_utils.ip_to_cidr(self.local_ip, 32))
veth_1.link.set_up()
veth_2.link.set_up()
def connect_to_central_network_via_vlans(self, host_data_bridge):
# If using VLANs as the segmentation type, the provider bridge needs to be
# connected to a centralized, shared bridge.
net_helpers.create_patch_ports(
self.central_bridge, host_data_bridge)
def allocate_local_ip(self):
if not self.env_desc.network_range:
return str(self.useFixture(
ip_address.ExclusiveIPAddress(
'240.0.0.1', '240.255.255.254')).address)
return str(self.useFixture(
ip_address.ExclusiveIPAddress(
str(self.env_desc.network_range[2]),
str(self.env_desc.network_range[-2]))).address)
def get_bridge(self, network_id):
if "ovs" in self.agents.keys():
return self.ovs_agent.br_int
elif "linuxbridge" in self.agents.keys():
bridge = self.network_bridges.get(network_id, None)
if not bridge:
br_prefix = lb_agent.LinuxBridgeManager.get_bridge_name(
network_id)
bridge = self.useFixture(
net_helpers.LinuxBridgeFixture(
prefix=br_prefix,
namespace=self.host_namespace,
prefix_is_full_name=True)).bridge
self.network_bridges[network_id] = bridge
return bridge
@property
def hostname(self):
return self.neutron_config.config.DEFAULT.host
@property
def l3_agent(self):
return self.agents['l3']
@l3_agent.setter
def l3_agent(self, agent):
self.agents['l3'] = agent
@property
def dhcp_agent(self):
return self.agents['dhcp']
@dhcp_agent.setter
def dhcp_agent(self, agent):
self.agents['dhcp'] = agent
@property
def ovs_agent(self):
return self.agents['ovs']
@ovs_agent.setter
def ovs_agent(self, agent):
self.agents['ovs'] = agent
@property
def sriov_agent(self):
return self.agents['sriov']
@sriov_agent.setter
def sriov_agent(self, agent):
self.agents['sriov'] = agent
@property
def linuxbridge_agent(self):
return self.agents['linuxbridge']
@linuxbridge_agent.setter
def linuxbridge_agent(self, agent):
self.agents['linuxbridge'] = agent
@property
def l2_agent(self):
if self.host_desc.l2_agent_type == constants.AGENT_TYPE_LINUXBRIDGE:
return self.linuxbridge_agent
elif self.host_desc.l2_agent_type == constants.AGENT_TYPE_OVS:
return self.ovs_agent
elif self.host_desc.l2_agent_type == constants.AGENT_TYPE_NIC_SWITCH:
return self.sriov_agent
class Environment(fixtures.Fixture):
"""Represents a deployment topology.
Environment is a collection of hosts. It starts a Neutron server
and a parametrized number of Hosts, each a collection of agents.
The Environment accepts a collection of HostDescription, each describing
the type of Host to create.
"""
def __init__(self, env_desc, hosts_desc):
"""Initialize Environment
:param env_desc: An EnvironmentDescription instance.
:param hosts_desc: A list of HostDescription instances.
"""
super(Environment, self).__init__()
self.env_desc = env_desc
self.hosts_desc = hosts_desc
self.hosts = []
def wait_until_env_is_up(self):
common_utils.wait_until_true(
self._processes_are_ready,
timeout=180,
sleep=10)
def _processes_are_ready(self):
try:
running_agents = self.neutron_server.client.list_agents()['agents']
agents_count = sum(len(host.agents) for host in self.hosts)
return len(running_agents) == agents_count
except nc_exc.NeutronClientException:
return False
def _create_host(self, host_desc):
temp_dir = self.useFixture(fixtures.TempDir()).path
neutron_config = config.NeutronConfigFixture(
self.env_desc, host_desc, temp_dir,
cfg.CONF.database.connection, self.rabbitmq_environment)
self.useFixture(neutron_config)
return self.useFixture(
Host(self.env_desc,
host_desc,
self.test_name,
neutron_config,
self.central_bridge))
def _setUp(self):
self.temp_dir = self.useFixture(fixtures.TempDir()).path
# we need this bridge before the rabbitmq and neutron services start
self.central_bridge = self.useFixture(
net_helpers.OVSBridgeFixture('cnt-data')).bridge
# Get rabbitmq address (and cnt-data network)
rabbitmq_ip_address = self._configure_port_for_rabbitmq()
self.rabbitmq_environment = self.useFixture(
process.RabbitmqEnvironmentFixture(host=rabbitmq_ip_address)
)
plugin_cfg_fixture = self.useFixture(
config.ML2ConfigFixture(
self.env_desc, self.hosts_desc, self.temp_dir,
self.env_desc.network_type))
neutron_cfg_fixture = self.useFixture(
config.NeutronConfigFixture(
self.env_desc, None, self.temp_dir,
cfg.CONF.database.connection, self.rabbitmq_environment))
self.neutron_server = self.useFixture(
process.NeutronServerFixture(
self.env_desc, None,
self.test_name, neutron_cfg_fixture, plugin_cfg_fixture))
if self.env_desc.has_placement:
placement_cfg_fixture = self.useFixture(
config.PlacementConfigFixture(self.env_desc, self.hosts_desc,
self.temp_dir)
)
self.placement = self.useFixture(
process.PlacementFixture(
self.env_desc, self.hosts_desc, self.test_name,
placement_cfg_fixture)
)
self.hosts = [self._create_host(desc) for desc in self.hosts_desc]
self.wait_until_env_is_up()
def _configure_port_for_rabbitmq(self):
self.env_desc.network_range = self._get_network_range()
if not self.env_desc.network_range:
return "127.0.0.1"
rabbitmq_ip = str(self.env_desc.network_range[1])
rabbitmq_port = ip_lib.IPDevice(self.central_bridge.br_name)
rabbitmq_port.addr.add(common_utils.ip_to_cidr(rabbitmq_ip, 24))
rabbitmq_port.link.set_up()
return rabbitmq_ip
def _get_network_range(self):
# NOTE(slaweq): We need to choose an IP address on which rabbitmq will be
# available because LinuxBridge agents are spawned in their own
# namespaces and need to know where the rabbitmq server is listening.
# For the OVS agent this is not necessary because those agents are spawned
# in the global scope together with the rabbitmq server, so the default
# localhost address is fine for them.
for desc in self.hosts_desc:
if desc.l2_agent_type == constants.AGENT_TYPE_LINUXBRIDGE:
return self.useFixture(
ip_network.ExclusiveIPNetwork(
"240.0.0.0", "240.255.255.255", "24")).network
| 38.76652
| 79
| 0.648977
|
54496375b538ae3aad7296d7d31b61a717ab0968
| 6,804
|
py
|
Python
|
test/functional/rpc_getblockstats.py
|
Sebz84/ain
|
451abddc7802ac4ee4dbf30117ca074414f4fdca
|
[
"MIT"
] | null | null | null |
test/functional/rpc_getblockstats.py
|
Sebz84/ain
|
451abddc7802ac4ee4dbf30117ca074414f4fdca
|
[
"MIT"
] | null | null | null |
test/functional/rpc_getblockstats.py
|
Sebz84/ain
|
451abddc7802ac4ee4dbf30117ca074414f4fdca
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
#
# Test getblockstats rpc call
#
from test_framework.test_framework import DefiTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
from test_framework.blocktools import TIME_GENESIS_BLOCK
import json
import os
TESTSDIR = os.path.dirname(os.path.realpath(__file__))
class GetblockstatsTest(DefiTestFramework):
start_height = 101
max_stat_pos = 2
def add_options(self, parser):
parser.add_argument('--gen-test-data', dest='gen_test_data',
default=False, action='store_true',
help='Generate test data')
parser.add_argument('--test-data', dest='test_data',
default='data/rpc_getblockstats.json',
action='store', metavar='FILE',
help='Test data file')
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def get_stats(self):
return [self.nodes[0].getblockstats(hash_or_height=self.start_height + i) for i in range(self.max_stat_pos+1)]
def generate_test_data(self, filename):
mocktime = TIME_GENESIS_BLOCK+1
self.nodes[0].setmocktime(mocktime)
self.nodes[0].generate(101)
address = self.nodes[0].get_genesis_keys().ownerAuthAddress
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=False)
self.nodes[0].settxfee(amount=0.003)
self.nodes[0].sendtoaddress(address=address, amount=1, subtractfeefromamount=True)
self.nodes[0].generate(1)
self.expected_stats = self.get_stats()
blocks = []
tip = self.nodes[0].getbestblockhash()
blockhash = None
height = 0
while tip != blockhash:
blockhash = self.nodes[0].getblockhash(height)
blocks.append(self.nodes[0].getblock(blockhash, 0))
height += 1
to_dump = {
'blocks': blocks,
'mocktime': int(mocktime),
'stats': self.expected_stats,
}
with open(filename, 'w', encoding="utf8") as f:
json.dump(to_dump, f, sort_keys=True, indent=2)
def load_test_data(self, filename):
with open(filename, 'r', encoding="utf8") as f:
d = json.load(f)
blocks = d['blocks']
mocktime = d['mocktime']
self.expected_stats = d['stats']
# Set the timestamps from the file so that the nodes can get out of Initial Block Download
self.nodes[0].setmocktime(mocktime)
for b in blocks:
self.nodes[0].submitblock(b)
def run_test(self):
test_data = os.path.join(TESTSDIR, self.options.test_data)
if self.options.gen_test_data:
self.generate_test_data(test_data)
else:
self.load_test_data(test_data)
stats = self.get_stats()
# Make sure all valid statistics are included but nothing else is
expected_keys = self.expected_stats[0].keys()
assert_equal(set(stats[0].keys()), set(expected_keys))
assert_equal(stats[0]['height'], self.start_height)
assert_equal(stats[self.max_stat_pos]['height'], self.start_height + self.max_stat_pos)
for i in range(self.max_stat_pos+1):
self.log.info('Checking block %d\n' % (i))
assert_equal(stats[i], self.expected_stats[i])
# Check selecting block by hash too
blockhash = self.expected_stats[i]['blockhash']
stats_by_hash = self.nodes[0].getblockstats(hash_or_height=blockhash)
assert_equal(stats_by_hash, self.expected_stats[i])
# Make sure each stat can be queried on its own
for stat in expected_keys:
for i in range(self.max_stat_pos+1):
result = self.nodes[0].getblockstats(hash_or_height=self.start_height + i, stats=[stat])
assert_equal(list(result.keys()), [stat])
if result[stat] != self.expected_stats[i][stat]:
self.log.info('result[%s] (%d) failed, %r != %r' % (
stat, i, result[stat], self.expected_stats[i][stat]))
assert_equal(result[stat], self.expected_stats[i][stat])
# Make sure only the selected statistics are included (more than one)
some_stats = {'minfee', 'maxfee'}
stats = self.nodes[0].getblockstats(hash_or_height=1, stats=list(some_stats))
assert_equal(set(stats.keys()), some_stats)
# Test invalid parameters raise the proper json exceptions
tip = self.start_height + self.max_stat_pos
assert_raises_rpc_error(-8, 'Target block height %d after current tip %d' % (tip+1, tip),
self.nodes[0].getblockstats, hash_or_height=tip+1)
assert_raises_rpc_error(-8, 'Target block height %d is negative' % (-1),
self.nodes[0].getblockstats, hash_or_height=-1)
# Make sure invalid stats aren't allowed
inv_sel_stat = 'asdfghjkl'
inv_stats = [
[inv_sel_stat],
['minfee' , inv_sel_stat],
[inv_sel_stat, 'minfee'],
['minfee', inv_sel_stat, 'maxfee'],
]
for inv_stat in inv_stats:
assert_raises_rpc_error(-8, 'Invalid selected statistic %s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=inv_stat)
# Make sure we aren't always returning inv_sel_stat as the culprit stat
assert_raises_rpc_error(-8, 'Invalid selected statistic aaa%s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=['minfee' , 'aaa%s' % inv_sel_stat])
# Mainchain's genesis block shouldn't be found on regtest
assert_raises_rpc_error(-5, 'Block not found', self.nodes[0].getblockstats,
hash_or_height='000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f')
# Invalid number of args
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats, '00', 1, 2)
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats)
if __name__ == '__main__':
GetblockstatsTest().main()
| 42
| 121
| 0.628307
|
c3927997db889ca25dd55f32867b6afa548a7624
| 8,430
|
py
|
Python
|
src/cmdlib.py
|
barthy1/coreos-assembler
|
bd3c26292b2e7aae8d8fa026488abff56e8cd8f0
|
[
"Apache-2.0"
] | null | null | null |
src/cmdlib.py
|
barthy1/coreos-assembler
|
bd3c26292b2e7aae8d8fa026488abff56e8cd8f0
|
[
"Apache-2.0"
] | null | null | null |
src/cmdlib.py
|
barthy1/coreos-assembler
|
bd3c26292b2e7aae8d8fa026488abff56e8cd8f0
|
[
"Apache-2.0"
] | null | null | null |
# Python version of cmdlib.sh
"""
Houses helper code for python based coreos-assembler commands.
"""
import hashlib
import json
import os
import subprocess
import sys
import tempfile
import gi
import semver
gi.require_version("RpmOstree", "1.0")
from gi.repository import RpmOstree
from datetime import datetime
def run_verbose(args, **kwargs):
"""
Prints out the command being executed before executing a subprocess call.
:param args: All non-keyword arguments
:type args: list
:param kwargs: All keyword arguments
:type kwargs: dict
:raises: CalledProcessError
"""
print("+ {}".format(subprocess.list2cmdline(args)))
# default to throwing exception
if 'check' not in kwargs.keys():
kwargs['check'] = True
# capture_output is only on python 3.7+. Provide convenience here
# until 3.7 is a baseline:
if kwargs.pop('capture_output', False):
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.PIPE
try:
process = subprocess.run(args, **kwargs)
except subprocess.CalledProcessError:
fatal("Error running command " + args[0])
return process
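# Example usage (added for illustration; the command shown is arbitrary):
#   proc = run_verbose(['echo', 'hello'], capture_output=True)
#   print(proc.stdout.decode())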
def write_json(path, data):
"""
Shortcut for writing a structure as json to the file system.
:param path: The full path to the file to write
:type: path: str
:param data: structure to write out as json
:type data: dict or list
:raises: ValueError, OSError
"""
dn = os.path.dirname(path)
f = tempfile.NamedTemporaryFile(mode='w', dir=dn, delete=False)
json.dump(data, f, indent=4)
os.fchmod(f.file.fileno(), 0o644)
os.rename(f.name, path)
def load_json(path):
"""
Shortcut for loading json from a file path.
:param path: The full path to the file
:type: path: str
:returns: loaded json
:rtype: dict
:raises: IOError, ValueError
"""
with open(path) as f:
return json.load(f)
def sha256sum_file(path):
"""
Calculates the sha256 sum from a path.
:param path: The full path to the file
:type: path: str
:returns: The calculated sha256 sum
:type: str
"""
h = hashlib.sha256()
with open(path, 'rb', buffering=0) as f:
for b in iter(lambda: f.read(128 * 1024), b''):
h.update(b)
return h.hexdigest()
def fatal(msg):
"""
Prints fatal error messages and exits execution.
:param msg: The message to show to output
:type msg: str
:raises: SystemExit
"""
print('fatal: {}'.format(msg), file=sys.stderr)
raise SystemExit(1)
def info(msg):
"""
Prints info messages.
:param msg: The message to show to output
:type msg: str
"""
print('info: {}'.format(msg), file=sys.stderr)
def rfc3339_time(t=None):
"""
Produces a rfc3339 compliant time string.
:param t: The full path to the file
:type: t: datetime.datetime
:returns: a rfc3339 compliant time string
:rtype: str
"""
if t is None:
t = datetime.utcnow()
else:
# if the need arises, we can convert to UTC, but let's just enforce
# this doesn't slip by for now
assert t.tzname() == 'UTC', "Timestamp must be in UTC format"
return t.strftime("%Y-%m-%dT%H:%M:%SZ")
def rm_allow_noent(path):
"""
Removes a file but doesn't error if the file does not exist.
:param path: The full path to the file
:type: path: str
"""
try:
os.unlink(path)
except FileNotFoundError:
pass
# Obviously this is a hack but...we need to know this before
# launching, and I don't think we have structured metadata in e.g. qcow2.
# There are other alternatives but we'll carry this hack for now.
# But if you're reading this comment 10 years in the future, I won't be
# too surprised either ;) Oh and hey if you are please send me an email, it'll
# be like a virtual time capsule! If they still use email then...
def disk_ignition_version(path):
bn = os.path.basename(path)
if bn.startswith(("rhcos-41", "rhcos-42")):
return "2.2.0"
else:
return "3.0.0"
def import_ostree_commit(repo, commit, tarfile):
# create repo in case e.g. tmp/ was cleared out; idempotent
subprocess.check_call(['ostree', 'init', '--repo', repo, '--mode=archive'])
# in the common case where we're operating on a recent build, the OSTree
# commit should already be in the tmprepo
commitpartial = os.path.join(repo, f'state/{commit}.commitpartial')
if (subprocess.call(['ostree', 'show', '--repo', repo, commit],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) == 0
and not os.path.isfile(commitpartial)):
return
with tempfile.TemporaryDirectory(dir=f'{repo}/tmp') as d:
subprocess.check_call(['tar', '-C', d, '-xf', tarfile])
subprocess.check_call(['ostree', 'pull-local', '--repo', repo,
d, commit])
def get_basearch():
try:
return get_basearch.saved
except AttributeError:
get_basearch.saved = RpmOstree.get_basearch()
return get_basearch.saved
# FIXME: Add tests
class Builds: # pragma: nocover
def __init__(self, workdir=None):
self._workdir = workdir
self._fn = self._path("builds/builds.json")
if not os.path.isdir(self._path("builds")):
raise Exception("No builds/ dir found!")
elif os.path.isfile(self._fn):
self._data = load_json(self._fn)
else:
# must be a new workdir; use new schema
self._data = {
'schema-version': "1.0.0",
'builds': []
}
self.flush()
self._version = semver.parse_version_info(
self._data.get('schema-version', "0.0.1"))
# we understand < 2.0.0 only
if self._version._major >= 2:
raise Exception("Builds schema too new; please update cosa")
# for now, since we essentially just support "1.0.0" and "0.0.1",
# just dilute to a bool
self._legacy = (self._version._major < 1)
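# Added note (not in the original): the two builds.json shapes this class distinguishes.
# Build ids below are placeholders.
#
# legacy (< 1.0.0): 'builds' is a plain list of build ids, e.g.
#   {'builds': ['31.20190905.0', '31.20190904.1']}
# schema 1.0.0: 'builds' is a list of dicts with per-arch info, e.g.
#   {'schema-version': '1.0.0',
#    'builds': [{'id': '31.20190905.0', 'arches': ['x86_64']}],
#    'timestamp': '2019-09-05T12:00:00Z'}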
def _path(self, path):
if not self._workdir:
return path
return os.path.join(self._workdir, path)
def has(self, build_id):
if self._legacy:
return build_id in self._data['builds']
return any([b['id'] == build_id for b in self._data['builds']])
def is_empty(self):
return len(self._data['builds']) == 0
def get_latest(self):
# just let it throw if there are none
if self._legacy:
return self._data['builds'][0]
return self._data['builds'][0]['id']
def get_build_arches(self, build_id):
assert not self._legacy
for build in self._data['builds']:
if build['id'] == build_id:
return build['arches']
assert False, "Build not found!"
def get_build_dir(self, build_id, basearch=None):
if build_id == 'latest':
build_id = self.get_latest()
if self._legacy:
return self._path(f"builds/{build_id}")
if not basearch:
# just assume caller wants build dir for current arch
basearch = get_basearch()
return self._path(f"builds/{build_id}/{basearch}")
def insert_build(self, build_id, basearch=None):
if self._legacy:
self._data['builds'].insert(0, build_id)
else:
if not basearch:
basearch = get_basearch()
# for future tooling: allow inserting in an existing build for a
# separate arch
for build in self._data['builds']:
if build['id'] == build_id:
if basearch in build['arches']:
raise "Build {build_id} for {basearch} already exists"
build['arches'] += [basearch]
break
else:
self._data['builds'].insert(0, {
'id': build_id,
'arches': [
basearch
]
})
def bump_timestamp(self):
self._data['timestamp'] = rfc3339_time()
self.flush()
def is_legacy(self):
return self._legacy
def raw(self):
return self._data
def flush(self):
write_json(self._fn, self._data)
| 29.578947
| 79
| 0.598102
|
d9d2f6ef162ea75429facf55e74304c213f45805
| 1,198
|
py
|
Python
|
tests/test_pitch.py
|
Derrick-Nyongesa/Blog
|
aff6b97aac958e6f626c934c57fffba1bb1f845d
|
[
"MIT"
] | null | null | null |
tests/test_pitch.py
|
Derrick-Nyongesa/Blog
|
aff6b97aac958e6f626c934c57fffba1bb1f845d
|
[
"MIT"
] | null | null | null |
tests/test_pitch.py
|
Derrick-Nyongesa/Blog
|
aff6b97aac958e6f626c934c57fffba1bb1f845d
|
[
"MIT"
] | null | null | null |
import unittest
from app.models import Pitch,User,Comment
from app import db
class TestPost(unittest.TestCase):
def setUp(self):
self.user_Daniel = User(username = 'Daniel',password = 'daniel', email = 'daniel@ms.com')
self.new_pitch = Pitch(id=1, category = 'interview',title = 'promotion', pitch ='i love business', user = self.user_Daniel.id, upvotes = 1, downvotes = 0)
def tearDown(self):
Pitch.query.delete()
User.query.delete()
def test_check_instance_variables(self):
self.assertEquals(self.new_pitch.id,1)
self.assertEquals(self.new_pitch.category,'interview')
self.assertEquals(self.new_pitch.title,'promotion')
self.assertEquals(self.new_pitch.pitch,'i love business')
self.assertEquals(self.new_pitch.user_id,self.user_Daniel.id)
self.assertEquals(self.new_pitch.upvotes,1)
self.assertEquals(self.new_pitch.downvotes,0)
def test_save_post(self):
self.new_pitch.save_pitch()
self.assertTrue(len(Pitch.query.all())>0)
def test_get_pitch(self):
self.new_pitch.save_pitch()
got_pitch = Pitch.get_pitch(1)
self.assertTrue(got_pitch is not None)
| 39.933333
| 162
| 0.688648
|
1c16f96d3d643557793f8609ab342e4b9ba61332
| 5,735
|
py
|
Python
|
elliot/recommender/latent_factor_models/FM/factorization_machine.py
|
deklanw/elliot
|
729871d29093f63c3fe30d54bc42669fd16eb662
|
[
"Apache-2.0"
] | null | null | null |
elliot/recommender/latent_factor_models/FM/factorization_machine.py
|
deklanw/elliot
|
729871d29093f63c3fe30d54bc42669fd16eb662
|
[
"Apache-2.0"
] | null | null | null |
elliot/recommender/latent_factor_models/FM/factorization_machine.py
|
deklanw/elliot
|
729871d29093f63c3fe30d54bc42669fd16eb662
|
[
"Apache-2.0"
] | null | null | null |
"""
Module description:
"""
__version__ = '0.1'
__author__ = 'Vito Walter Anelli, Claudio Pomo, Daniele Malitesta, Antonio Ferrara'
__email__ = 'vitowalter.anelli@poliba.it, claudio.pomo@poliba.it,' \
'daniele.malitesta@poliba.it, antonio.ferrara@poliba.it'
import pickle
import numpy as np
from tqdm import tqdm
from elliot.dataset.samplers import pointwise_pos_neg_ratings_sampler as pws
from elliot.recommender.base_recommender_model import BaseRecommenderModel
from elliot.recommender.latent_factor_models.FM.factorization_machine_model import FactorizationMachineModel
from elliot.recommender.recommender_utils_mixin import RecMixin
from elliot.utils.write import store_recommendation
from elliot.recommender.base_recommender_model import init_charger
np.random.seed(42)
class FM(RecMixin, BaseRecommenderModel):
r"""
Factorization Machines
For further details, please refer to the `paper <https://ieeexplore.ieee.org/document/5694074>`_
Args:
factors: Number of factors of feature embeddings
lr: Learning rate
reg: Regularization coefficient
To include the recommendation model, add it to the config file adopting the following pattern:
.. code:: yaml
models:
FM:
meta:
save_recs: True
epochs: 10
factors: 10
lr: 0.001
reg: 0.1
"""
@init_charger
def __init__(self, data, config, params, *args, **kwargs):
self._random = np.random
self._params_list = [
("_factors", "factors", "factors", 10, None, None),
("_learning_rate", "lr", "lr", 0.001, None, None),
("_l_w", "reg", "reg", 0.1, None, None)
]
self.autoset_params()
if self._batch_size < 1:
self._batch_size = self._data.transactions
self._ratings = self._data.train_dict
self._sp_i_train = self._data.sp_i_train
self._i_items_set = list(range(self._num_items))
self._sampler = pws.Sampler(self._data.i_train_dict)
self._model = FactorizationMachineModel(self._num_users, self._num_items, self._factors,
self._l_w, self._learning_rate)
@property
def name(self):
return "FM" \
+ "_e:" + str(self._epochs) \
+ "_bs:" + str(self._batch_size) \
+ f"_{self.get_params_shortcut()}"
def predict(self, u: int, i: int):
pass
def train(self):
if self._restore:
return self.restore_weights()
best_metric_value = 0
for it in range(self._epochs):
loss = 0
steps = 0
with tqdm(total=int(self._data.transactions // self._batch_size), disable=not self._verbose) as t:
for batch in self._sampler.step(self._data.transactions, self._batch_size):
steps += 1
loss += self._model.train_step(batch)
t.set_postfix({'loss': f'{loss.numpy() / steps:.5f}'})
t.update()
if not (it + 1) % self._validation_rate:
recs = self.get_recommendations(self.evaluator.get_needed_recommendations())
result_dict = self.evaluator.eval(recs)
self._results.append(result_dict)
print(f'Epoch {(it + 1)}/{self._epochs} loss {loss / steps:.3f}')
if self._results[-1][self._validation_k]["val_results"][self._validation_metric] > best_metric_value:
print("******************************************")
best_metric_value = self._results[-1][self._validation_k]["val_results"][self._validation_metric]
if self._save_weights:
self._model.save_weights(self._saving_filepath)
if self._save_recs:
store_recommendation(recs, self._config.path_output_rec_result + f"{self.name}-it:{it + 1}.tsv")
def get_recommendations(self, k: int = 100):
predictions_top_k = {}
for index, offset in enumerate(range(0, self._num_users, self._batch_size)):
offset_stop = min(offset + self._batch_size, self._num_users)
predictions = self._model.get_recs(
(np.repeat(np.array(list(range(offset, offset_stop)))[:, None], repeats=self._num_items, axis=1),
np.array([self._i_items_set for _ in range(offset, offset_stop)])))
v, i = self._model.get_top_k(predictions, self.get_train_mask(offset, offset_stop), k=k)
items_ratings_pair = [list(zip(map(self._data.private_items.get, u_list[0]), u_list[1]))
for u_list in list(zip(i.numpy(), v.numpy()))]
predictions_top_k.update(dict(zip(map(self._data.private_users.get,
range(offset, offset_stop)), items_ratings_pair)))
return predictions_top_k
def restore_weights(self):
try:
with open(self._saving_filepath, "rb") as f:
self._model.set_model_state(pickle.load(f))
print(f"Model correctly Restored")
recs = self.get_recommendations(self.evaluator.get_needed_recommendations())
result_dict = self.evaluator.eval(recs)
self._results.append(result_dict)
print("******************************************")
if self._save_recs:
store_recommendation(recs, self._config.path_output_rec_result + f"{self.name}.tsv")
return True
except Exception as ex:
print(f"Error in model restoring operation! {ex}")
return False
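# --- Hedged illustrative sketch (not part of the original class) ---
# The docstring above points to Rendle's Factorization Machines paper; the
# trained model lives in FactorizationMachineModel. The helper below is only a
# minimal NumPy sketch of the second-order FM score, using the O(k*n)
# reformulation of the pairwise-interaction term.
def _fm_score_sketch(x, w0, w, V):
    """Score one feature vector x (n,) with bias w0, weights w (n,) and factors V (n, k)."""
    linear = w0 + x @ w
    # sum_{i<j} <v_i, v_j> x_i x_j = 0.5 * sum_f ((x @ V)_f^2 - (x^2 @ V^2)_f)
    xv = x @ V
    x2v2 = (x ** 2) @ (V ** 2)
    return linear + 0.5 * np.sum(xv ** 2 - x2v2)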
| 39.280822
| 120
| 0.604359
|
3f317052dcb126beb42b5a8472bbeedc9daac3ee
| 181
|
py
|
Python
|
deepmux/templates.py
|
Deep-Mux/deepmux-cli
|
ff147259ffb1b0bef613f9b15e4e029fb859d797
|
[
"MIT"
] | 4
|
2020-11-23T18:56:25.000Z
|
2021-03-19T23:38:24.000Z
|
deepmux/templates.py
|
Deep-Mux/deepmux-cli
|
ff147259ffb1b0bef613f9b15e4e029fb859d797
|
[
"MIT"
] | null | null | null |
deepmux/templates.py
|
Deep-Mux/deepmux-cli
|
ff147259ffb1b0bef613f9b15e4e029fb859d797
|
[
"MIT"
] | null | null | null |
python_function_basic = """name: <function name>
env: <function env>
python:
call: <required, module:function to call>
# requirements: <optional, path to requirements.txt>
"""
| 25.857143
| 54
| 0.718232
|
82251aa5cefa9ed7f9a6ff69ed68caf447e5b3a8
| 1,582
|
py
|
Python
|
tests/scanning/test_strings.py
|
sco1/pylox
|
b4820828306c20cee3f8533c2547fafb92c6c1bd
|
[
"MIT"
] | 2
|
2021-12-18T01:52:50.000Z
|
2022-01-17T19:41:52.000Z
|
tests/scanning/test_strings.py
|
sco1/pylox
|
b4820828306c20cee3f8533c2547fafb92c6c1bd
|
[
"MIT"
] | 18
|
2021-11-30T04:05:53.000Z
|
2022-02-01T03:30:04.000Z
|
tests/scanning/test_strings.py
|
sco1/pylox
|
b4820828306c20cee3f8533c2547fafb92c6c1bd
|
[
"MIT"
] | null | null | null |
from textwrap import dedent
import pytest_check as check
from pylox.lox import Lox
from pylox.scanner import Scanner
from pylox.tokens import Token, TokenType
# Base test cases from https://github.com/munificent/craftinginterpreters/blob/master/test/scanning/strings.lox
TEST_SRC = dedent(
"""\
""
"string"
'string'
"multi-
line-
string"
"""
)
TRUTH_TOKENS = [
Token(
token_type=TokenType.STRING,
lexeme='""',
literal="",
lineno=0,
end_lineno=0,
col_offset=0,
end_col_offset=2,
),
Token(
token_type=TokenType.STRING,
lexeme='"string"',
literal="string",
lineno=1,
end_lineno=1,
col_offset=0,
end_col_offset=8,
),
Token(
token_type=TokenType.STRING,
lexeme="'string'",
literal="string",
lineno=2,
end_lineno=2,
col_offset=0,
end_col_offset=8,
),
Token(
token_type=TokenType.STRING,
lexeme='"multi-\nline-\nstring"',
literal="multi-\nline-\nstring",
lineno=3,
end_lineno=5,
col_offset=0,
end_col_offset=7,
),
Token(
token_type=TokenType.EOF,
lexeme="",
literal=None,
lineno=6,
end_lineno=6,
col_offset=0,
end_col_offset=0,
),
]
def test_string_scanning() -> None:
scanner = Scanner(TEST_SRC, Lox())
tokens = scanner.scan_tokens()
for idx, token in enumerate(tokens):
check.equal(token, TRUTH_TOKENS[idx])
| 20.815789
| 111
| 0.573957
|
2846baa17e5ab7204d1f59aff523ecfc2b3dccdb
| 3,252
|
py
|
Python
|
openregistry/lots/loki/constants.py
|
kukirokuk/openregistry.lots.loki
|
0467fc881ad0bf31c40cd85adad49e5df14544df
|
[
"Apache-2.0"
] | null | null | null |
openregistry/lots/loki/constants.py
|
kukirokuk/openregistry.lots.loki
|
0467fc881ad0bf31c40cd85adad49e5df14544df
|
[
"Apache-2.0"
] | null | null | null |
openregistry/lots/loki/constants.py
|
kukirokuk/openregistry.lots.loki
|
0467fc881ad0bf31c40cd85adad49e5df14544df
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from datetime import timedelta
AUCTION_STATUSES = ['scheduled', 'active', 'complete', 'unsuccessful', 'cancelled']
LOT_STATUSES = [
"draft", "composing", "verification", "pending", "pending.deleted", "deleted", "active.salable",
"active.auction", "active.contracting", "pending.sold", "pending.dissolution", "sold", "dissolved", "invalid"]
ITEM_EDITING_STATUSES = ['draft', 'composing', 'pending']
STATUS_CHANGES = {
"draft": {
"editing_permissions": ["lot_owner", "Administrator"],
"next_status": {
"composing": ["lot_owner", "Administrator"],
}
},
"composing": {
"editing_permissions": ["lot_owner", "Administrator"],
"next_status": {
"verification": ["lot_owner", "Administrator"]
}
},
"verification": {
"editing_permissions": ["concierge"],
"next_status": {
"pending": ["concierge"],
"invalid": ["concierge"],
}
},
"pending": {
"editing_permissions": ["lot_owner", "Administrator", "chronograph"],
"next_status": {
"pending.deleted": ["lot_owner", "Administrator"],
"active.salable": ["chronograph", "Administrator"],
}
},
"pending.deleted": {
"editing_permissions": ["concierge", "Administrator"],
"next_status": {
"deleted": ["concierge", "Administrator"],
}
},
"deleted": {
"editing_permissions": [],
"next_status": {}
},
"active.salable": {
"editing_permissions": ["Administrator", "concierge"],
"next_status": {
"active.auction": ["Administrator", "concierge"]
}
},
"active.auction": {
"editing_permissions": ["Administrator", "convoy"],
"next_status": {
"active.contracting": ["Administrator", "convoy"],
"pending.dissolution": ["Administrator", "convoy"],
"active.salable": ["Administrator", "convoy"]
}
},
"active.contracting": {
"editing_permissions": ["Administrator", "convoy"],
"next_status": {
"pending.sold": ["Administrator", "convoy"],
"pending.dissolution": ["Administrator", "convoy"],
}
},
"pending.sold": {
"editing_permissions": ["Administrator", "concierge"],
"next_status": {
"sold": ["Administrator", "concierge"]
}
},
"pending.dissolution": {
"editing_permissions": ["Administrator", "concierge"],
"next_status": {
"dissolved": ["Administrator", "concierge"]
}
},
"sold": {
"editing_permissions": [],
"next_status": {}
},
"dissolved": {
"editing_permissions": [],
"next_status": {}
},
"invalid": {
"editing_permissions": [],
"next_status": {}
},
}
AUCTION_DOCUMENT_TYPES = [
'notice',
'technicalSpecifications',
'evaluationCriteria',
'illustration',
'x_PublicAssetCertificate',
'x_PlatformLegalDetails',
'x_presentation',
'bidders',
'x_nda',
'x_dgfAssetFamiliarization'
]
RECTIFICATION_PERIOD_DURATION = timedelta(days=2)
DEFAULT_DUTCH_STEPS = 99
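# Hedged helper sketch (not part of the original module) showing how the
# STATUS_CHANGES mapping above can be queried to validate a lot transition:
def can_change_status(current_status, next_status, role):
    """Return True if `role` may move a lot from `current_status` to `next_status`."""
    allowed = STATUS_CHANGES.get(current_status, {}).get('next_status', {})
    return role in allowed.get(next_status, [])

# Example: can_change_status('pending', 'active.salable', 'chronograph') is True,
# while can_change_status('pending', 'active.salable', 'lot_owner') is False.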
| 29.297297
| 114
| 0.551661
|
c7ba59b6cd1044bd5807ffa7d4a4d0ec2ff5f793
| 436
|
py
|
Python
|
aiogram/types/photo_size.py
|
victorusachev/aiogram
|
9571669ca4b06165031d8f9830130f3c638b60d8
|
[
"MIT"
] | 3
|
2020-12-06T16:55:53.000Z
|
2021-11-19T19:25:57.000Z
|
aiogram/types/photo_size.py
|
Kylmakalle/aiogram
|
550c41e1752aa08c493d7cb4ec5fec402d8e849c
|
[
"MIT"
] | 1
|
2019-10-18T19:33:20.000Z
|
2019-10-18T19:33:20.000Z
|
aiogram/types/photo_size.py
|
Kylmakalle/aiogram
|
550c41e1752aa08c493d7cb4ec5fec402d8e849c
|
[
"MIT"
] | 2
|
2020-12-30T09:51:30.000Z
|
2021-11-10T16:50:28.000Z
|
from . import base
from . import fields
from . import mixins
class PhotoSize(base.TelegramObject, mixins.Downloadable):
"""
This object represents one size of a photo or a file / sticker thumbnail.
https://core.telegram.org/bots/api#photosize
"""
file_id: base.String = fields.Field()
width: base.Integer = fields.Field()
height: base.Integer = fields.Field()
file_size: base.Integer = fields.Field()
| 27.25
| 77
| 0.697248
|
68c0b607e9e93ffc59b20a7f37b59da58688b642
| 2,138
|
py
|
Python
|
tests/build/scipy/scipy/io/tests/test_fortran.py
|
crougeux/-a-i_v1.6.3_modif
|
b499a812e79f335d082d3f9b1070e0465ad67bab
|
[
"BSD-3-Clause"
] | 26
|
2018-02-14T23:52:58.000Z
|
2021-08-16T13:50:03.000Z
|
tests/build/scipy/scipy/io/tests/test_fortran.py
|
crougeux/-a-i_v1.6.3_modif
|
b499a812e79f335d082d3f9b1070e0465ad67bab
|
[
"BSD-3-Clause"
] | null | null | null |
tests/build/scipy/scipy/io/tests/test_fortran.py
|
crougeux/-a-i_v1.6.3_modif
|
b499a812e79f335d082d3f9b1070e0465ad67bab
|
[
"BSD-3-Clause"
] | 10
|
2018-08-13T19:38:39.000Z
|
2020-04-19T03:02:00.000Z
|
''' Tests for fortran sequential files '''
import tempfile
import shutil
from os import path
from glob import iglob
import re
from numpy.testing import assert_equal
import numpy as np
from scipy.io import FortranFile
DATA_PATH = path.join(path.dirname(__file__), 'data')
def test_fortranfiles_read():
for filename in iglob(path.join(DATA_PATH, "fortran-*-*x*x*.dat")):
        m = re.search(r'fortran-([^-]+)-(\d+)x(\d+)x(\d+)\.dat', filename, re.I)
if not m:
raise RuntimeError("Couldn't match %s filename to regex" % filename)
dims = (int(m.group(2)), int(m.group(3)), int(m.group(4)))
f = FortranFile(filename, 'r', '<u4')
data = f.read_record(dtype=m.group(1).replace('s', '<')).reshape(dims)
f.close()
counter = 0
for k in range(dims[2]):
for j in range(dims[1]):
for i in range(dims[0]):
assert_equal(counter, data[i,j,k])
counter += 1
def test_fortranfiles_write():
for filename in iglob(path.join(DATA_PATH, "fortran-*-*x*x*.dat")):
        m = re.search(r'fortran-([^-]+)-(\d+)x(\d+)x(\d+)\.dat', filename, re.I)
if not m:
raise RuntimeError("Couldn't match %s filename to regex" % filename)
dims = (int(m.group(2)), int(m.group(3)), int(m.group(4)))
counter = 0
data = np.zeros(dims, dtype=m.group(1).replace('s', '<'))
for k in range(dims[2]):
for j in range(dims[1]):
for i in range(dims[0]):
data[i,j,k] = counter
counter += 1
tmpdir = tempfile.mkdtemp()
try:
testFile = path.join(tmpdir,path.basename(filename))
f = FortranFile(testFile, 'w','<u4')
f.write_record(data)
f.close()
originalfile = open(filename, 'rb')
newfile = open(testFile, 'rb')
assert_equal(originalfile.read(), newfile.read(),
err_msg=filename)
originalfile.close()
newfile.close()
finally:
shutil.rmtree(tmpdir)
| 32.892308
| 80
| 0.543966
|
81533c33e0bb937df2eaa7714af282381a69f866
| 704
|
py
|
Python
|
API/V1/Python/API.V1.RPC.Client.Python.Example/API.V1.RPC.Client.Python.Example.py
|
multiplayground/DotNet.Common
|
f8f9c701e111849930ff99e4b48ae2d4640c7db5
|
[
"Apache-2.0"
] | null | null | null |
API/V1/Python/API.V1.RPC.Client.Python.Example/API.V1.RPC.Client.Python.Example.py
|
multiplayground/DotNet.Common
|
f8f9c701e111849930ff99e4b48ae2d4640c7db5
|
[
"Apache-2.0"
] | 7
|
2019-04-30T16:40:03.000Z
|
2019-05-14T19:35:53.000Z
|
API/V1/Python/API.V1.RPC.Client.Python.Example/API.V1.RPC.Client.Python.Example.py
|
multiplayground/DotNet.Common
|
f8f9c701e111849930ff99e4b48ae2d4640c7db5
|
[
"Apache-2.0"
] | 2
|
2019-04-16T12:37:37.000Z
|
2019-04-30T21:23:28.000Z
|
#!/usr/bin/env python
from RpcClient import RpcClient
import asyncio
import os #https://stackoverflow.com/questions/4906977/how-to-access-environment-variable-values
def getEnvUserPass(userDef="guest", passDef="guest") -> (str, str):
theUser = os.getenv('RabbitMQ_User', userDef)
thePass = os.getenv('RabbitMQ_Pass', passDef)
return (theUser, thePass)
async def asyncRpcClient():
theUser, thePass = getEnvUserPass()
with RpcClient(aUser=theUser, aPass=thePass) as rpcClient:
message = '2'
print(f" [x] Requesting {message})")
response = rpcClient.call(message)
print(f" [.] Got {response}")
if __name__ == '__main__':
asyncio.run(asyncRpcClient())
| 35.2
| 96
| 0.697443
|
34939d48437bdf1552e4330805d7bc4573877a9e
| 15,345
|
py
|
Python
|
projects/ISTR/istr/util/misc.py
|
braindevices/ISTR
|
520b0d410ba8be5dbf53971d962b0bfcf072a7c0
|
[
"Apache-2.0"
] | null | null | null |
projects/ISTR/istr/util/misc.py
|
braindevices/ISTR
|
520b0d410ba8be5dbf53971d962b0bfcf072a7c0
|
[
"Apache-2.0"
] | null | null | null |
projects/ISTR/istr/util/misc.py
|
braindevices/ISTR
|
520b0d410ba8be5dbf53971d962b0bfcf072a7c0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from typing import Optional, List
import torch
import torch.distributed as dist
from torch import Tensor
from distutils.version import LooseVersion
# needed due to empty tensor bug in pytorch and torchvision 0.5
import torchvision
if LooseVersion(torchvision.__version__) < LooseVersion("0.7"):
from torchvision.ops import _new_empty_tensor
from torchvision.ops.misc import _output_size
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
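# Hedged usage sketch for SmoothedValue (illustrative, single-process, so
# synchronize_between_processes() is not needed):
#
#   loss_meter = SmoothedValue(window_size=20)
#   for step_loss in (0.9, 0.8, 0.7):
#       loss_meter.update(step_loss)
#   str(loss_meter)   # -> "0.8000 (0.8000)": window median and global average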
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.tensor([tensor.numel()], device="cuda")
size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
if local_size != max_size:
padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
if torch.cuda.is_available():
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}',
'max mem: {memory:.0f}'
])
else:
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
])
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
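# Hedged usage sketch for MetricLogger (illustrative; `data_loader` and
# `train_step` are hypothetical names, not part of this module):
#
#   metric_logger = MetricLogger(delimiter="  ")
#   for samples, targets in metric_logger.log_every(data_loader, 10, header='Epoch: [0]'):
#       loss = train_step(samples, targets)
#       metric_logger.update(loss=loss)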
def get_sha():
cwd = os.path.dirname(os.path.abspath(__file__))
def _run(command):
return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()
sha = 'N/A'
diff = "clean"
branch = 'N/A'
try:
sha = _run(['git', 'rev-parse', 'HEAD'])
subprocess.check_output(['git', 'diff'], cwd=cwd)
diff = _run(['git', 'diff-index', 'HEAD'])
diff = "has uncommited changes" if diff else "clean"
branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
except Exception:
pass
message = f"sha: {sha}, status: {diff}, branch: {branch}"
return message
def collate_fn(batch):
batch = list(zip(*batch))
batch[0] = nested_tensor_from_tensor_list(batch[0])
return tuple(batch)
def _max_by_axis(the_list):
# type: (List[List[int]]) -> List[int]
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
class NestedTensor(object):
def __init__(self, tensors, mask: Optional[Tensor]):
self.tensors = tensors
self.mask = mask
def to(self, device):
# type: (Device) -> NestedTensor # noqa
cast_tensor = self.tensors.to(device)
mask = self.mask
if mask is not None:
assert mask is not None
cast_mask = mask.to(device)
else:
cast_mask = None
return NestedTensor(cast_tensor, cast_mask)
def decompose(self):
return self.tensors, self.mask
def __repr__(self):
return str(self.tensors)
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
# TODO make this more general
if tensor_list[0].ndim == 3:
if torchvision._is_tracing():
# nested_tensor_from_tensor_list() does not export well to ONNX
# call _onnx_nested_tensor_from_tensor_list() instead
return _onnx_nested_tensor_from_tensor_list(tensor_list)
# TODO make it support different-sized images
max_size = _max_by_axis([list(img.shape) for img in tensor_list])
# min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
batch_shape = [len(tensor_list)] + max_size
b, c, h, w = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
for img, pad_img, m in zip(tensor_list, tensor, mask):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
m[: img.shape[1], :img.shape[2]] = False
else:
raise ValueError('not supported')
return NestedTensor(tensor, mask)
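# Shape illustration (hedged, not executed here): for two RGB images of sizes
# 3x480x640 and 3x500x600, nested_tensor_from_tensor_list pads both into one
# tensor of shape (2, 3, 500, 640); the returned mask has shape (2, 500, 640)
# and is True exactly on the padded (invalid) pixels of each image.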
# _onnx_nested_tensor_from_tensor_list() is an implementation of
# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
@torch.jit.unused
def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
max_size = []
for i in range(tensor_list[0].dim()):
max_size_i = torch.max(torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)).to(torch.int64)
max_size.append(max_size_i)
max_size = tuple(max_size)
# work around for
# pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
# m[: img.shape[1], :img.shape[2]] = False
# which is not yet supported in onnx
padded_imgs = []
padded_masks = []
for img in tensor_list:
padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
padded_imgs.append(padded_img)
m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
padded_masks.append(padded_mask.to(torch.bool))
tensor = torch.stack(padded_imgs)
mask = torch.stack(padded_masks)
return NestedTensor(tensor, mask=mask)
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
@torch.no_grad()
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
if target.numel() == 0:
return [torch.zeros([], device=output.device)]
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
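# Worked example (hedged, illustrative): with logits for 2 samples over 3
# classes whose top-1 predictions are [2, 0] and target = [2, 1],
# accuracy(output, target, topk=(1,)) returns [tensor(50.)] -- one of the two
# top-1 predictions is correct, i.e. 50%.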
def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
# type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
"""
Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
This will eventually be supported natively by PyTorch, and this
class can go away.
"""
    if LooseVersion(torchvision.__version__) < LooseVersion("0.7"):
if input.numel() > 0:
return torch.nn.functional.interpolate(
input, size, scale_factor, mode, align_corners
)
output_shape = _output_size(2, input, size, scale_factor)
output_shape = list(input.shape[:-2]) + list(output_shape)
return _new_empty_tensor(input, output_shape)
else:
return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)
| 32.788462
| 116
| 0.608407
|
884d11f4da2bbc23f410fb594593d284190978f5
| 13,487
|
py
|
Python
|
src/manual_solve.py
|
PDorrian/ARC
|
ae075225e19a5f721eee8f55fb9d36ba0b882ccd
|
[
"Apache-2.0"
] | null | null | null |
src/manual_solve.py
|
PDorrian/ARC
|
ae075225e19a5f721eee8f55fb9d36ba0b882ccd
|
[
"Apache-2.0"
] | null | null | null |
src/manual_solve.py
|
PDorrian/ARC
|
ae075225e19a5f721eee8f55fb9d36ba0b882ccd
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import os, sys
import json
import numpy as np
import re
### YOUR CODE HERE: write at least three functions which solve
### specific tasks by transforming the input x and returning the
### result. Name them according to the task ID as in the three
### examples below. Delete the three examples. The tasks you choose
### must be in the data/training directory, not data/evaluation.
"""
Name: Patrick Dorrian
Student ID: 17372533
GitHub Repo: github.com/PDorrian/ARC
"""
def solve_6ecd11f4(input_grid):
"""
Input:
- A large black grid of variable size
- A small multi-coloured square of variable size placed anywhere on the grid.
- A large pattern made up of n*n blocks of pixels, similar to an old video game sprite. The pixel squares are
of variable size and the pattern itself is also made up of a variable number of pixel squares.
Output:
- The large pattern simplified to its smallest form, the pixels of which will be coloured with the colours
of the small multi-coloured square.
Transformation:
1. The regions in which the large pattern and the small multi-coloured square lie are identified.
2. The dimensions of the coloured square are used to find the locations of the first pixel in each
n*n pixel square. This is used to determine the pattern in the output grid.
3. The multi-coloured square is sampled to get the correct colourings for the output pattern.
Training grids solved: 3/3
Testing grids solved: 1/1
"""
# Find color of main section.
main_color = max(range(1, 10), key=lambda i: np.sum(input_grid == i))
# Find bounds and size of main shape.
main_shape = np.where(input_grid == main_color) # Locations of cells containing the main color
main_shape = [(x,y) for x,y in zip(*main_shape)] # Reformat as pairs of coordinates.
main_shape = [(x,y) for x,y in main_shape # Discard stray main colors (from color square).
if any((a,b) in main_shape for a,b in [(x,y+1), (x,y-1), (x+1,y), (x-1,y)])]
# Calculate size of bounding square of the main shape.
main_shape_size = max(main_shape, key=lambda pair: pair[0])[0] - min(main_shape, key=lambda pair: pair[0])[0] + 1
# Top left coordinate of the main shape.
main_shape_start_x, main_shape_start_y = main_shape[0]
# Create copy of input and remove the main shape from it.
# All that should remain is the color square.
color_square = np.copy(input_grid)
for x,y in main_shape:
# Pixels of the main shape are set to 0.
color_square[x,y] = 0
# All 0 rows are removed to determine the size of the color square.
color_square_size = len(color_square[np.any(color_square > 0, axis=1)])
# Size of a tile in the main shape can now be determined.
main_tile_size = main_shape_size // color_square_size
# Remove 0 values and reshape into a square to get the color block.
color_square = color_square[color_square > 0].reshape(color_square_size, color_square_size)
# Loop over each tile of the color square.
for i in range(color_square_size):
for j in range(color_square_size):
# Set tile to 0 if the corresponding tile in the main shape is also 0.
if input_grid[main_shape_start_x + main_tile_size * i][main_shape_start_y + main_tile_size * j] == 0:
color_square[i][j] = 0
return color_square
def solve_2dd70a9a(input_grid):
"""
Input:
- A large black grid of variable size
- A 2x1 or 1x2 green rectangle placed somewhere on the grid
- A 2x1 or 1x2 red rectangle placed somewhere on the grid
- Many cyan pixels scattered across the grid
Output:
- The input grid with a green continuous line connecting the red and green rectangles while avoiding the cyan.
The green line emanates from the green rectangle along its axis of direction.
The line only changes direction when it is about to touch a cyan pixel
Transformation:
1. The locations and orientations of the red and green rectangles are determined
2. One of the two possible directions are chosen at random and a walk begins from the green rectangle in
that direction.
3. When a cyan pixel is encountered, the walk changes direction. The direction is determined by using
a heuristic measurement of the distance from the next potential step to the red rectangle.
4. If a path is not found, the process repeats but using the other starting direction.
Training grids solved: 3/3
Testing grids solved: 1/1
"""
# Identify the start and end positions.
# Green rectangle position.
green_rect = np.where(input_grid == 3)
green_rect = [(x,y) for x,y in zip(*green_rect)] # Format as (x,y) pairs.
# Red rectangle position.
red_rect = np.where(input_grid == 2)
red_rect = [(x,y) for x,y in zip(*red_rect)] # Format as (x,y) pairs.
# Determine axis of green rectangle.
green_ax = 'y' if green_rect[0][0] == green_rect[1][0] else 'x'
# Try both possible directions.
for s in [1, -1]:
# End goal = centre of red rectangle.
end = (red_rect[0][0] + red_rect[1][0])/2, (red_rect[0][1] + red_rect[1][1])/2
# Start from one of the green pixels.
start = green_rect[s]
x, y = start # Current position
axis = green_ax # Current axis of movement
direction = 1 if axis == 'y' else -1 # Should movement be in the positive or negative direction?
direction *= s # Reversed when starting from the other pixel.
# Explanation of direction and axis:
# + -
# x R L
# y D U
# A copy of the grid that can be safely edited.
grid = np.copy(input_grid)
found = False
for i in range(50):
nx, ny = x, y # Candidates for next position.
# Movement based on current axis and direction.
if axis == 'x':
nx += direction
else:
ny += direction
# Check if next position would be a wall.
if input_grid[nx][ny] == 8:
nx, ny = x, y # Reset candidate position
# Change axis and determine new direction.
if axis == 'x':
axis = 'y'
# Next position and direction determined by which has the shortest heuristic path to the end.
ny = min(ny+1, ny-1, key=lambda d: manhattan_distance((nx, d), end))
direction = 1 if ny > y else -1
elif axis == 'y':
axis = 'x'
# Next position and direction determined by which has the shortest heuristic path to the end.
nx = min(nx+1, nx-1, key=lambda d: manhattan_distance((d, ny), end))
direction = 1 if nx > x else -1
# Lock in candidate coordinates
x, y = nx, ny
# Check that we are within bounds
            if x < 0 or y < 0 or x >= len(grid) or y >= len(grid[0]):
break
# Check if we have reached the end goal.
if grid[x][y] == 2:
return grid
# Check that we have not intersected our own path.
if grid[x][y] == 3 and i > 1:
break
# Color path green.
grid[x][y] = 3
# Raise exception if a valid solution is not found.
raise Exception("No solution found.")
# Auxiliary function for solve_2dd70a9a
# Calculates Manhattan distance between two pairs of coordinates.
def manhattan_distance(a, b):
return abs(a[0]-b[0]) + abs(a[1]-b[1])
def solve_7df24a62(input_grid):
"""
Input:
- A large black grid of variable size.
- Yellow cells peppered throughout the black grid.
- A blue box surrounding a small arrangement of yellow cells.
Output:
- The input grid but with all clusters of yellow cells that have the same pattern as the highlighted
cells (or some rotation of the pattern) are also highlighted in blue.
Transformation:
1. Identify the highlighted pattern and generate all possible rotations of said pattern.
2. Iterate through every cell of the grid and check if the nearby cells are identical to the
highlighted pattern.
3. If there is a match, highlight this area.
Training grids solved: 3/3
Testing grids solved: 1/1
"""
# Find location of blue rectangle (the stencil).
ys, xs = np.where(input_grid == 1)
# Width and height of the blue area
width = max(xs) - min(xs)
height = max(ys) - min(ys)
# Identify pattern within the blue area
pattern = input_grid[ys[0]+1:ys[0]+height, xs[0]+1:xs[0]+width]
pattern = np.where(pattern == 1, 0, pattern)
# Generate a list of each possible pattern rotation
pattern_rotations = [pattern, np.rot90(pattern), np.rot90(np.rot90(pattern)), np.rot90(np.rot90(np.rot90(pattern)))]
# Iterate through each pattern rotation
for rot in pattern_rotations:
height, width = rot.shape # Width and height of rotated shape
# Iterate over each pixel in the grid
for i in range(0, len(input_grid)-height+1):
for j in range(0, len(input_grid[0])-width+1):
# Generate segment with width and height of the pattern starting at the current pixel
segment = input_grid[i:i+height, j:j+width]
# Check if the segment is identical to the pattern
if (segment == rot).all():
# Select area that will be colored blue
blue_area = input_grid[max(i-1, 0):i+height+1, max(0,j-1):j+width+1]
# Change area's black pixels to blue
input_grid[max(i-1, 0):i+height+1, max(0, j-1):j+width+1] = np.where(blue_area == 0, 1, blue_area)
# Return modified grid
return input_grid
"""
Summary/Reflection
Python features and libraries used:
- The only external library used in any of the solve functions is numpy.
- All other features used are standard Python functions and operations, like max() and slicing.
Commonalities/Differences:
- Each function makes use of np.where() to identify regions with a specific colour.
- The first and third functions both use a nested for loop to iterate over all cells in a region.
- All 3 functions use .reshape to change the shapes of arrays.
"""
def main():
# Find all the functions defined in this file whose names are
# like solve_abcd1234(), and run them.
# regex to match solve_* functions and extract task IDs
p = r"solve_([a-f0-9]{8})"
tasks_solvers = []
# globals() gives a dict containing all global names (variables
# and functions), as name: value pairs.
for name in globals():
m = re.match(p, name)
if m:
# if the name fits the pattern eg solve_abcd1234
ID = m.group(1) # just the task ID
solve_fn = globals()[name] # the fn itself
tasks_solvers.append((ID, solve_fn))
for ID, solve_fn in tasks_solvers:
# for each task, read the data and call test()
directory = os.path.join("..", "data", "training")
json_filename = os.path.join(directory, ID + ".json")
data = read_ARC_JSON(json_filename)
test(ID, solve_fn, data)
def read_ARC_JSON(filepath):
"""Given a filepath, read in the ARC task data which is in JSON
format. Extract the train/test input/output pairs of
grids. Convert each grid to np.array and return train_input,
train_output, test_input, test_output."""
# Open the JSON file and load it
data = json.load(open(filepath))
# Extract the train/test input/output grids. Each grid will be a
# list of lists of ints. We convert to Numpy.
train_input = [np.array(data['train'][i]['input']) for i in range(len(data['train']))]
train_output = [np.array(data['train'][i]['output']) for i in range(len(data['train']))]
test_input = [np.array(data['test'][i]['input']) for i in range(len(data['test']))]
test_output = [np.array(data['test'][i]['output']) for i in range(len(data['test']))]
return (train_input, train_output, test_input, test_output)
def test(taskID, solve, data):
"""Given a task ID, call the given solve() function on every
example in the task data."""
print(taskID)
train_input, train_output, test_input, test_output = data
print("Training grids")
for x, y in zip(train_input, train_output):
yhat = solve(x)
show_result(x, y, yhat)
print("Test grids")
for x, y in zip(test_input, test_output):
yhat = solve(x)
show_result(x, y, yhat)
def show_result(x, y, yhat):
print("Input")
print(x)
print("Correct output")
print(y)
print("Our output")
print(yhat)
print("Correct?")
if y.shape != yhat.shape:
print(f"False. Incorrect shape: {y.shape} v {yhat.shape}")
else:
print(np.all(y == yhat))
if __name__ == "__main__": main()
| 39.092754
| 120
| 0.623638
|
fa66658feed6a628150b6e8c80870987509fe95f
| 15,121
|
py
|
Python
|
aiohttp/web.py
|
mihajenko/aiohttp
|
1016f552a0962c2a71e72e2212d3ca1051a3f53e
|
[
"Apache-2.0"
] | 49
|
2019-07-11T13:13:02.000Z
|
2022-03-26T14:46:23.000Z
|
aiohttp/web.py
|
mihajenko/aiohttp
|
1016f552a0962c2a71e72e2212d3ca1051a3f53e
|
[
"Apache-2.0"
] | 26
|
2019-07-18T15:32:20.000Z
|
2022-03-13T10:34:52.000Z
|
aiohttp/web.py
|
mihajenko/aiohttp
|
1016f552a0962c2a71e72e2212d3ca1051a3f53e
|
[
"Apache-2.0"
] | 26
|
2019-08-02T08:11:21.000Z
|
2022-03-31T10:14:21.000Z
|
import asyncio
import logging
import socket
import sys
from argparse import ArgumentParser
from collections.abc import Iterable
from importlib import import_module
from typing import Any, Awaitable, Callable, List, Optional, Type, Union, cast
from .abc import AbstractAccessLogger
from .helpers import all_tasks
from .log import access_logger
from .web_app import Application, CleanupError
from .web_exceptions import (
HTTPAccepted,
HTTPBadGateway,
HTTPBadRequest,
HTTPClientError,
HTTPConflict,
HTTPCreated,
HTTPError,
HTTPException,
HTTPExpectationFailed,
HTTPFailedDependency,
HTTPForbidden,
HTTPFound,
HTTPGatewayTimeout,
HTTPGone,
HTTPInsufficientStorage,
HTTPInternalServerError,
HTTPLengthRequired,
HTTPMethodNotAllowed,
HTTPMisdirectedRequest,
HTTPMovedPermanently,
HTTPMultipleChoices,
HTTPNetworkAuthenticationRequired,
HTTPNoContent,
HTTPNonAuthoritativeInformation,
HTTPNotAcceptable,
HTTPNotExtended,
HTTPNotFound,
HTTPNotImplemented,
HTTPNotModified,
HTTPOk,
HTTPPartialContent,
HTTPPaymentRequired,
HTTPPermanentRedirect,
HTTPPreconditionFailed,
HTTPPreconditionRequired,
HTTPProxyAuthenticationRequired,
HTTPRedirection,
HTTPRequestEntityTooLarge,
HTTPRequestHeaderFieldsTooLarge,
HTTPRequestRangeNotSatisfiable,
HTTPRequestTimeout,
HTTPRequestURITooLong,
HTTPResetContent,
HTTPSeeOther,
HTTPServerError,
HTTPServiceUnavailable,
HTTPSuccessful,
HTTPTemporaryRedirect,
HTTPTooManyRequests,
HTTPUnauthorized,
HTTPUnavailableForLegalReasons,
HTTPUnprocessableEntity,
HTTPUnsupportedMediaType,
HTTPUpgradeRequired,
HTTPUseProxy,
HTTPVariantAlsoNegotiates,
HTTPVersionNotSupported,
)
from .web_fileresponse import FileResponse
from .web_log import AccessLogger
from .web_middlewares import middleware, normalize_path_middleware
from .web_protocol import (
PayloadAccessError,
RequestHandler,
RequestPayloadError,
)
from .web_request import BaseRequest, FileField, Request
from .web_response import (
ContentCoding,
Response,
StreamResponse,
json_response,
)
from .web_routedef import (
AbstractRouteDef,
RouteDef,
RouteTableDef,
StaticDef,
delete,
get,
head,
options,
patch,
post,
put,
route,
static,
view,
)
from .web_runner import (
AppRunner,
BaseRunner,
BaseSite,
GracefulExit,
ServerRunner,
SockSite,
TCPSite,
UnixSite,
)
from .web_server import Server
from .web_urldispatcher import (
AbstractResource,
AbstractRoute,
DynamicResource,
PlainResource,
Resource,
ResourceRoute,
StaticResource,
UrlDispatcher,
UrlMappingMatchInfo,
View,
)
from .web_ws import WebSocketReady, WebSocketResponse, WSMsgType
__all__ = (
# web_app
'Application',
'CleanupError',
# web_exceptions
'HTTPAccepted',
'HTTPBadGateway',
'HTTPBadRequest',
'HTTPClientError',
'HTTPConflict',
'HTTPCreated',
'HTTPError',
'HTTPException',
'HTTPExpectationFailed',
'HTTPFailedDependency',
'HTTPForbidden',
'HTTPFound',
'HTTPGatewayTimeout',
'HTTPGone',
'HTTPInsufficientStorage',
'HTTPInternalServerError',
'HTTPLengthRequired',
'HTTPMethodNotAllowed',
'HTTPMisdirectedRequest',
'HTTPMovedPermanently',
'HTTPMultipleChoices',
'HTTPNetworkAuthenticationRequired',
'HTTPNoContent',
'HTTPNonAuthoritativeInformation',
'HTTPNotAcceptable',
'HTTPNotExtended',
'HTTPNotFound',
'HTTPNotImplemented',
'HTTPNotModified',
'HTTPOk',
'HTTPPartialContent',
'HTTPPaymentRequired',
'HTTPPermanentRedirect',
'HTTPPreconditionFailed',
'HTTPPreconditionRequired',
'HTTPProxyAuthenticationRequired',
'HTTPRedirection',
'HTTPRequestEntityTooLarge',
'HTTPRequestHeaderFieldsTooLarge',
'HTTPRequestRangeNotSatisfiable',
'HTTPRequestTimeout',
'HTTPRequestURITooLong',
'HTTPResetContent',
'HTTPSeeOther',
'HTTPServerError',
'HTTPServiceUnavailable',
'HTTPSuccessful',
'HTTPTemporaryRedirect',
'HTTPTooManyRequests',
'HTTPUnauthorized',
'HTTPUnavailableForLegalReasons',
'HTTPUnprocessableEntity',
'HTTPUnsupportedMediaType',
'HTTPUpgradeRequired',
'HTTPUseProxy',
'HTTPVariantAlsoNegotiates',
'HTTPVersionNotSupported',
# web_fileresponse
'FileResponse',
# web_middlewares
'middleware',
'normalize_path_middleware',
# web_protocol
'PayloadAccessError',
'RequestHandler',
'RequestPayloadError',
# web_request
'BaseRequest',
'FileField',
'Request',
# web_response
'ContentCoding',
'Response',
'StreamResponse',
'json_response',
# web_routedef
'AbstractRouteDef',
'RouteDef',
'RouteTableDef',
'StaticDef',
'delete',
'get',
'head',
'options',
'patch',
'post',
'put',
'route',
'static',
'view',
# web_runner
'AppRunner',
'BaseRunner',
'BaseSite',
'GracefulExit',
'ServerRunner',
'SockSite',
'TCPSite',
'UnixSite',
# web_server
'Server',
# web_urldispatcher
'AbstractResource',
'AbstractRoute',
'DynamicResource',
'PlainResource',
'Resource',
'ResourceRoute',
'StaticResource',
'UrlDispatcher',
'UrlMappingMatchInfo',
'View',
# web_ws
'WebSocketReady',
'WebSocketResponse',
'WSMsgType',
# web
'run_app',
)
try:
from ssl import SSLContext
except ImportError: # pragma: no cover
SSLContext = Any # type: ignore
async def _run_app(app: Union[Application, Awaitable[Application]], *,
host: Optional[str]=None,
port: Optional[int]=None,
path: Optional[str]=None,
sock: Optional[socket.socket]=None,
shutdown_timeout: float=60.0,
ssl_context: Optional[SSLContext]=None,
print: Callable[..., None]=print,
backlog: int=128,
access_log_class: Type[AbstractAccessLogger]=AccessLogger,
access_log_format: str=AccessLogger.LOG_FORMAT,
access_log: Optional[logging.Logger]=access_logger,
handle_signals: bool=True,
reuse_address: Optional[bool]=None,
reuse_port: Optional[bool]=None) -> None:
    # An internal function that does all the dirty work of running the application.
if asyncio.iscoroutine(app):
app = await app # type: ignore
app = cast(Application, app)
runner = AppRunner(app, handle_signals=handle_signals,
access_log_class=access_log_class,
access_log_format=access_log_format,
access_log=access_log)
await runner.setup()
sites = [] # type: List[BaseSite]
try:
if host is not None:
if isinstance(host, (str, bytes, bytearray, memoryview)):
sites.append(TCPSite(runner, host, port,
shutdown_timeout=shutdown_timeout,
ssl_context=ssl_context,
backlog=backlog,
reuse_address=reuse_address,
reuse_port=reuse_port))
else:
for h in host:
sites.append(TCPSite(runner, h, port,
shutdown_timeout=shutdown_timeout,
ssl_context=ssl_context,
backlog=backlog,
reuse_address=reuse_address,
reuse_port=reuse_port))
elif path is None and sock is None or port is not None:
sites.append(TCPSite(runner, port=port,
shutdown_timeout=shutdown_timeout,
ssl_context=ssl_context, backlog=backlog,
reuse_address=reuse_address,
reuse_port=reuse_port))
if path is not None:
if isinstance(path, (str, bytes, bytearray, memoryview)):
sites.append(UnixSite(runner, path,
shutdown_timeout=shutdown_timeout,
ssl_context=ssl_context,
backlog=backlog))
else:
for p in path:
sites.append(UnixSite(runner, p,
shutdown_timeout=shutdown_timeout,
ssl_context=ssl_context,
backlog=backlog))
if sock is not None:
if not isinstance(sock, Iterable):
sites.append(SockSite(runner, sock,
shutdown_timeout=shutdown_timeout,
ssl_context=ssl_context,
backlog=backlog))
else:
for s in sock:
sites.append(SockSite(runner, s,
shutdown_timeout=shutdown_timeout,
ssl_context=ssl_context,
backlog=backlog))
for site in sites:
await site.start()
if print: # pragma: no branch
names = sorted(str(s.name) for s in runner.sites)
print("======== Running on {} ========\n"
"(Press CTRL+C to quit)".format(', '.join(names)))
while True:
await asyncio.sleep(3600) # sleep forever by 1 hour intervals
finally:
await runner.cleanup()
def _cancel_all_tasks(loop: asyncio.AbstractEventLoop) -> None:
to_cancel = all_tasks(loop)
if not to_cancel:
return
for task in to_cancel:
task.cancel()
loop.run_until_complete(
asyncio.gather(*to_cancel, loop=loop, return_exceptions=True))
for task in to_cancel:
if task.cancelled():
continue
if task.exception() is not None:
loop.call_exception_handler({
'message': 'unhandled exception during asyncio.run() shutdown',
'exception': task.exception(),
'task': task,
})
def run_app(app: Union[Application, Awaitable[Application]], *,
host: Optional[str]=None,
port: Optional[int]=None,
path: Optional[str]=None,
sock: Optional[socket.socket]=None,
shutdown_timeout: float=60.0,
ssl_context: Optional[SSLContext]=None,
print: Callable[..., None]=print,
backlog: int=128,
access_log_class: Type[AbstractAccessLogger]=AccessLogger,
access_log_format: str=AccessLogger.LOG_FORMAT,
access_log: Optional[logging.Logger]=access_logger,
handle_signals: bool=True,
reuse_address: Optional[bool]=None,
reuse_port: Optional[bool]=None) -> None:
"""Run an app locally"""
loop = asyncio.get_event_loop()
# Configure if and only if in debugging mode and using the default logger
if loop.get_debug() and access_log and access_log.name == 'aiohttp.access':
if access_log.level == logging.NOTSET:
access_log.setLevel(logging.DEBUG)
if not access_log.hasHandlers():
access_log.addHandler(logging.StreamHandler())
try:
loop.run_until_complete(_run_app(app,
host=host,
port=port,
path=path,
sock=sock,
shutdown_timeout=shutdown_timeout,
ssl_context=ssl_context,
print=print,
backlog=backlog,
access_log_class=access_log_class,
access_log_format=access_log_format,
access_log=access_log,
handle_signals=handle_signals,
reuse_address=reuse_address,
reuse_port=reuse_port))
except (GracefulExit, KeyboardInterrupt): # pragma: no cover
pass
finally:
_cancel_all_tasks(loop)
if sys.version_info >= (3, 6): # don't use PY_36 to pass mypy
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
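# Hedged usage sketch for run_app (illustrative only, not executed on import):
#
#   async def hello(request: Request) -> Response:
#       return Response(text="Hello, world")
#
#   app = Application()
#   app.add_routes([get("/", hello)])
#   run_app(app, host="127.0.0.1", port=8080)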
def main(argv: List[str]) -> None:
arg_parser = ArgumentParser(
description="aiohttp.web Application server",
prog="aiohttp.web"
)
arg_parser.add_argument(
"entry_func",
help=("Callable returning the `aiohttp.web.Application` instance to "
"run. Should be specified in the 'module:function' syntax."),
metavar="entry-func"
)
arg_parser.add_argument(
"-H", "--hostname",
help="TCP/IP hostname to serve on (default: %(default)r)",
default="localhost"
)
arg_parser.add_argument(
"-P", "--port",
help="TCP/IP port to serve on (default: %(default)r)",
type=int,
default="8080"
)
arg_parser.add_argument(
"-U", "--path",
help="Unix file system path to serve on. Specifying a path will cause "
"hostname and port arguments to be ignored.",
)
args, extra_argv = arg_parser.parse_known_args(argv)
# Import logic
mod_str, _, func_str = args.entry_func.partition(":")
if not func_str or not mod_str:
arg_parser.error(
"'entry-func' not in 'module:function' syntax"
)
if mod_str.startswith("."):
arg_parser.error("relative module names not supported")
try:
module = import_module(mod_str)
except ImportError as ex:
arg_parser.error("unable to import %s: %s" % (mod_str, ex))
try:
func = getattr(module, func_str)
except AttributeError:
arg_parser.error("module %r has no attribute %r" % (mod_str, func_str))
# Compatibility logic
if args.path is not None and not hasattr(socket, 'AF_UNIX'):
arg_parser.error("file system paths not supported by your operating"
" environment")
logging.basicConfig(level=logging.DEBUG)
app = func(extra_argv)
run_app(app, host=args.hostname, port=args.port, path=args.path)
arg_parser.exit(message="Stopped\n")
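# Example invocation of the CLI entry point above (hedged; "mypkg.app:init_app"
# is a hypothetical module:function returning an Application):
#
#   python -m aiohttp.web -H 0.0.0.0 -P 8080 mypkg.app:init_app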
if __name__ == "__main__": # pragma: no branch
main(sys.argv[1:]) # pragma: no cover
| 31.17732
| 79
| 0.587329
|
793455b6242409c3d4df7512aa847d43e9236e9a
| 4,462
|
py
|
Python
|
osr2mp4/osrparse/replay.py
|
spaceskynet/osr2mp4-core
|
373e0fe3545a0eccfb4cc528b968e3e8e39857f2
|
[
"MIT"
] | 103
|
2020-06-07T15:28:41.000Z
|
2022-03-01T17:07:35.000Z
|
osr2mp4/osrparse/replay.py
|
spaceskynet/osr2mp4-core
|
373e0fe3545a0eccfb4cc528b968e3e8e39857f2
|
[
"MIT"
] | 50
|
2020-06-07T10:53:21.000Z
|
2021-12-19T14:47:47.000Z
|
osr2mp4/osrparse/replay.py
|
spaceskynet/osr2mp4-core
|
373e0fe3545a0eccfb4cc528b968e3e8e39857f2
|
[
"MIT"
] | 21
|
2020-07-12T16:02:35.000Z
|
2022-03-01T17:07:37.000Z
|
import _lzma
from osr2mp4.Exceptions import NoDataReplay
from osr2mp4.osrparse.enums import GameMode, Mod
import lzma, struct, datetime
class ReplayEvent(object):
def __init__(self, time_since_previous_action, x, y, keys_pressed):
self.time_since_previous_action = time_since_previous_action
self.x = x
self.y = y
self.keys_pressed = keys_pressed
class Replay(object):
"""
R.A.P.E
R - redz
A - amazing
P - parser for replay
E - epic
"""
game_mode: int = 0
game_version: int = 0
beatmap_hash: str = ''
player_name: str = ''
replay_hash: str = ''
number_300s: int = 0
number_100s: int = 0
number_50s: int = 0
gekis: int = 0
katus: int = 0
misses: int = 0
score: int = 0
max_combo: int = 0
is_perfect_combo: int = 0
mod_combination: [Mod] = []
life_bar_graph: str = ''
timestamp: datetime.datetime = None
play_data: [ReplayEvent] = []
# internal
view: [memoryview, bytes] = None # memoryview or bytes
	# Order of field initialization matters.
def __init__(self, replay_data: bytes = None):
if replay_data is not None:
#self.view = memoryview(replay_data) # FireRedz: stupid python cant pickle memoryview
self.view = replay_data
self.parse_replay_and_initialize_fields(replay_data)
else:
self.player_name = "osu!"
self.score = float("inf")
self.is_perfect_combo = 1
self.mod_combination = []
self.timestamp = datetime.datetime.now()
@classmethod
def from_path(cls, path: str):
with open(path, 'rb') as file:
try:
return cls(replay_data=file.read())
except _lzma.LZMAError:
raise NoDataReplay()
except Exception as err:
raise err
def parse_replay_and_initialize_fields(self, replay_data: bytes):
self.game_mode = self.read_byte()
self.game_version = self.read_int()
self.beatmap_hash = self.read_string()
self.player_name = self.read_string()
self.replay_hash = self.read_string()
self.number_300s = self.read_short()
self.number_100s = self.read_short()
self.number_50s = self.read_short()
self.gekis = self.read_short()
self.katus = self.read_short()
self.misses = self.read_short()
self.score = self.read_int()
self.max_combo = self.read_short()
self.is_perfect_combo = self.read_byte()
self.mod_combination = self.read_int()
self.life_bar_graph = self.read_string()
self.timestamp = datetime.datetime.min + datetime.timedelta(microseconds=self.read_long()/10)
self.parse_mod_combination()
self.parse_play_data(replay_data)
def parse_mod_combination(self):
		# Generator yielding the value of each set bit in an integer;
		# yields 0 if no bits are set at all.
def bits(n):
if n == 0:
yield 0
while n:
b = n & (~n+1)
yield b
n ^= b
bit_values_gen = bits(self.mod_combination)
self.mod_combination = frozenset(Mod(mod_val) for mod_val in bit_values_gen)
def parse_play_data(self, replay_data: bytes):
frames = []
lzma_len = self.read_int() # aka self.__replay_length
lzma_raw = lzma.decompress(self.read_byte(lzma_len)).decode('ascii')[:-1]
events = [event_raw.split('|') for event_raw in lzma_raw.split(',')]
self.play_data = [
ReplayEvent(
int(event[0]),
float(event[1]),
float(event[2]),
int(event[3])
)
for event in events
]
### NEW
def read_byte(self, length: int = 1):
val = self.view[:length]
self.view = self.view[length:]
return val
def read_short(self):
val = int.from_bytes(self.view[:2], 'little')
self.view = self.view[2:]
return val
def read_int(self):
val = int.from_bytes(self.view[:4], 'little')
self.view = self.view[4:]
return val
def read_float(self):
return self.read_int()
def read_long(self):
val = int.from_bytes(self.view[:8], 'little')
self.view = self.view[8:]
return val
def read_double(self):
return self.read_long()
def read_uleb128(self):
val = shift = 0
while True:
b = int.from_bytes(self.read_byte(), 'little')
val |= ((b & 0b01111111) << shift)
if (b & 0b10000000) == 0x00:
break
shift += 7
return val
def read_string(self):
if self.read_byte() == 0x00:
return ""
raw = self.read_uleb128()
return self.read_byte(raw).decode()
def get(self):
d = self.__dict__
self_dict = {k: d[k] for k in d if k != 'play_data'}
return self_dict
def set(self, state: dict):
self.__dict__ = state
def parse_replay_file(replay_path):
return Replay.from_path(replay_path)
parse_replay = parse_replay_file
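# Hedged usage sketch (illustrative; the .osr path is hypothetical):
#
#   replay = parse_replay("some_score.osr")
#   print(replay.player_name, replay.max_combo, len(replay.play_data))
#
# Note on read_uleb128 above: osu! strings are length-prefixed with ULEB128.
# For example, the byte sequence 0xE5 0x8E 0x26 decodes to
# 0x65 + (0x0E << 7) + (0x26 << 14) = 624485.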
| 23.239583
| 95
| 0.68736
|
2d80f954b9490b405b47dd6d6e0f064fee4543a7
| 313
|
py
|
Python
|
models/__init__.py
|
giuseppechecchia/frmoda-barcode-checker
|
1d3c5b97aae23b70057f71a2026ef9b47d42261c
|
[
"Unlicense"
] | null | null | null |
models/__init__.py
|
giuseppechecchia/frmoda-barcode-checker
|
1d3c5b97aae23b70057f71a2026ef9b47d42261c
|
[
"Unlicense"
] | null | null | null |
models/__init__.py
|
giuseppechecchia/frmoda-barcode-checker
|
1d3c5b97aae23b70057f71a2026ef9b47d42261c
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python
###############################################################################
# Copyleft (K) 2020-2022
# Developer: Giuseppe Checchia @eldoleo (<https://github.com/giuseppechecchia>)
###############################################################################
from . import barcode_checker
| 44.714286
| 79
| 0.370607
|
37755996135eecf9eb4ea88e48b9dbc15cec53ad
| 11,207
|
py
|
Python
|
src/robot/running/handlers.py
|
christestet/robotframework
|
94e1206bf259bae4c752b5a19d6f875c63934e82
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/robot/running/handlers.py
|
christestet/robotframework
|
94e1206bf259bae4c752b5a19d6f875c63934e82
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2022-03-01T12:28:43.000Z
|
2022-03-01T12:28:43.000Z
|
src/robot/running/handlers.py
|
greck2908/robotframework
|
94e1206bf259bae4c752b5a19d6f875c63934e82
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
import inspect
from robot.utils import (getdoc, getshortdoc, is_list_like, normpath, printable_name,
split_tags_from_doc, type_name)
from robot.errors import DataError
from robot.model import Tags
from .arguments import ArgumentSpec, DynamicArgumentParser, PythonArgumentParser
from .dynamicmethods import GetKeywordSource, GetKeywordTypes
from .librarykeywordrunner import (EmbeddedArgumentsRunner,
LibraryKeywordRunner, RunKeywordRunner)
from .runkwregister import RUN_KW_REGISTER
def Handler(library, name, method):
if RUN_KW_REGISTER.is_run_keyword(library.orig_name, name):
return _RunKeywordHandler(library, name, method)
return _PythonHandler(library, name, method)
def DynamicHandler(library, name, method, doc, argspec, tags=None):
if RUN_KW_REGISTER.is_run_keyword(library.orig_name, name):
return _DynamicRunKeywordHandler(library, name, method, doc, argspec, tags)
return _DynamicHandler(library, name, method, doc, argspec, tags)
def InitHandler(library, method=None, docgetter=None):
return _PythonInitHandler(library, '__init__', method, docgetter)
class _RunnableHandler:
def __init__(self, library, handler_name, handler_method, doc='', tags=None):
self.library = library
self._handler_name = handler_name
self.name = self._get_name(handler_name, handler_method)
self.arguments = self._parse_arguments(handler_method)
self._method = self._get_initial_handler(library, handler_name,
handler_method)
doc, tags_from_doc = split_tags_from_doc(doc or '')
tags_from_attr = self._get_tags_from_attribute(handler_method)
self._doc = doc
self.tags = Tags(tuple(tags_from_doc) +
tuple(tags_from_attr) +
tuple(tags or ()))
def _get_name(self, handler_name, handler_method):
if handler_name == '__init__':
return handler_name
robot_name = getattr(handler_method, 'robot_name', None)
name = robot_name or printable_name(handler_name, code_style=True)
if not name:
raise DataError('Keyword name cannot be empty.')
return name
def _parse_arguments(self, handler_method):
raise NotImplementedError
def _get_tags_from_attribute(self, handler_method):
tags = getattr(handler_method, 'robot_tags', ())
if not is_list_like(tags):
raise DataError("Expected tags to be list-like, got %s."
% type_name(tags))
return tags
def _get_initial_handler(self, library, name, method):
if library.scope.is_global:
return self._get_global_handler(method, name)
return None
def resolve_arguments(self, args, variables=None):
return self.arguments.resolve(args, variables, self.library.converters)
@property
def doc(self):
return self._doc
@property
def longname(self):
return '%s.%s' % (self.library.name, self.name)
@property
def shortdoc(self):
return getshortdoc(self.doc)
@property
def libname(self):
return self.library.name
@property
def source(self):
return self.library.source
@property
def lineno(self):
return -1
def create_runner(self, name):
return LibraryKeywordRunner(self)
def current_handler(self):
if self._method:
return self._method
return self._get_handler(self.library.get_instance(), self._handler_name)
def _get_global_handler(self, method, name):
return method
def _get_handler(self, lib_instance, handler_name):
try:
return getattr(lib_instance, handler_name)
except AttributeError:
# Occurs with old-style classes.
if handler_name == '__init__':
return None
raise
class _PythonHandler(_RunnableHandler):
def __init__(self, library, handler_name, handler_method):
_RunnableHandler.__init__(self, library, handler_name, handler_method,
getdoc(handler_method))
def _parse_arguments(self, handler_method):
return PythonArgumentParser().parse(handler_method, self.longname)
@property
def source(self):
handler = self.current_handler()
# `getsourcefile` can return None and raise TypeError.
try:
source = inspect.getsourcefile(inspect.unwrap(handler))
except TypeError:
source = None
return normpath(source) if source else self.library.source
@property
def lineno(self):
handler = self.current_handler()
try:
lines, start_lineno = inspect.getsourcelines(inspect.unwrap(handler))
except (TypeError, OSError, IOError):
return -1
for increment, line in enumerate(lines):
if line.strip().startswith('def '):
return start_lineno + increment
return start_lineno
class _DynamicHandler(_RunnableHandler):
def __init__(self, library, handler_name, dynamic_method, doc='',
argspec=None, tags=None):
self._argspec = argspec
self._run_keyword_method_name = dynamic_method.name
self._supports_kwargs = dynamic_method.supports_kwargs
_RunnableHandler.__init__(self, library, handler_name,
dynamic_method.method, doc, tags)
self._source_info = None
def _parse_arguments(self, handler_method):
spec = DynamicArgumentParser().parse(self._argspec, self.longname)
if not self._supports_kwargs:
if spec.var_named:
raise DataError("Too few '%s' method parameters for **kwargs "
"support." % self._run_keyword_method_name)
if spec.named_only:
raise DataError("Too few '%s' method parameters for "
"keyword-only arguments support."
% self._run_keyword_method_name)
get_keyword_types = GetKeywordTypes(self.library.get_instance())
spec.types = get_keyword_types(self._handler_name)
return spec
@property
def source(self):
if self._source_info is None:
self._source_info = self._get_source_info()
return self._source_info[0]
def _get_source_info(self):
get_keyword_source = GetKeywordSource(self.library.get_instance())
try:
source = get_keyword_source(self._handler_name)
except DataError as err:
self.library.report_error(
f"Getting source information for keyword '{self.name}' failed: {err}",
err.details
)
source = None
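        # `get_keyword_source` may return either a plain path or 'path:lineno';
        # an optional numeric suffix is split off here.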
if source and ':' in source and source.rsplit(':', 1)[1].isdigit():
source, lineno = source.rsplit(':', 1)
lineno = int(lineno)
else:
lineno = -1
return normpath(source) if source else self.library.source, lineno
@property
def lineno(self):
if self._source_info is None:
self._source_info = self._get_source_info()
return self._source_info[1]
def resolve_arguments(self, arguments, variables=None):
positional, named = super().resolve_arguments(arguments, variables)
if not self._supports_kwargs:
positional, named = self.arguments.map(positional, named)
return positional, named
def _get_handler(self, lib_instance, handler_name):
runner = getattr(lib_instance, self._run_keyword_method_name)
return self._get_dynamic_handler(runner, handler_name)
def _get_global_handler(self, method, name):
return self._get_dynamic_handler(method, name)
def _get_dynamic_handler(self, runner, name):
def handler(*positional, **kwargs):
if self._supports_kwargs:
return runner(name, positional, kwargs)
else:
return runner(name, positional)
return handler
class _RunKeywordHandler(_PythonHandler):
def create_runner(self, name):
default_dry_run_keywords = ('name' in self.arguments.positional and
self._args_to_process)
return RunKeywordRunner(self, default_dry_run_keywords)
@property
def _args_to_process(self):
return RUN_KW_REGISTER.get_args_to_process(self.library.orig_name,
self.name)
def resolve_arguments(self, args, variables=None):
return self.arguments.resolve(args, variables, self.library.converters,
resolve_named=False,
resolve_variables_until=self._args_to_process)
class _DynamicRunKeywordHandler(_DynamicHandler, _RunKeywordHandler):
_parse_arguments = _RunKeywordHandler._parse_arguments
resolve_arguments = _RunKeywordHandler.resolve_arguments
class _PythonInitHandler(_PythonHandler):
def __init__(self, library, handler_name, handler_method, docgetter):
_PythonHandler.__init__(self, library, handler_name, handler_method)
self._docgetter = docgetter
@property
def doc(self):
if self._docgetter:
self._doc = self._docgetter() or self._doc
self._docgetter = None
return self._doc
def _parse_arguments(self, init_method):
parser = PythonArgumentParser(type='Library')
return parser.parse(init_method or (lambda: None), self.library.name)
class EmbeddedArgumentsHandler:
def __init__(self, name_regexp, orig_handler):
self.arguments = ArgumentSpec() # Show empty argument spec for Libdoc
self.name_regexp = name_regexp
self._orig_handler = orig_handler
def __getattr__(self, item):
return getattr(self._orig_handler, item)
@property
def library(self):
return self._orig_handler.library
@library.setter
def library(self, library):
self._orig_handler.library = library
def matches(self, name):
return self.name_regexp.match(name) is not None
def create_runner(self, name):
return EmbeddedArgumentsRunner(self, name)
def __copy__(self):
orig_handler = copy(self._orig_handler)
return EmbeddedArgumentsHandler(self.name_regexp, orig_handler)
| 36.03537
| 86
| 0.658249
|
a4507eb176fd80ebc5ad51b1f84df59d787e60e3
| 4,300
|
py
|
Python
|
benchmark/startQiskit1291.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit1291.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit1291.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=5
# total number=52
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2, floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = pi (i.e. 180 degrees) ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
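# Illustrative usage of build_oracle (a sketch added for clarity, never called):
# the X conjugation around mcu1(pi) flips the phase of exactly the bitstrings
# with f(rep) == "1", so the returned circuit acts as the Grover phase oracle Z_f.
def _example_build_oracle_usage():
    marked = "11"
    return build_oracle(2, lambda rep: "1" if rep == marked else "0")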
def make_circuit(n: int, f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.cx(input_qubit[2],input_qubit[0]) # number=45
prog.cx(input_qubit[2],input_qubit[0]) # number=49
prog.z(input_qubit[2]) # number=50
prog.cx(input_qubit[2],input_qubit[0]) # number=51
prog.cx(input_qubit[2],input_qubit[0]) # number=47
prog.h(input_qubit[1]) # number=4
prog.rx(2.664070570244145,input_qubit[1]) # number=39
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.cx(input_qubit[3],input_qubit[2]) # number=48
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
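    # Roughly the optimal number of Grover iterations, floor((pi / 4) * sqrt(2 ** n));
    # for n = 5 this evaluates to 4.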
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[3]) # number=40
prog.y(input_qubit[4]) # number=35
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=25
prog.cz(input_qubit[1],input_qubit[0]) # number=26
prog.h(input_qubit[0]) # number=27
prog.h(input_qubit[0]) # number=36
prog.cz(input_qubit[1],input_qubit[0]) # number=37
prog.h(input_qubit[0]) # number=38
prog.cx(input_qubit[1],input_qubit[0]) # number=41
prog.x(input_qubit[0]) # number=42
prog.cx(input_qubit[1],input_qubit[0]) # number=43
prog.cx(input_qubit[1],input_qubit[0]) # number=34
prog.cx(input_qubit[1],input_qubit[0]) # number=24
prog.cx(input_qubit[0],input_qubit[1]) # number=29
prog.cx(input_qubit[2],input_qubit[3]) # number=44
prog.x(input_qubit[1]) # number=30
prog.cx(input_qubit[0],input_qubit[1]) # number=31
prog.x(input_qubit[2]) # number=11
prog.x(input_qubit[3]) # number=12
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = BasicAer.get_backend('qasm_simulator')
    sample_shot = 7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
    circuit1 = transpile(prog, backend, optimization_level=2)
writefile = open("../data/startQiskit1291.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 32.330827
| 82
| 0.614186
|
5b4ef6c33fa07afbda2d1c26146ceddbddf63fc2
| 3,844
|
py
|
Python
|
hc/ui/monitor/monitor.py
|
sorki/hacked_cnc
|
cbdae71f350fb56cd8cc7bd911c4673e98b6adf0
|
[
"BSD-3-Clause"
] | 2
|
2017-01-25T21:14:16.000Z
|
2017-11-16T09:25:38.000Z
|
hc/ui/monitor/monitor.py
|
sorki/hacked_cnc
|
cbdae71f350fb56cd8cc7bd911c4673e98b6adf0
|
[
"BSD-3-Clause"
] | null | null | null |
hc/ui/monitor/monitor.py
|
sorki/hacked_cnc
|
cbdae71f350fb56cd8cc7bd911c4673e98b6adf0
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python2
import sys
from PyQt5.QtCore import pyqtSlot, QTimer
from PyQt5.QtGui import QColor, QTextCursor
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QFileDialog
from PyQt5.uic import loadUi
from PyQt5.QtNetwork import QTcpSocket, QAbstractSocket
class Main(QMainWindow):
def __init__(self, *args):
super(Main, self).__init__(*args)
loadUi('mainwindow.ui', self)
self.host.setText("localhost")
self.port.setText("11010")
self.prompt.setText("M114")
self.conn = QTcpSocket(self)
self.conn.readyRead.connect(self.readSocket)
self.conn.error.connect(self.socketError)
self.conn.connected.connect(self.socketConnect)
self.conn.disconnected.connect(self.socketDisconnect)
self.connected = False
self.actionSave.triggered.connect(self.save)
self.prompt.setFocus()
self.do_connect()
def append(self, text):
self.text.append(text)
if self.autoscroll.isChecked():
c = self.text.textCursor()
c.movePosition(QTextCursor.End, QTextCursor.MoveAnchor)
self.text.setTextCursor(c)
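    # The colouring below suggests a simple line convention from the server:
    # lines starting with '<' and '>' appear to mark traffic direction and
    # everything else is treated as informational output (an assumption, not
    # documented here).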
def readSocket(self):
r = self.conn.readAll()
if r:
r = str(r).strip()
for chunk in r.splitlines():
if chunk:
if chunk[0] == '<':
self.text.setTextColor(QColor(200, 0, 0))
elif chunk[0] == '>':
self.text.setTextColor(QColor(0, 200, 0))
else:
self.text.setTextColor(QColor(0, 0, 200))
self.append(chunk)
def info(self, errtext):
self.text.setTextColor(QColor(20, 20, 20))
self.append(errtext)
def err(self, errtext):
self.text.setTextColor(QColor(100, 0, 0))
self.append(errtext)
def socketDisconnect(self):
self.connected = False
self.err("Disconnected")
def socketConnect(self):
self.connected = True
self.info("Connected")
def socketError(self, socketError):
if socketError == QAbstractSocket.RemoteHostClosedError:
pass
elif socketError == QAbstractSocket.HostNotFoundError:
self.err("The host was not found. Please check the host name and "
"port settings.")
elif socketError == QAbstractSocket.ConnectionRefusedError:
self.err("The connection was refused by the peer. Make sure the "
"server is running, and check that the host name "
"and port settings are correct.")
else:
self.err("The following error occurred: {0}"
.format(self.conn.errorString()))
def save(self):
fname, sel = QFileDialog.getSaveFileName(
self,
'Save Log',)
#'/path/to/default/directory', FIXME: lastused
#selectedFilter='*.txt')
if fname:
with open(fname, 'w+') as f:
f.write(self.text.toPlainText())
def do_connect(self):
self.conn.abort()
self.conn.connectToHost(self.host.text(),
int(self.port.text()))
@pyqtSlot()
def on_connect_clicked(self):
self.do_connect()
self.prompt.setFocus()
@pyqtSlot()
def on_send_clicked(self):
if not self.connected:
self.err("Not connected")
return
out = (self.prompt.text() + '\n').encode('ascii')
self.conn.write(out)
@pyqtSlot()
def on_right_on_clicked(self):
self.action("e1")
def main():
app = QApplication(sys.argv)
widget = Main()
widget.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| 28.902256
| 78
| 0.577784
|
01a745276ae2bd46384a1db0669e10f6d99293b5
| 3,162
|
py
|
Python
|
settings.py
|
BertrandWiart/farmbot2019eseo
|
3502f308f94e5e4f970d5d9dae1a1f1ebb9c60dc
|
[
"MIT"
] | null | null | null |
settings.py
|
BertrandWiart/farmbot2019eseo
|
3502f308f94e5e4f970d5d9dae1a1f1ebb9c60dc
|
[
"MIT"
] | null | null | null |
settings.py
|
BertrandWiart/farmbot2019eseo
|
3502f308f94e5e4f970d5d9dae1a1f1ebb9c60dc
|
[
"MIT"
] | null | null | null |
from farmware_tools import device
commands = {
'rq_connect' : 1,
'rsp_connectSuccess' : 2,
'rsp_connectFail' : 3
}
#IP = "localhost"
IP = "172.14.1.23"
PORT = 36100
PASS = "farmbot"
def sendMsg(socket, order, msg="", encoding="utf-8"):
device.log(message='Setting sending message !', message_type='success')
#print("==== SENDING MSG ====")
#print("Sending {} message".format(getDictKey(order)))
size = len(msg)
byte_size = size.to_bytes(3, byteorder="big", signed=False)
header = bytearray(4)
header[0] = order
for i in range(3):
header[i+1] = byte_size[i]
if encoding == "bytes":
msgBytes = msg
else:
msgBytes = bytearray(msg, encoding)
sendData = header + msgBytes
#print("Message sent : {}".format(sendData))
#print("=====================")
socket.send(sendData)
def getDictKey(value):
for key, val in commands.items():
if val == value:
return key
return None
def receiveHeader(socket):
try :
headerBytes = bytearray(socket.recv(4))
#print("Header : {}".format(headerBytes))
device.log(message='Setting receiveHEader !', message_type='success')
order, size = None, None
if len(headerBytes) > 0:
order = getDictKey(headerBytes[0])
#print("::::::::::\nSIZE : {}".format(headerBytes[1:]))
headerBytes[0] = 0
size = int.from_bytes(headerBytes, byteorder='big')
#print("Order : {}".format(order))
#print("Size : {}\n::::::::::".format(size))
except ConnectionResetError:
#print("Connection reset by peer")
order = 0
size = 0
return order, size
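# NOTE: receiveMsg returns (ok, data) on the server side but
# (ok, data, order, size) on the client side, so callers must unpack accordingly.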
def receiveMsg(socket, side="client"):
#print("==== RECEIVING MSG ====")
device.log(message='Settings ReceiveMEssage !', message_type='success')
order, size = receiveHeader(socket)
    if order is None:
#print("[ERROR] Order = None")
return False, None
dataBytes = bytearray()
if (size>0):
dataBytes = socket.recv(size)
if side == "server":
if order == "rq_connect":
#print("Password received : {}".format(dataBytes.decode()))
if (dataBytes.decode() == PASS):
#print("Good password !")
sendMsg(socket, commands["rsp_connectSuccess"])
return True, None
else:
#print("Bad password !")
sendMsg(socket, commands["rsp_connectFail"])
return False, None
else:
#print("[ERROR] Received a message destinated to a client, but i'm a server")
return False, None
else:
if order == "rsp_connectSuccess":
#print("Connection success !")
return True, None, order, size
elif order == "rsp_connectFail":
#print("Connection failed !")
return False, None, order, size
else:
#print("[ERROR] Received a message destinated to a server, but i'm a client")
return False, None, order, size
#print("=======================")
| 27.982301
| 89
| 0.555028
|
697ee47700613aab777605a07fb4094759db7f39
| 2,179
|
py
|
Python
|
setup.py
|
MikeHart85/lewis_ophyd_playground
|
8568e98deefc6a8cea5510165f5afe9679fcf3df
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
MikeHart85/lewis_ophyd_playground
|
8568e98deefc6a8cea5510165f5afe9679fcf3df
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
MikeHart85/lewis_ophyd_playground
|
8568e98deefc6a8cea5510165f5afe9679fcf3df
|
[
"BSD-3-Clause"
] | null | null | null |
from os import path
from setuptools import setup, find_packages
import sys
import versioneer
# NOTE: This file must remain Python 2 compatible for the foreseeable future,
# to ensure that we error out properly for people with outdated setuptools
# and/or pip.
min_version = (3, 6)
if sys.version_info < min_version:
error = """
lewis_ophyd_playground does not support Python {0}.{1}.
Python {2}.{3} and above is required. Check your Python version like so:
python3 --version
This may be due to an out-of-date pip. Make sure you have pip >= 9.0.1.
Upgrade pip like so:
pip install --upgrade pip
""".format(*(sys.version_info[:2] + min_version))
sys.exit(error)
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as readme_file:
readme = readme_file.read()
with open(path.join(here, 'requirements.txt')) as requirements_file:
# Parse requirements.txt, ignoring any commented-out lines.
requirements = [line for line in requirements_file.read().splitlines()
if not line.startswith('#')]
setup(
name='lewis_ophyd_playground',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Python package for doing science.",
long_description=readme,
author="Michael Hart",
author_email='mhart@bnl.gov',
url='https://github.com/MikeHart85/lewis_ophyd_playground',
python_requires='>={}'.format('.'.join(str(n) for n in min_version)),
packages=find_packages(exclude=['docs', 'tests']),
entry_points={
'console_scripts': [
# 'command = some.module:some_function',
],
},
include_package_data=True,
package_data={
'lewis_ophyd_playground': [
# When adding files here, remember to update MANIFEST.in as well,
# or else they will not be included in the distribution on PyPI!
# 'path/to/data_file',
]
},
install_requires=requirements,
license="BSD (3-clause)",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
)
| 32.044118
| 77
| 0.673245
|
855ea2648de4926f8259268b541e7400d99a6cef
| 1,748
|
py
|
Python
|
speech-emotion/speech-emotion-model.py
|
jinwkim/emotion-detection
|
54eec1ebdb9622adb3149662ca1e9d85ce1caaea
|
[
"MIT"
] | null | null | null |
speech-emotion/speech-emotion-model.py
|
jinwkim/emotion-detection
|
54eec1ebdb9622adb3149662ca1e9d85ce1caaea
|
[
"MIT"
] | null | null | null |
speech-emotion/speech-emotion-model.py
|
jinwkim/emotion-detection
|
54eec1ebdb9622adb3149662ca1e9d85ce1caaea
|
[
"MIT"
] | null | null | null |
import numpy as np
from pydub import AudioSegment, effects
import librosa
import noisereduce as nr
import tensorflow as tf
import keras
from keras.models import Sequential
from keras import layers
from keras import optimizers
from keras import callbacks
import sklearn
from sklearn.model_selection import train_test_split
batch_size = 23
x = np.load('data/features.npy')
y = np.load('data/emotions.npy')
print(x.shape)
print(y.shape)
# split data into train, test, and validation sets
x_train, x_tosplit, y_train, y_tosplit = train_test_split(x, y, test_size = 0.125, random_state = 1)
x_val, x_test, y_val, y_test = train_test_split(x_tosplit, y_tosplit, test_size = 0.304, random_state = 1)
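# With test_size=0.125 and then 0.304, this is roughly an 87.5% / 8.7% / 3.8%
# train / validation / test split of the original data.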
y_train_class = tf.keras.utils.to_categorical(y_train, 8, dtype = 'int8')
y_val_class = tf.keras.utils.to_categorical(y_val, 8, dtype = 'int8')
y_test_class = tf.keras.utils.to_categorical(y_test, 8, dtype = 'int8')
print(np.shape(x_train))
print(np.shape(x_val))
print(np.shape(x_test))
# creating LSTM (Long Short-Term Memory) model; the LSTM layers expect input shaped (timesteps, features) = x.shape[1:3]
model = Sequential()
model.add(layers.LSTM(64, return_sequences = True, input_shape = (x.shape[1:3])))
model.add(layers.LSTM(64))
model.add(layers.Dense(8, activation = 'softmax'))
model.compile(loss='categorical_crossentropy', optimizer='RMSProp', metrics=['categorical_accuracy'])
model.fit(x_train, y_train_class,
epochs=340, batch_size = batch_size,
validation_data = (x_val, y_val_class))
model.save('../models/speech_emotion.h5')
# evaluate with validation and testing sets
val_loss, val_acc = model.evaluate(x_val, y_val_class, verbose = 2)
print('validation accuracy', val_acc)
test_loss, test_acc = model.evaluate(x_test, y_test_class, verbose = 2)
print('testing accuracy', test_acc)
| 30.666667
| 106
| 0.765446
|
6a1e87581bec232bd41ab3f2a6ea7de76fc8ddca
| 31,374
|
py
|
Python
|
ST_spectrum/Ui_ST_main.py
|
cygnushan/measurement
|
644e8b698faf50dcc86d88834675d6adf1281b10
|
[
"MIT"
] | 1
|
2022-03-18T18:38:02.000Z
|
2022-03-18T18:38:02.000Z
|
ST_spectrum/Ui_ST_main.py
|
cygnushan/measurement
|
644e8b698faf50dcc86d88834675d6adf1281b10
|
[
"MIT"
] | null | null | null |
ST_spectrum/Ui_ST_main.py
|
cygnushan/measurement
|
644e8b698faf50dcc86d88834675d6adf1281b10
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'G:\WorkDir\gas-sensing_resistors\ST_spectrum\ST_main.ui'
#
# Created: Tue Jan 19 22:08:53 2016
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
from Rtime_mplCanvas import Rt_CanvasWidget
from RT_mplCanvas import RT_CanvasWidget
from ST_mplCanvas import ST_CanvasWidget
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_ST_APP(object):
def setupUi(self, ST_APP):
ST_APP.setObjectName(_fromUtf8("ST_APP"))
ST_APP.resize(800, 600)
ST_APP.setMinimumSize(QtCore.QSize(800, 600))
ST_APP.setMaximumSize(QtCore.QSize(800, 600))
font = QtGui.QFont()
font.setPointSize(12)
ST_APP.setFont(font)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icon/icons/lmd.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
ST_APP.setWindowIcon(icon)
self.verticalLayout_13 = QtGui.QVBoxLayout(ST_APP)
self.verticalLayout_13.setObjectName(_fromUtf8("verticalLayout_13"))
self.verticalLayout_12 = QtGui.QVBoxLayout()
self.verticalLayout_12.setObjectName(_fromUtf8("verticalLayout_12"))
self.horizontalLayout_15 = QtGui.QHBoxLayout()
self.horizontalLayout_15.setObjectName(_fromUtf8("horizontalLayout_15"))
self.verticalLayout_10 = QtGui.QVBoxLayout()
self.verticalLayout_10.setObjectName(_fromUtf8("verticalLayout_10"))
self.ST_MPLS = QtGui.QStackedWidget(ST_APP)
self.ST_MPLS.setMinimumSize(QtCore.QSize(480, 320))
self.ST_MPLS.setMaximumSize(QtCore.QSize(480, 320))
font = QtGui.QFont()
font.setPointSize(12)
self.ST_MPLS.setFont(font)
self.ST_MPLS.setObjectName(_fromUtf8("ST_MPLS"))
self.Rt_MPL = Rt_CanvasWidget()
self.Rt_MPL.setObjectName(_fromUtf8("Rt_MPL"))
self.ST_MPLS.addWidget(self.Rt_MPL)
self.RT_MPL = RT_CanvasWidget()
self.RT_MPL.setObjectName(_fromUtf8("RT_MPL"))
self.ST_MPLS.addWidget(self.RT_MPL)
self.ST_MPL = ST_CanvasWidget()
self.ST_MPL.setObjectName(_fromUtf8("ST_MPL"))
self.ST_MPLS.addWidget(self.ST_MPL)
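        # Pages 0/1/2 of this stacked widget hold the R-t, R-T and S-T canvases,
        # matching the three curve radio buttons created further below.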
self.verticalLayout_10.addWidget(self.ST_MPLS)
self.log_state = QtGui.QCheckBox(ST_APP)
self.log_state.setObjectName(_fromUtf8("log_state"))
self.verticalLayout_10.addWidget(self.log_state)
self.groupBox_5 = QtGui.QGroupBox(ST_APP)
self.groupBox_5.setObjectName(_fromUtf8("groupBox_5"))
self.verticalLayout_8 = QtGui.QVBoxLayout(self.groupBox_5)
self.verticalLayout_8.setObjectName(_fromUtf8("verticalLayout_8"))
self.verticalLayout_7 = QtGui.QVBoxLayout()
self.verticalLayout_7.setObjectName(_fromUtf8("verticalLayout_7"))
self.horizontalLayout_19 = QtGui.QHBoxLayout()
self.horizontalLayout_19.setObjectName(_fromUtf8("horizontalLayout_19"))
self.horizontalLayout_12 = QtGui.QHBoxLayout()
self.horizontalLayout_12.setObjectName(_fromUtf8("horizontalLayout_12"))
self.label_18 = QtGui.QLabel(self.groupBox_5)
self.label_18.setMinimumSize(QtCore.QSize(64, 32))
self.label_18.setMaximumSize(QtCore.QSize(64, 32))
self.label_18.setObjectName(_fromUtf8("label_18"))
self.horizontalLayout_12.addWidget(self.label_18)
self.run_time = QtGui.QLineEdit(self.groupBox_5)
self.run_time.setMinimumSize(QtCore.QSize(113, 22))
self.run_time.setMaximumSize(QtCore.QSize(113, 22))
self.run_time.setReadOnly(True)
self.run_time.setObjectName(_fromUtf8("run_time"))
self.horizontalLayout_12.addWidget(self.run_time)
self.label_5 = QtGui.QLabel(self.groupBox_5)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.horizontalLayout_12.addWidget(self.label_5)
self.horizontalLayout_19.addLayout(self.horizontalLayout_12)
self.horizontalLayout_18 = QtGui.QHBoxLayout()
self.horizontalLayout_18.setObjectName(_fromUtf8("horizontalLayout_18"))
self.label_19 = QtGui.QLabel(self.groupBox_5)
self.label_19.setMinimumSize(QtCore.QSize(56, 32))
self.label_19.setMaximumSize(QtCore.QSize(56, 32))
self.label_19.setObjectName(_fromUtf8("label_19"))
self.horizontalLayout_18.addWidget(self.label_19)
self.flow1 = QtGui.QLineEdit(self.groupBox_5)
self.flow1.setMinimumSize(QtCore.QSize(113, 22))
self.flow1.setMaximumSize(QtCore.QSize(113, 22))
self.flow1.setReadOnly(True)
self.flow1.setObjectName(_fromUtf8("flow1"))
self.horizontalLayout_18.addWidget(self.flow1)
self.label_7 = QtGui.QLabel(self.groupBox_5)
self.label_7.setMinimumSize(QtCore.QSize(48, 32))
self.label_7.setMaximumSize(QtCore.QSize(48, 32))
self.label_7.setObjectName(_fromUtf8("label_7"))
self.horizontalLayout_18.addWidget(self.label_7)
self.f1_open = QtGui.QCheckBox(self.groupBox_5)
self.f1_open.setText(_fromUtf8(""))
self.f1_open.setObjectName(_fromUtf8("f1_open"))
self.horizontalLayout_18.addWidget(self.f1_open)
self.horizontalLayout_19.addLayout(self.horizontalLayout_18)
self.verticalLayout_7.addLayout(self.horizontalLayout_19)
self.horizontalLayout_20 = QtGui.QHBoxLayout()
self.horizontalLayout_20.setObjectName(_fromUtf8("horizontalLayout_20"))
self.horizontalLayout_13 = QtGui.QHBoxLayout()
self.horizontalLayout_13.setObjectName(_fromUtf8("horizontalLayout_13"))
self.label_20 = QtGui.QLabel(self.groupBox_5)
self.label_20.setMinimumSize(QtCore.QSize(64, 32))
self.label_20.setMaximumSize(QtCore.QSize(64, 32))
self.label_20.setObjectName(_fromUtf8("label_20"))
self.horizontalLayout_13.addWidget(self.label_20)
self.now_R = QtGui.QLineEdit(self.groupBox_5)
self.now_R.setMinimumSize(QtCore.QSize(113, 22))
self.now_R.setMaximumSize(QtCore.QSize(113, 22))
self.now_R.setReadOnly(True)
self.now_R.setObjectName(_fromUtf8("now_R"))
self.horizontalLayout_13.addWidget(self.now_R)
self.label_6 = QtGui.QLabel(self.groupBox_5)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.horizontalLayout_13.addWidget(self.label_6)
self.horizontalLayout_20.addLayout(self.horizontalLayout_13)
self.horizontalLayout_17 = QtGui.QHBoxLayout()
self.horizontalLayout_17.setObjectName(_fromUtf8("horizontalLayout_17"))
self.label_26 = QtGui.QLabel(self.groupBox_5)
self.label_26.setMinimumSize(QtCore.QSize(56, 32))
self.label_26.setMaximumSize(QtCore.QSize(56, 32))
self.label_26.setObjectName(_fromUtf8("label_26"))
self.horizontalLayout_17.addWidget(self.label_26)
self.flow2 = QtGui.QLineEdit(self.groupBox_5)
self.flow2.setMinimumSize(QtCore.QSize(113, 22))
self.flow2.setMaximumSize(QtCore.QSize(113, 22))
self.flow2.setReadOnly(True)
self.flow2.setObjectName(_fromUtf8("flow2"))
self.horizontalLayout_17.addWidget(self.flow2)
self.label_8 = QtGui.QLabel(self.groupBox_5)
self.label_8.setMinimumSize(QtCore.QSize(48, 32))
self.label_8.setMaximumSize(QtCore.QSize(48, 32))
self.label_8.setObjectName(_fromUtf8("label_8"))
self.horizontalLayout_17.addWidget(self.label_8)
self.f2_open = QtGui.QCheckBox(self.groupBox_5)
self.f2_open.setText(_fromUtf8(""))
self.f2_open.setObjectName(_fromUtf8("f2_open"))
self.horizontalLayout_17.addWidget(self.f2_open)
self.horizontalLayout_20.addLayout(self.horizontalLayout_17)
self.verticalLayout_7.addLayout(self.horizontalLayout_20)
self.horizontalLayout_21 = QtGui.QHBoxLayout()
self.horizontalLayout_21.setObjectName(_fromUtf8("horizontalLayout_21"))
self.horizontalLayout_14 = QtGui.QHBoxLayout()
self.horizontalLayout_14.setObjectName(_fromUtf8("horizontalLayout_14"))
self.label_27 = QtGui.QLabel(self.groupBox_5)
self.label_27.setMinimumSize(QtCore.QSize(64, 32))
self.label_27.setMaximumSize(QtCore.QSize(64, 32))
self.label_27.setObjectName(_fromUtf8("label_27"))
self.horizontalLayout_14.addWidget(self.label_27)
self.now_T = QtGui.QLineEdit(self.groupBox_5)
self.now_T.setMinimumSize(QtCore.QSize(113, 22))
self.now_T.setMaximumSize(QtCore.QSize(113, 22))
self.now_T.setReadOnly(True)
self.now_T.setObjectName(_fromUtf8("now_T"))
self.horizontalLayout_14.addWidget(self.now_T)
self.label_4 = QtGui.QLabel(self.groupBox_5)
self.label_4.setMinimumSize(QtCore.QSize(0, 16))
self.label_4.setMaximumSize(QtCore.QSize(32, 16))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.horizontalLayout_14.addWidget(self.label_4)
self.horizontalLayout_21.addLayout(self.horizontalLayout_14)
self.horizontalLayout_16 = QtGui.QHBoxLayout()
self.horizontalLayout_16.setObjectName(_fromUtf8("horizontalLayout_16"))
self.label_28 = QtGui.QLabel(self.groupBox_5)
self.label_28.setMinimumSize(QtCore.QSize(56, 32))
self.label_28.setMaximumSize(QtCore.QSize(56, 32))
self.label_28.setObjectName(_fromUtf8("label_28"))
self.horizontalLayout_16.addWidget(self.label_28)
self.flow3 = QtGui.QLineEdit(self.groupBox_5)
self.flow3.setMinimumSize(QtCore.QSize(113, 22))
self.flow3.setMaximumSize(QtCore.QSize(113, 22))
self.flow3.setReadOnly(True)
self.flow3.setObjectName(_fromUtf8("flow3"))
self.horizontalLayout_16.addWidget(self.flow3)
self.label_9 = QtGui.QLabel(self.groupBox_5)
self.label_9.setMinimumSize(QtCore.QSize(48, 32))
self.label_9.setMaximumSize(QtCore.QSize(48, 32))
self.label_9.setObjectName(_fromUtf8("label_9"))
self.horizontalLayout_16.addWidget(self.label_9)
self.f3_open = QtGui.QCheckBox(self.groupBox_5)
self.f3_open.setText(_fromUtf8(""))
self.f3_open.setObjectName(_fromUtf8("f3_open"))
self.horizontalLayout_16.addWidget(self.f3_open)
self.horizontalLayout_21.addLayout(self.horizontalLayout_16)
self.verticalLayout_7.addLayout(self.horizontalLayout_21)
self.verticalLayout_8.addLayout(self.verticalLayout_7)
self.verticalLayout_10.addWidget(self.groupBox_5)
self.horizontalLayout_15.addLayout(self.verticalLayout_10)
self.verticalLayout_5 = QtGui.QVBoxLayout()
self.verticalLayout_5.setSpacing(20)
self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
self.groupBox_15 = QtGui.QGroupBox(ST_APP)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_15.sizePolicy().hasHeightForWidth())
self.groupBox_15.setSizePolicy(sizePolicy)
self.groupBox_15.setMinimumSize(QtCore.QSize(281, 120))
self.groupBox_15.setMaximumSize(QtCore.QSize(281, 120))
font = QtGui.QFont()
font.setPointSize(12)
self.groupBox_15.setFont(font)
self.groupBox_15.setObjectName(_fromUtf8("groupBox_15"))
self.verticalLayout_9 = QtGui.QVBoxLayout(self.groupBox_15)
self.verticalLayout_9.setSpacing(10)
self.verticalLayout_9.setContentsMargins(10, 0, 10, 0)
self.verticalLayout_9.setObjectName(_fromUtf8("verticalLayout_9"))
self.verticalLayout_4 = QtGui.QVBoxLayout()
self.verticalLayout_4.setSpacing(10)
self.verticalLayout_4.setMargin(0)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.horizontalLayout_32 = QtGui.QHBoxLayout()
self.horizontalLayout_32.setObjectName(_fromUtf8("horizontalLayout_32"))
self.label_16 = QtGui.QLabel(self.groupBox_15)
self.label_16.setObjectName(_fromUtf8("label_16"))
self.horizontalLayout_32.addWidget(self.label_16)
self.sample_id = QtGui.QLineEdit(self.groupBox_15)
self.sample_id.setObjectName(_fromUtf8("sample_id"))
self.horizontalLayout_32.addWidget(self.sample_id)
self.verticalLayout_4.addLayout(self.horizontalLayout_32)
self.horizontalLayout_33 = QtGui.QHBoxLayout()
self.horizontalLayout_33.setObjectName(_fromUtf8("horizontalLayout_33"))
self.label_21 = QtGui.QLabel(self.groupBox_15)
self.label_21.setObjectName(_fromUtf8("label_21"))
self.horizontalLayout_33.addWidget(self.label_21)
self.save_path = QtGui.QLineEdit(self.groupBox_15)
self.save_path.setObjectName(_fromUtf8("save_path"))
self.horizontalLayout_33.addWidget(self.save_path)
self.btn_savepath = QtGui.QPushButton(self.groupBox_15)
self.btn_savepath.setText(_fromUtf8(""))
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/icon/icons/folder.ico")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btn_savepath.setIcon(icon1)
self.btn_savepath.setIconSize(QtCore.QSize(16, 16))
self.btn_savepath.setObjectName(_fromUtf8("btn_savepath"))
self.horizontalLayout_33.addWidget(self.btn_savepath)
self.verticalLayout_4.addLayout(self.horizontalLayout_33)
self.horizontalLayout_8 = QtGui.QHBoxLayout()
self.horizontalLayout_8.setSpacing(10)
self.horizontalLayout_8.setObjectName(_fromUtf8("horizontalLayout_8"))
self.horizontalLayout_35 = QtGui.QHBoxLayout()
self.horizontalLayout_35.setObjectName(_fromUtf8("horizontalLayout_35"))
self.label_24 = QtGui.QLabel(self.groupBox_15)
self.label_24.setMinimumSize(QtCore.QSize(36, 24))
self.label_24.setMaximumSize(QtCore.QSize(36, 24))
self.label_24.setObjectName(_fromUtf8("label_24"))
self.horizontalLayout_35.addWidget(self.label_24)
self.sample_area = QtGui.QLineEdit(self.groupBox_15)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.sample_area.sizePolicy().hasHeightForWidth())
self.sample_area.setSizePolicy(sizePolicy)
self.sample_area.setMinimumSize(QtCore.QSize(40, 22))
self.sample_area.setMaximumSize(QtCore.QSize(40, 22))
self.sample_area.setText(_fromUtf8(""))
self.sample_area.setObjectName(_fromUtf8("sample_area"))
self.horizontalLayout_35.addWidget(self.sample_area)
self.label_25 = QtGui.QLabel(self.groupBox_15)
self.label_25.setMinimumSize(QtCore.QSize(32, 29))
self.label_25.setMaximumSize(QtCore.QSize(32, 29))
font = QtGui.QFont()
font.setPointSize(12)
self.label_25.setFont(font)
self.label_25.setObjectName(_fromUtf8("label_25"))
self.horizontalLayout_35.addWidget(self.label_25)
self.horizontalLayout_8.addLayout(self.horizontalLayout_35)
self.horizontalLayout_34 = QtGui.QHBoxLayout()
self.horizontalLayout_34.setObjectName(_fromUtf8("horizontalLayout_34"))
self.label_22 = QtGui.QLabel(self.groupBox_15)
self.label_22.setMinimumSize(QtCore.QSize(36, 29))
self.label_22.setMaximumSize(QtCore.QSize(36, 29))
self.label_22.setObjectName(_fromUtf8("label_22"))
self.horizontalLayout_34.addWidget(self.label_22)
self.sample_height = QtGui.QLineEdit(self.groupBox_15)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.sample_height.sizePolicy().hasHeightForWidth())
self.sample_height.setSizePolicy(sizePolicy)
self.sample_height.setMinimumSize(QtCore.QSize(40, 22))
self.sample_height.setMaximumSize(QtCore.QSize(40, 22))
self.sample_height.setText(_fromUtf8(""))
self.sample_height.setObjectName(_fromUtf8("sample_height"))
self.horizontalLayout_34.addWidget(self.sample_height)
self.label_23 = QtGui.QLabel(self.groupBox_15)
self.label_23.setMinimumSize(QtCore.QSize(23, 29))
self.label_23.setMaximumSize(QtCore.QSize(23, 29))
font = QtGui.QFont()
font.setPointSize(12)
self.label_23.setFont(font)
self.label_23.setObjectName(_fromUtf8("label_23"))
self.horizontalLayout_34.addWidget(self.label_23)
self.horizontalLayout_8.addLayout(self.horizontalLayout_34)
self.verticalLayout_4.addLayout(self.horizontalLayout_8)
self.verticalLayout_9.addLayout(self.verticalLayout_4)
self.verticalLayout_5.addWidget(self.groupBox_15)
self.groupBox_2 = QtGui.QGroupBox(ST_APP)
self.groupBox_2.setMinimumSize(QtCore.QSize(281, 131))
self.groupBox_2.setMaximumSize(QtCore.QSize(281, 131))
font = QtGui.QFont()
font.setPointSize(12)
self.groupBox_2.setFont(font)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.verticalLayout_11 = QtGui.QVBoxLayout(self.groupBox_2)
self.verticalLayout_11.setSpacing(10)
self.verticalLayout_11.setMargin(10)
self.verticalLayout_11.setObjectName(_fromUtf8("verticalLayout_11"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setSpacing(20)
self.verticalLayout.setContentsMargins(0, 10, 0, 10)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setSpacing(20)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.INST_SET = QtGui.QPushButton(self.groupBox_2)
self.INST_SET.setObjectName(_fromUtf8("INST_SET"))
self.horizontalLayout.addWidget(self.INST_SET)
self.AI518P_SET = QtGui.QPushButton(self.groupBox_2)
self.AI518P_SET.setObjectName(_fromUtf8("AI518P_SET"))
self.horizontalLayout.addWidget(self.AI518P_SET)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_9 = QtGui.QHBoxLayout()
self.horizontalLayout_9.setSpacing(20)
self.horizontalLayout_9.setObjectName(_fromUtf8("horizontalLayout_9"))
self.GAS_SET = QtGui.QPushButton(self.groupBox_2)
self.GAS_SET.setObjectName(_fromUtf8("GAS_SET"))
self.horizontalLayout_9.addWidget(self.GAS_SET)
self.COORD_SET = QtGui.QPushButton(self.groupBox_2)
self.COORD_SET.setObjectName(_fromUtf8("COORD_SET"))
self.horizontalLayout_9.addWidget(self.COORD_SET)
self.verticalLayout.addLayout(self.horizontalLayout_9)
self.verticalLayout_11.addLayout(self.verticalLayout)
self.verticalLayout_5.addWidget(self.groupBox_2)
self.groupBox_4 = QtGui.QGroupBox(ST_APP)
self.groupBox_4.setMinimumSize(QtCore.QSize(281, 111))
self.groupBox_4.setMaximumSize(QtCore.QSize(281, 111))
font = QtGui.QFont()
font.setPointSize(12)
self.groupBox_4.setFont(font)
self.groupBox_4.setObjectName(_fromUtf8("groupBox_4"))
self.verticalLayout_6 = QtGui.QVBoxLayout(self.groupBox_4)
self.verticalLayout_6.setSpacing(0)
self.verticalLayout_6.setMargin(10)
self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6"))
self.verticalLayout_3 = QtGui.QVBoxLayout()
self.verticalLayout_3.setSpacing(10)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.Rt_Curve = QtGui.QRadioButton(self.groupBox_4)
self.Rt_Curve.setChecked(True)
self.Rt_Curve.setObjectName(_fromUtf8("Rt_Curve"))
self.verticalLayout_3.addWidget(self.Rt_Curve)
self.RT_Curve = QtGui.QRadioButton(self.groupBox_4)
self.RT_Curve.setObjectName(_fromUtf8("RT_Curve"))
self.verticalLayout_3.addWidget(self.RT_Curve)
self.ST_Curve = QtGui.QRadioButton(self.groupBox_4)
self.ST_Curve.setObjectName(_fromUtf8("ST_Curve"))
self.verticalLayout_3.addWidget(self.ST_Curve)
self.verticalLayout_6.addLayout(self.verticalLayout_3)
self.verticalLayout_5.addWidget(self.groupBox_4)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.ST_start = QtGui.QPushButton(ST_APP)
font = QtGui.QFont()
font.setPointSize(12)
self.ST_start.setFont(font)
self.ST_start.setObjectName(_fromUtf8("ST_start"))
self.horizontalLayout_2.addWidget(self.ST_start)
self.ST_stop = QtGui.QPushButton(ST_APP)
font = QtGui.QFont()
font.setPointSize(12)
self.ST_stop.setFont(font)
self.ST_stop.setObjectName(_fromUtf8("ST_stop"))
self.horizontalLayout_2.addWidget(self.ST_stop)
self.ST_save = QtGui.QPushButton(ST_APP)
font = QtGui.QFont()
font.setPointSize(12)
self.ST_save.setFont(font)
self.ST_save.setObjectName(_fromUtf8("ST_save"))
self.horizontalLayout_2.addWidget(self.ST_save)
self.verticalLayout_5.addLayout(self.horizontalLayout_2)
self.horizontalLayout_15.addLayout(self.verticalLayout_5)
self.verticalLayout_12.addLayout(self.horizontalLayout_15)
self.groupBox_3 = QtGui.QGroupBox(ST_APP)
self.groupBox_3.setMinimumSize(QtCore.QSize(780, 61))
self.groupBox_3.setMaximumSize(QtCore.QSize(780, 61))
font = QtGui.QFont()
font.setPointSize(12)
self.groupBox_3.setFont(font)
self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox_3)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.horizontalLayout_11 = QtGui.QHBoxLayout()
self.horizontalLayout_11.setObjectName(_fromUtf8("horizontalLayout_11"))
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setSpacing(0)
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.inst_sta = QtGui.QLabel(self.groupBox_3)
self.inst_sta.setText(_fromUtf8(""))
self.inst_sta.setPixmap(QtGui.QPixmap(_fromUtf8(":/icon/icons/noyb.png")))
self.inst_sta.setObjectName(_fromUtf8("inst_sta"))
self.horizontalLayout_4.addWidget(self.inst_sta)
self.pcb_sta = QtGui.QLabel(self.groupBox_3)
self.pcb_sta.setText(_fromUtf8(""))
self.pcb_sta.setPixmap(QtGui.QPixmap(_fromUtf8(":/icon/icons/nodlb.png")))
self.pcb_sta.setObjectName(_fromUtf8("pcb_sta"))
self.horizontalLayout_4.addWidget(self.pcb_sta)
self.ai518_sta = QtGui.QLabel(self.groupBox_3)
self.ai518_sta.setText(_fromUtf8(""))
self.ai518_sta.setPixmap(QtGui.QPixmap(_fromUtf8(":/icon/icons/nowky.png")))
self.ai518_sta.setObjectName(_fromUtf8("ai518_sta"))
self.horizontalLayout_4.addWidget(self.ai518_sta)
self.horizontalLayout_11.addLayout(self.horizontalLayout_4)
self.sys_state = QtGui.QLineEdit(self.groupBox_3)
self.sys_state.setEnabled(False)
self.sys_state.setObjectName(_fromUtf8("sys_state"))
self.horizontalLayout_11.addWidget(self.sys_state)
self.horizontalLayout_10 = QtGui.QHBoxLayout()
self.horizontalLayout_10.setObjectName(_fromUtf8("horizontalLayout_10"))
self.horizontalLayout_5 = QtGui.QHBoxLayout()
self.horizontalLayout_5.setSpacing(0)
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.label_14 = QtGui.QLabel(self.groupBox_3)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_14.setFont(font)
self.label_14.setObjectName(_fromUtf8("label_14"))
self.horizontalLayout_5.addWidget(self.label_14)
self.valve1_sta = QtGui.QLabel(self.groupBox_3)
self.valve1_sta.setText(_fromUtf8(""))
self.valve1_sta.setPixmap(QtGui.QPixmap(_fromUtf8(":/icon/icons/guan.png")))
self.valve1_sta.setObjectName(_fromUtf8("valve1_sta"))
self.horizontalLayout_5.addWidget(self.valve1_sta)
self.horizontalLayout_10.addLayout(self.horizontalLayout_5)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setSpacing(0)
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.label_13 = QtGui.QLabel(self.groupBox_3)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_13.setFont(font)
self.label_13.setObjectName(_fromUtf8("label_13"))
self.horizontalLayout_3.addWidget(self.label_13)
self.valve2_sta = QtGui.QLabel(self.groupBox_3)
self.valve2_sta.setText(_fromUtf8(""))
self.valve2_sta.setPixmap(QtGui.QPixmap(_fromUtf8(":/icon/icons/guan.png")))
self.valve2_sta.setObjectName(_fromUtf8("valve2_sta"))
self.horizontalLayout_3.addWidget(self.valve2_sta)
self.horizontalLayout_10.addLayout(self.horizontalLayout_3)
self.horizontalLayout_6 = QtGui.QHBoxLayout()
self.horizontalLayout_6.setSpacing(0)
self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
self.label_15 = QtGui.QLabel(self.groupBox_3)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_15.setFont(font)
self.label_15.setObjectName(_fromUtf8("label_15"))
self.horizontalLayout_6.addWidget(self.label_15)
self.valve3_sta = QtGui.QLabel(self.groupBox_3)
self.valve3_sta.setText(_fromUtf8(""))
self.valve3_sta.setPixmap(QtGui.QPixmap(_fromUtf8(":/icon/icons/guan.png")))
self.valve3_sta.setObjectName(_fromUtf8("valve3_sta"))
self.horizontalLayout_6.addWidget(self.valve3_sta)
self.horizontalLayout_10.addLayout(self.horizontalLayout_6)
self.horizontalLayout_7 = QtGui.QHBoxLayout()
self.horizontalLayout_7.setSpacing(0)
self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7"))
self.label_17 = QtGui.QLabel(self.groupBox_3)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_17.setFont(font)
self.label_17.setObjectName(_fromUtf8("label_17"))
self.horizontalLayout_7.addWidget(self.label_17)
self.clean_sta = QtGui.QLabel(self.groupBox_3)
self.clean_sta.setText(_fromUtf8(""))
self.clean_sta.setPixmap(QtGui.QPixmap(_fromUtf8(":/icon/icons/guan.png")))
self.clean_sta.setObjectName(_fromUtf8("clean_sta"))
self.horizontalLayout_7.addWidget(self.clean_sta)
self.horizontalLayout_10.addLayout(self.horizontalLayout_7)
self.horizontalLayout_11.addLayout(self.horizontalLayout_10)
self.label = QtGui.QLabel(self.groupBox_3)
self.label.setText(_fromUtf8(""))
self.label.setPixmap(QtGui.QPixmap(_fromUtf8(":/icon/icons/partulab.png")))
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout_11.addWidget(self.label)
self.verticalLayout_2.addLayout(self.horizontalLayout_11)
self.verticalLayout_12.addWidget(self.groupBox_3)
self.verticalLayout_13.addLayout(self.verticalLayout_12)
self.f1_open.setEnabled(False)
self.f2_open.setEnabled(False)
self.f3_open.setEnabled(False)
self.retranslateUi(ST_APP)
self.ST_MPLS.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(ST_APP)
def retranslateUi(self, ST_APP):
ST_APP.setWindowTitle(_translate("ST_APP", "灵敏度-温度谱", None))
self.log_state.setText(_translate("ST_APP", "log", None))
self.groupBox_5.setTitle(_translate("ST_APP", "测量参数", None))
self.label_18.setText(_translate("ST_APP", "测量时间", None))
self.label_5.setText(_translate("ST_APP", "S", None))
self.label_19.setText(_translate("ST_APP", "流量计1", None))
self.label_7.setText(_translate("ST_APP", "mL/min", None))
self.label_20.setText(_translate("ST_APP", "当前阻值", None))
self.label_6.setText(_translate("ST_APP", "Ω", None))
self.label_26.setText(_translate("ST_APP", "流量计2", None))
self.label_8.setText(_translate("ST_APP", "mL/min", None))
self.label_27.setText(_translate("ST_APP", "当前温度", None))
self.label_4.setText(_translate("ST_APP", "℃", None))
self.label_28.setText(_translate("ST_APP", "流量计3", None))
self.label_9.setText(_translate("ST_APP", "mL/min", None))
self.groupBox_15.setTitle(_translate("ST_APP", "样品信息", None))
self.label_16.setText(_translate("ST_APP", "样品标识", None))
self.sample_id.setText(_translate("ST_APP", "ST_test", None))
self.label_21.setText(_translate("ST_APP", "保存路径", None))
self.save_path.setText(_translate("ST_APP", "D:/", None))
self.label_24.setText(_translate("ST_APP", "面积", None))
self.label_25.setText(_translate("ST_APP", "mm^2", None))
self.label_22.setText(_translate("ST_APP", "厚度", None))
self.label_23.setText(_translate("ST_APP", "mm", None))
self.groupBox_2.setTitle(_translate("ST_APP", "参数设置", None))
self.INST_SET.setText(_translate("ST_APP", "仪器设置", None))
self.AI518P_SET.setText(_translate("ST_APP", "温度设置", None))
self.GAS_SET.setText(_translate("ST_APP", "气压控制", None))
self.COORD_SET.setText(_translate("ST_APP", "XY坐标设置", None))
self.groupBox_4.setTitle(_translate("ST_APP", "曲线选择", None))
self.Rt_Curve.setText(_translate("ST_APP", "R-t曲线", None))
self.RT_Curve.setText(_translate("ST_APP", "R-T曲线", None))
self.ST_Curve.setText(_translate("ST_APP", "S-T曲线", None))
self.ST_start.setText(_translate("ST_APP", "开始测量", None))
self.ST_stop.setText(_translate("ST_APP", "停止测量", None))
self.ST_save.setText(_translate("ST_APP", "保存数据", None))
self.groupBox_3.setTitle(_translate("ST_APP", "当前状态", None))
self.label_14.setText(_translate("ST_APP", "阀门1", None))
self.label_13.setText(_translate("ST_APP", "阀门2", None))
self.label_15.setText(_translate("ST_APP", "阀门3", None))
self.label_17.setText(_translate("ST_APP", "清洗阀", None))
import mypic_rc
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
ST_APP = QtGui.QDialog()
ui = Ui_ST_APP()
ui.setupUi(ST_APP)
ST_APP.show()
sys.exit(app.exec_())
| 54
| 113
| 0.710365
|
b0f3b11ce3e1a44bff9b18c8a3c6ca4198257fd9
| 13,941
|
py
|
Python
|
models/networks.py
|
xyang35/pytorch-CycleGAN-and-pix2pix
|
315e3d63e23e65bfd9440e2848bbfbcc901f0b37
|
[
"BSD-3-Clause"
] | 11
|
2019-04-12T09:18:02.000Z
|
2022-02-02T05:54:38.000Z
|
models/networks.py
|
richzhang/pytorch-CycleGAN-and-pix2pix
|
e9146a659a896fc71ebf59cc57248449091e8d16
|
[
"BSD-3-Clause"
] | null | null | null |
models/networks.py
|
richzhang/pytorch-CycleGAN-and-pix2pix
|
e9146a659a896fc71ebf59cc57248449091e8d16
|
[
"BSD-3-Clause"
] | 2
|
2021-10-21T05:56:47.000Z
|
2022-02-08T10:41:18.000Z
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
###############################################################################
# Functions
###############################################################################
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm2d') != -1 or classname.find('InstanceNormalization') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
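# weights_init follows the DCGAN-style initialisation: conv weights drawn from
# N(0, 0.02), normalisation scales from N(1, 0.02) and biases set to zero.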
def define_G(input_nc, output_nc, ngf, which_model_netG, norm, use_dropout=False, gpu_ids=[]):
netG = None
use_gpu = len(gpu_ids) > 0
if norm == 'batch':
norm_layer = nn.BatchNorm2d
elif norm == 'instance':
norm_layer = InstanceNormalization
else:
print('normalization layer [%s] is not found' % norm)
if use_gpu:
assert(torch.cuda.is_available())
if which_model_netG == 'resnet_9blocks':
netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer, use_dropout=use_dropout, n_blocks=9, gpu_ids=gpu_ids)
elif which_model_netG == 'resnet_6blocks':
netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer, use_dropout=use_dropout, n_blocks=6, gpu_ids=gpu_ids)
elif which_model_netG == 'unet_128':
netG = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'unet_256':
netG = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
else:
print('Generator model name [%s] is not recognized' % which_model_netG)
if len(gpu_ids) > 0:
netG.cuda(device_id=gpu_ids[0])
netG.apply(weights_init)
return netG
def define_D(input_nc, ndf, which_model_netD,
n_layers_D=3, use_sigmoid=False, gpu_ids=[]):
netD = None
use_gpu = len(gpu_ids) > 0
if use_gpu:
assert(torch.cuda.is_available())
if which_model_netD == 'basic':
netD = define_D(input_nc, ndf, 'n_layers', use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
elif which_model_netD == 'n_layers':
netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, use_sigmoid, gpu_ids=gpu_ids)
else:
print('Discriminator model name [%s] is not recognized' %
which_model_netD)
if use_gpu:
netD.cuda(device_id=gpu_ids[0])
netD.apply(weights_init)
return netD
def print_network(net):
num_params = 0
for param in net.parameters():
num_params += param.numel()
print(net)
print('Total number of parameters: %d' % num_params)
##############################################################################
# Classes
##############################################################################
# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input
class GANLoss(nn.Module):
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
tensor=torch.FloatTensor):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_var = None
self.fake_label_var = None
self.Tensor = tensor
if use_lsgan:
self.loss = nn.MSELoss()
else:
self.loss = nn.BCELoss()
def get_target_tensor(self, input, target_is_real):
target_tensor = None
if target_is_real:
create_label = ((self.real_label_var is None) or
(self.real_label_var.numel() != input.numel()))
if create_label:
real_tensor = self.Tensor(input.size()).fill_(self.real_label)
self.real_label_var = Variable(real_tensor, requires_grad=False)
target_tensor = self.real_label_var
else:
create_label = ((self.fake_label_var is None) or
(self.fake_label_var.numel() != input.numel()))
if create_label:
fake_tensor = self.Tensor(input.size()).fill_(self.fake_label)
self.fake_label_var = Variable(fake_tensor, requires_grad=False)
target_tensor = self.fake_label_var
return target_tensor
def __call__(self, input, target_is_real):
target_tensor = self.get_target_tensor(input, target_is_real)
return self.loss(input, target_tensor)
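# Illustrative usage of GANLoss (a sketch, not part of the original file):
#   criterion = GANLoss(use_lsgan=True, tensor=torch.FloatTensor)
#   loss_D = 0.5 * (criterion(pred_real, True) + criterion(pred_fake, False))
# With use_lsgan=True this is an MSE against cached all-ones / all-zeros targets.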
# Defines the generator that consists of Resnet blocks between a few
# downsampling/upsampling operations.
# Code and idea originally from Justin Johnson's architecture.
# https://github.com/jcjohnson/fast-neural-style/
class ResnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[]):
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
self.input_nc = input_nc
self.output_nc = output_nc
self.ngf = ngf
self.gpu_ids = gpu_ids
model = [nn.Conv2d(input_nc, ngf, kernel_size=7, padding=3),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling):
mult = 2**i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
stride=2, padding=1),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2**n_downsampling
for i in range(n_blocks):
model += [ResnetBlock(ngf * mult, 'zero', norm_layer=norm_layer, use_dropout=use_dropout)]
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=3)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
else:
return self.model(input)
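# Architecture summary (derived from the code above): a 7x7 conv to ngf channels,
# two stride-2 downsampling convs up to 4*ngf, n_blocks residual blocks at 4*ngf,
# two transposed convs back up, and a final 7x7 conv + Tanh to output_nc channels.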
# Define a resnet block
class ResnetBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, use_dropout):
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout):
conv_block = []
p = 0
# TODO: support padding types
assert(padding_type == 'zero')
p = 1
# TODO: InstanceNorm
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim),
nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
# Defines the Unet generator.
# |num_downs|: number of downsamplings in UNet. For example,
# if |num_downs| == 7, image of size 128x128 will become of size 1x1
# at the bottleneck
class UnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, num_downs, ngf=64,
norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[]):
super(UnetGenerator, self).__init__()
self.gpu_ids = gpu_ids
# currently support only input_nc == output_nc
assert(input_nc == output_nc)
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, innermost=True)
for i in range(num_downs - 5):
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, unet_block, use_dropout=use_dropout)
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, unet_block)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, unet_block)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, unet_block)
unet_block = UnetSkipConnectionBlock(output_nc, ngf, unet_block, outermost=True)
self.model = unet_block
def forward(self, input):
if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
else:
return self.model(input)
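# --- Editorial usage sketch (not part of the original file) ------------------
# A hedged illustration of the |num_downs| comment above: with num_downs == 7
# a 128x128 input is halved seven times to a 1x1 bottleneck and then upsampled
# back to 128x128. Sizes and the helper name are assumptions for demonstration.
def _demo_unet_generator():
    import torch
    from torch.autograd import Variable
    net = UnetGenerator(input_nc=3, output_nc=3, num_downs=7)
    x = Variable(torch.randn(1, 3, 128, 128))
    y = net(x)  # output has the same 1x3x128x128 shape as the input
    assert y.size() == x.size()
    return y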
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class UnetSkipConnectionBlock(nn.Module):
def __init__(self, outer_nc, inner_nc,
submodule=None, outermost=False, innermost=False, use_dropout=False):
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
downconv = nn.Conv2d(outer_nc, inner_nc, kernel_size=4,
stride=2, padding=1)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = nn.BatchNorm2d(inner_nc)
uprelu = nn.ReLU(True)
upnorm = nn.BatchNorm2d(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else:
return torch.cat([self.model(x), x], 1)
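# --- Editorial usage sketch (not part of the original file) ------------------
# A hedged illustration of the skip connection sketched above: a non-outermost
# block concatenates its downsample/upsample result with the identity input,
# so the channel count doubles while the spatial size is preserved. The sizes
# below are illustrative assumptions.
def _demo_unet_skip_block():
    import torch
    from torch.autograd import Variable
    block = UnetSkipConnectionBlock(outer_nc=64, inner_nc=128, innermost=True)
    x = Variable(torch.randn(1, 64, 32, 32))
    y = block(x)
    assert y.size(1) == 2 * 64 and y.size(2) == 32  # channels doubled by the cat
    return y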
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, use_sigmoid=False, gpu_ids=[]):
super(NLayerDiscriminator, self).__init__()
self.gpu_ids = gpu_ids
kw = 4
padw = int(np.ceil((kw-1)/2))
sequence = [
nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True)
]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers):
nf_mult_prev = nf_mult
nf_mult = min(2**n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=2, padding=padw),
# TODO: use InstanceNorm
nn.BatchNorm2d(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2**n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
kernel_size=kw, stride=1, padding=padw),
            # TODO: use InstanceNorm
nn.BatchNorm2d(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
if use_sigmoid:
sequence += [nn.Sigmoid()]
self.model = nn.Sequential(*sequence)
def forward(self, input):
if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):
return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
else:
return self.model(input)
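# --- Editorial usage sketch (not part of the original file) ------------------
# A hedged illustration of the PatchGAN idea mentioned above: the discriminator
# returns a spatial grid of per-patch scores rather than a single scalar per
# image. The input size and helper name are assumptions for demonstration.
def _demo_patch_discriminator():
    import torch
    from torch.autograd import Variable
    netD = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3)
    x = Variable(torch.randn(1, 3, 256, 256))
    out = netD(x)  # shape (1, 1, H', W'): one score per overlapping patch
    return out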
# Instance Normalization layer from
# https://github.com/darkstar112358/fast-neural-style
class InstanceNormalization(torch.nn.Module):
"""InstanceNormalization
Improves convergence of neural-style.
ref: https://arxiv.org/pdf/1607.08022.pdf
"""
def __init__(self, dim, eps=1e-5):
super(InstanceNormalization, self).__init__()
self.weight = nn.Parameter(torch.FloatTensor(dim))
self.bias = nn.Parameter(torch.FloatTensor(dim))
self.eps = eps
self._reset_parameters()
def _reset_parameters(self):
self.weight.data.uniform_()
self.bias.data.zero_()
def forward(self, x):
n = x.size(2) * x.size(3)
t = x.view(x.size(0), x.size(1), n)
mean = torch.mean(t, 2).unsqueeze(2).expand_as(x)
# Calculate the biased var. torch.var returns unbiased var
var = torch.var(t, 2).unsqueeze(2).expand_as(x) * ((n - 1) / float(n))
scale_broadcast = self.weight.unsqueeze(1).unsqueeze(1).unsqueeze(0)
scale_broadcast = scale_broadcast.expand_as(x)
shift_broadcast = self.bias.unsqueeze(1).unsqueeze(1).unsqueeze(0)
shift_broadcast = shift_broadcast.expand_as(x)
out = (x - mean) / torch.sqrt(var + self.eps)
out = out * scale_broadcast + shift_broadcast
return out
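# --- Editorial usage sketch (not part of the original file) ------------------
# A hypothetical, hedged example in the spirit of the "TODO: InstanceNorm"
# notes above: InstanceNormalization can be passed to the generator as its
# norm_layer in place of nn.BatchNorm2d. This only builds the network and is
# not the project's official configuration.
def _demo_instance_norm_generator():
    net = ResnetGenerator(input_nc=3, output_nc=3, ngf=64,
                          norm_layer=InstanceNormalization, n_blocks=6)
    return net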
| 38.404959
| 122
| 0.58848
|
8f78632895a03c931da2270520b73966a6919f69
| 19,178
|
py
|
Python
|
test/functional/wallet_address_types.py
|
robbelouwet/Elixir
|
609412402c5dd4fb9d77ae6d87505d8efd608132
|
[
"MIT"
] | null | null | null |
test/functional/wallet_address_types.py
|
robbelouwet/Elixir
|
609412402c5dd4fb9d77ae6d87505d8efd608132
|
[
"MIT"
] | null | null | null |
test/functional/wallet_address_types.py
|
robbelouwet/Elixir
|
609412402c5dd4fb9d77ae6d87505d8efd608132
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Elixir Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test that the wallet can send and receive using all combinations of address types.
There are 5 nodes-under-test:
- node0 uses legacy addresses
- node1 uses p2sh/segwit addresses
- node2 uses p2sh/segwit addresses and bech32 addresses for change
- node3 uses bech32 addresses
    - node4 uses a p2sh/segwit address for change
node5 exists to generate new blocks.
## Multisig address test
Test that adding a multisig address with:
- an uncompressed pubkey always gives a legacy address
  - only compressed pubkeys gives an `-addresstype` address
## Sending to address types test
A series of tests, iterating over node0-node4. In each iteration of the test, one node sends:
- 10/101th of its balance to itself (using getrawchangeaddress for single key addresses)
- 20/101th to the next node
- 30/101th to the node after that
- 40/101th to the remaining node
  - 1/101th remains as fee+change (a small sketch of this split follows this docstring)
Iterate over each node for single key addresses, and then over each node for
multisig addresses.
Repeat test, but with explicit address_type parameters passed to getnewaddress
and getrawchangeaddress:
- node0 and node3 send to p2sh.
- node1 sends to bech32.
- node2 sends to legacy.
As every node sends coins after receiving, this also
verifies that spending coins sent to all these address types works.
## Change type test
Test that the nodes generate the correct change address type:
- node0 always uses a legacy change address.
  - node1 uses a bech32 address for change if any destination address is bech32.
- node2 always uses a bech32 address for change
- node3 always uses a bech32 address for change
- node4 always uses p2sh/segwit output for change.
"""
from decimal import Decimal
import itertools
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import ElixirTestFramework
from test_framework.descriptors import (
descsum_create,
descsum_check,
)
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
)
class AddressTypeTest(ElixirTestFramework):
def set_test_params(self):
self.num_nodes = 6
self.extra_args = [
["-addresstype=legacy"],
["-addresstype=p2sh-segwit"],
["-addresstype=p2sh-segwit", "-changetype=bech32"],
["-addresstype=bech32"],
["-changetype=p2sh-segwit"],
[],
]
# whitelist all peers to speed up tx relay / mempool sync
for args in self.extra_args:
args.append("-whitelist=noban@127.0.0.1")
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
# Fully mesh-connect nodes for faster mempool sync
for i, j in itertools.product(range(self.num_nodes), repeat=2):
if i > j:
self.connect_nodes(i, j)
self.sync_all()
def get_balances(self, key='trusted'):
"""Return a list of balances."""
return [self.nodes[i].getbalances()['mine'][key] for i in range(4)]
def test_address(self, node, address, multisig, typ):
"""Run sanity checks on an address."""
info = self.nodes[node].getaddressinfo(address)
assert self.nodes[node].validateaddress(address)['isvalid']
assert_equal(info.get('solvable'), True)
if not multisig and typ == 'legacy':
# P2PKH
assert not info['isscript']
assert not info['iswitness']
assert 'pubkey' in info
elif not multisig and typ == 'p2sh-segwit':
# P2SH-P2WPKH
assert info['isscript']
assert not info['iswitness']
assert_equal(info['script'], 'witness_v0_keyhash')
assert 'pubkey' in info
elif not multisig and typ == 'bech32':
# P2WPKH
assert not info['isscript']
assert info['iswitness']
assert_equal(info['witness_version'], 0)
assert_equal(len(info['witness_program']), 40)
assert 'pubkey' in info
elif typ == 'legacy':
# P2SH-multisig
assert info['isscript']
assert_equal(info['script'], 'multisig')
assert not info['iswitness']
assert 'pubkeys' in info
elif typ == 'p2sh-segwit':
# P2SH-P2WSH-multisig
assert info['isscript']
assert_equal(info['script'], 'witness_v0_scripthash')
assert not info['iswitness']
assert info['embedded']['isscript']
assert_equal(info['embedded']['script'], 'multisig')
assert info['embedded']['iswitness']
assert_equal(info['embedded']['witness_version'], 0)
assert_equal(len(info['embedded']['witness_program']), 64)
assert 'pubkeys' in info['embedded']
elif typ == 'bech32':
# P2WSH-multisig
assert info['isscript']
assert_equal(info['script'], 'multisig')
assert info['iswitness']
assert_equal(info['witness_version'], 0)
assert_equal(len(info['witness_program']), 64)
assert 'pubkeys' in info
else:
# Unknown type
assert False
def test_desc(self, node, address, multisig, typ, utxo):
"""Run sanity checks on a descriptor reported by getaddressinfo."""
info = self.nodes[node].getaddressinfo(address)
assert 'desc' in info
assert_equal(info['desc'], utxo['desc'])
assert self.nodes[node].validateaddress(address)['isvalid']
# Use a ridiculously roundabout way to find the key origin info through
# the PSBT logic. However, this does test consistency between the PSBT reported
# fingerprints/paths and the descriptor logic.
psbt = self.nodes[node].createpsbt([{'txid':utxo['txid'], 'vout':utxo['vout']}],[{address:0.00010000}])
psbt = self.nodes[node].walletprocesspsbt(psbt, False, "ALL", True)
decode = self.nodes[node].decodepsbt(psbt['psbt'])
key_descs = {}
for deriv in decode['inputs'][0]['bip32_derivs']:
assert_equal(len(deriv['master_fingerprint']), 8)
assert_equal(deriv['path'][0], 'm')
key_descs[deriv['pubkey']] = '[' + deriv['master_fingerprint'] + deriv['path'][1:] + ']' + deriv['pubkey']
# Verify the descriptor checksum against the Python implementation
assert descsum_check(info['desc'])
# Verify that stripping the checksum and recreating it using Python roundtrips
assert info['desc'] == descsum_create(info['desc'][:-9])
# Verify that stripping the checksum and feeding it to getdescriptorinfo roundtrips
assert info['desc'] == self.nodes[0].getdescriptorinfo(info['desc'][:-9])['descriptor']
assert_equal(info['desc'][-8:], self.nodes[0].getdescriptorinfo(info['desc'][:-9])['checksum'])
# Verify that keeping the checksum and feeding it to getdescriptorinfo roundtrips
assert info['desc'] == self.nodes[0].getdescriptorinfo(info['desc'])['descriptor']
assert_equal(info['desc'][-8:], self.nodes[0].getdescriptorinfo(info['desc'])['checksum'])
if not multisig and typ == 'legacy':
# P2PKH
assert_equal(info['desc'], descsum_create("pkh(%s)" % key_descs[info['pubkey']]))
elif not multisig and typ == 'p2sh-segwit':
# P2SH-P2WPKH
assert_equal(info['desc'], descsum_create("sh(wpkh(%s))" % key_descs[info['pubkey']]))
elif not multisig and typ == 'bech32':
# P2WPKH
assert_equal(info['desc'], descsum_create("wpkh(%s)" % key_descs[info['pubkey']]))
elif typ == 'legacy':
# P2SH-multisig
assert_equal(info['desc'], descsum_create("sh(multi(2,%s,%s))" % (key_descs[info['pubkeys'][0]], key_descs[info['pubkeys'][1]])))
elif typ == 'p2sh-segwit':
# P2SH-P2WSH-multisig
assert_equal(info['desc'], descsum_create("sh(wsh(multi(2,%s,%s)))" % (key_descs[info['embedded']['pubkeys'][0]], key_descs[info['embedded']['pubkeys'][1]])))
elif typ == 'bech32':
# P2WSH-multisig
assert_equal(info['desc'], descsum_create("wsh(multi(2,%s,%s))" % (key_descs[info['pubkeys'][0]], key_descs[info['pubkeys'][1]])))
else:
# Unknown type
assert False
def test_change_output_type(self, node_sender, destinations, expected_type):
txid = self.nodes[node_sender].sendmany(dummy="", amounts=dict.fromkeys(destinations, 0.001))
raw_tx = self.nodes[node_sender].getrawtransaction(txid)
tx = self.nodes[node_sender].decoderawtransaction(raw_tx)
# Make sure the transaction has change:
assert_equal(len(tx["vout"]), len(destinations) + 1)
# Make sure the destinations are included, and remove them:
output_addresses = [vout['scriptPubKey']['address'] for vout in tx["vout"]]
change_addresses = [d for d in output_addresses if d not in destinations]
assert_equal(len(change_addresses), 1)
self.log.debug("Check if change address " + change_addresses[0] + " is " + expected_type)
self.test_address(node_sender, change_addresses[0], multisig=False, typ=expected_type)
def run_test(self):
# Mine 101 blocks on node5 to bring nodes out of IBD and make sure that
# no coinbases are maturing for the nodes-under-test during the test
self.nodes[5].generate(COINBASE_MATURITY + 1)
self.sync_blocks()
uncompressed_1 = "0496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858ee"
uncompressed_2 = "047211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073dee6c89064984f03385237d92167c13e236446b417ab79a0fcae412ae3316b77"
compressed_1 = "0296b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52"
compressed_2 = "037211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073"
if not self.options.descriptors:
            # Tests for addmultisigaddress's address type behavior are only for legacy wallets.
            # Descriptor wallets do not have addmultisigaddress, so these tests are not needed for them.
# addmultisigaddress with at least 1 uncompressed key should return a legacy address.
for node in range(4):
self.test_address(node, self.nodes[node].addmultisigaddress(2, [uncompressed_1, uncompressed_2])['address'], True, 'legacy')
self.test_address(node, self.nodes[node].addmultisigaddress(2, [compressed_1, uncompressed_2])['address'], True, 'legacy')
self.test_address(node, self.nodes[node].addmultisigaddress(2, [uncompressed_1, compressed_2])['address'], True, 'legacy')
# addmultisigaddress with all compressed keys should return the appropriate address type (even when the keys are not ours).
self.test_address(0, self.nodes[0].addmultisigaddress(2, [compressed_1, compressed_2])['address'], True, 'legacy')
self.test_address(1, self.nodes[1].addmultisigaddress(2, [compressed_1, compressed_2])['address'], True, 'p2sh-segwit')
self.test_address(2, self.nodes[2].addmultisigaddress(2, [compressed_1, compressed_2])['address'], True, 'p2sh-segwit')
self.test_address(3, self.nodes[3].addmultisigaddress(2, [compressed_1, compressed_2])['address'], True, 'bech32')
do_multisigs = [False]
if not self.options.descriptors:
do_multisigs.append(True)
for explicit_type, multisig, from_node in itertools.product([False, True], do_multisigs, range(4)):
address_type = None
if explicit_type and not multisig:
if from_node == 1:
address_type = 'bech32'
elif from_node == 0 or from_node == 3:
address_type = 'p2sh-segwit'
else:
address_type = 'legacy'
self.log.info("Sending from node {} ({}) with{} multisig using {}".format(from_node, self.extra_args[from_node], "" if multisig else "out", "default" if address_type is None else address_type))
old_balances = self.get_balances()
self.log.debug("Old balances are {}".format(old_balances))
to_send = (old_balances[from_node] / (COINBASE_MATURITY + 1)).quantize(Decimal("0.00000001"))
sends = {}
addresses = {}
self.log.debug("Prepare sends")
for n, to_node in enumerate(range(from_node, from_node + 4)):
to_node %= 4
change = False
if not multisig:
if from_node == to_node:
# When sending non-multisig to self, use getrawchangeaddress
address = self.nodes[to_node].getrawchangeaddress(address_type=address_type)
change = True
else:
address = self.nodes[to_node].getnewaddress(address_type=address_type)
else:
addr1 = self.nodes[to_node].getnewaddress()
addr2 = self.nodes[to_node].getnewaddress()
address = self.nodes[to_node].addmultisigaddress(2, [addr1, addr2])['address']
# Do some sanity checking on the created address
if address_type is not None:
typ = address_type
elif to_node == 0:
typ = 'legacy'
elif to_node == 1 or (to_node == 2 and not change):
typ = 'p2sh-segwit'
else:
typ = 'bech32'
self.test_address(to_node, address, multisig, typ)
# Output entry
sends[address] = to_send * 10 * (1 + n)
addresses[to_node] = (address, typ)
self.log.debug("Sending: {}".format(sends))
self.nodes[from_node].sendmany("", sends)
self.sync_mempools()
unconf_balances = self.get_balances('untrusted_pending')
self.log.debug("Check unconfirmed balances: {}".format(unconf_balances))
assert_equal(unconf_balances[from_node], 0)
for n, to_node in enumerate(range(from_node + 1, from_node + 4)):
to_node %= 4
assert_equal(unconf_balances[to_node], to_send * 10 * (2 + n))
# node5 collects fee and block subsidy to keep accounting simple
self.nodes[5].generate(1)
self.sync_blocks()
# Verify that the receiving wallet contains a UTXO with the expected address, and expected descriptor
for n, to_node in enumerate(range(from_node, from_node + 4)):
to_node %= 4
found = False
for utxo in self.nodes[to_node].listunspent():
if utxo['address'] == addresses[to_node][0]:
found = True
self.test_desc(to_node, addresses[to_node][0], multisig, addresses[to_node][1], utxo)
break
assert found
new_balances = self.get_balances()
self.log.debug("Check new balances: {}".format(new_balances))
# We don't know what fee was set, so we can only check bounds on the balance of the sending node
assert_greater_than(new_balances[from_node], to_send * 10)
assert_greater_than(to_send * 11, new_balances[from_node])
for n, to_node in enumerate(range(from_node + 1, from_node + 4)):
to_node %= 4
assert_equal(new_balances[to_node], old_balances[to_node] + to_send * 10 * (2 + n))
# Get one p2sh/segwit address from node2 and two bech32 addresses from node3:
to_address_p2sh = self.nodes[2].getnewaddress()
to_address_bech32_1 = self.nodes[3].getnewaddress()
to_address_bech32_2 = self.nodes[3].getnewaddress()
# Fund node 4:
self.nodes[5].sendtoaddress(self.nodes[4].getnewaddress(), Decimal("1"))
self.nodes[5].generate(1)
self.sync_blocks()
assert_equal(self.nodes[4].getbalance(), 1)
self.log.info("Nodes with addresstype=legacy never use a P2WPKH change output (unless changetype is set otherwise):")
self.test_change_output_type(0, [to_address_bech32_1], 'legacy')
self.log.info("Nodes with addresstype=p2sh-segwit only use a P2WPKH change output if any destination address is bech32:")
self.test_change_output_type(1, [to_address_p2sh], 'p2sh-segwit')
self.test_change_output_type(1, [to_address_bech32_1], 'bech32')
self.test_change_output_type(1, [to_address_p2sh, to_address_bech32_1], 'bech32')
self.test_change_output_type(1, [to_address_bech32_1, to_address_bech32_2], 'bech32')
self.log.info("Nodes with change_type=bech32 always use a P2WPKH change output:")
self.test_change_output_type(2, [to_address_bech32_1], 'bech32')
self.test_change_output_type(2, [to_address_p2sh], 'bech32')
self.log.info("Nodes with addresstype=bech32 always use a P2WPKH change output (unless changetype is set otherwise):")
self.test_change_output_type(3, [to_address_bech32_1], 'bech32')
self.test_change_output_type(3, [to_address_p2sh], 'bech32')
self.log.info('getrawchangeaddress defaults to addresstype if -changetype is not set and argument is absent')
self.test_address(3, self.nodes[3].getrawchangeaddress(), multisig=False, typ='bech32')
self.log.info('test invalid address type arguments')
assert_raises_rpc_error(-5, "Unknown address type ''", self.nodes[3].addmultisigaddress, 2, [compressed_1, compressed_2], None, '')
assert_raises_rpc_error(-5, "Unknown address type ''", self.nodes[3].getnewaddress, None, '')
assert_raises_rpc_error(-5, "Unknown address type ''", self.nodes[3].getrawchangeaddress, '')
assert_raises_rpc_error(-5, "Unknown address type 'bech23'", self.nodes[3].getrawchangeaddress, 'bech23')
self.log.info("Nodes with changetype=p2sh-segwit never use a P2WPKH change output")
self.test_change_output_type(4, [to_address_bech32_1], 'p2sh-segwit')
self.test_address(4, self.nodes[4].getrawchangeaddress(), multisig=False, typ='p2sh-segwit')
self.log.info("Except for getrawchangeaddress if specified:")
self.test_address(4, self.nodes[4].getrawchangeaddress(), multisig=False, typ='p2sh-segwit')
self.test_address(4, self.nodes[4].getrawchangeaddress('bech32'), multisig=False, typ='bech32')
if __name__ == '__main__':
AddressTypeTest().main()
| 50.73545
| 205
| 0.640682
|
2b3df1855760478a3552e05abb8546bce843bcd8
| 928
|
py
|
Python
|
airylib/NetworkManager.py
|
kickingvegas/airy
|
672d16a14a59e9ef07f8861687b1f775dcb32588
|
[
"Apache-2.0"
] | 2
|
2020-10-14T15:43:13.000Z
|
2020-10-17T19:58:51.000Z
|
airylib/NetworkManager.py
|
kickingvegas/airy
|
672d16a14a59e9ef07f8861687b1f775dcb32588
|
[
"Apache-2.0"
] | null | null | null |
airylib/NetworkManager.py
|
kickingvegas/airy
|
672d16a14a59e9ef07f8861687b1f775dcb32588
|
[
"Apache-2.0"
] | null | null | null |
##
# Copyright 2020 Charles Y. Choi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
class NetworkManager:
def getSensorData(self, sensorID):
url = 'https://www.purpleair.com/json?show={0}'.format(sensorID)
#print(url)
r = requests.get(url)
return r
def getSensorList(self):
url = 'https://www.purpleair.com/json'
r = requests.get(url)
return r
| 28.121212
| 74
| 0.697198
|
e87927180fe6bb9148daf66dc2dfd8f83b23004b
| 10,632
|
py
|
Python
|
framework/CodeInterfaces/Generic/GenericParser.py
|
mattdon/raven
|
b5b4a9fc96cec37ca5fb3757c45653eec66522f1
|
[
"Apache-2.0"
] | null | null | null |
framework/CodeInterfaces/Generic/GenericParser.py
|
mattdon/raven
|
b5b4a9fc96cec37ca5fb3757c45653eec66522f1
|
[
"Apache-2.0"
] | null | null | null |
framework/CodeInterfaces/Generic/GenericParser.py
|
mattdon/raven
|
b5b4a9fc96cec37ca5fb3757c45653eec66522f1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Mar 10, 2015
@author: talbpaul
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import warnings
warnings.simplefilter('default',DeprecationWarning)
import os
import sys
import numpy as np
from utils import utils
def _reprIfFloat(value):
"""
Uses repr if the value is a float
@ In, value, any, the value to convert to a string
@ Out, _reprIfFloat, string, a string conversion of this
"""
if utils.isAFloat(value):
return repr(value)
else:
return str(value)
class GenericParser():
"""
    Import the user-edited input file and build a list of strings with replaceable parts (example wildcard strings are listed at the end of this module).
"""
def __init__(self,inputFiles,prefix='$RAVEN-',postfix='$',defaultDelim=':', formatDelim='|'):
"""
      Accept the input file and parse it by the prefix-postfix breaks. Someday we might be able to change prefix, postfix, and defaultDelim from the input file, but not yet.
@ In, inputFiles, list, string list of input filenames that might need parsing.
@ In, prefix, string, optional, the string prefix to find input variables within the input files
      @ In, postfix, string, optional, the string postfix signifying the end of an input variable within an input file
@ In, defaultDelim, string, optional, the string used between prefix and postfix to set default values
@ In, formatDelim, string, optional, the string used between prefix and postfix to set the format of the value
@ Out, None
"""
self.inputFiles = inputFiles
self.prefixKey=prefix
self.postfixKey=postfix
self.varPlaces = {} # varPlaces[var][inputFile]
self.defaults = {} # defaults[var][inputFile]
self.formats = {} # formats[var][inputFile]
self.acceptFormats = {"d":int,"e":float,"E":float,"f":float,"F":float,"g":float,"G":float}
self.segments = {} # segments[inputFile]
self.printTag = 'GENERIC_PARSER'
for inputFile in self.inputFiles:
infileName = inputFile.getFilename()#os.path.basename(inputFile)
self.segments[infileName] = []
if not os.path.exists(inputFile.getAbsFile()):
        ## Make sure to cast the inputFile to a string as it may be a File object.
raise IOError('Input file not found: ' + str(inputFile))
seg = ''
lines = inputFile.readlines()
inputFile.close()
for line in lines:
while self.prefixKey in line and self.postfixKey in line:
self.segments[infileName].append(seg)
start = line.find(self.prefixKey)
end = line.find(self.postfixKey,start+1)
var = line[start+len(self.prefixKey):end]
if defaultDelim in var or formatDelim in var:
optionalPos = [None]*2
optionalPos[0], optionalPos[1] = var.find(defaultDelim), var.find(formatDelim)
if optionalPos[0] == -1:
optionalPos[0] = sys.maxsize
if optionalPos[1] == -1:
optionalPos[1] = sys.maxsize
defval = var[optionalPos[0]+1:min(optionalPos[1],len(var))] if optionalPos[0] < optionalPos[1] else var[min(optionalPos[0]+1,len(var)):len(var)]
varformat = var[min(optionalPos[1]+1,len(var)):len(var)] if optionalPos[0] < optionalPos[1] else var[optionalPos[1]+1:min(optionalPos[0],len(var))]
var = var[0:min(optionalPos)]
if var in self.defaults.keys() and optionalPos[0] != sys.maxsize:
print('multiple default values given for variable',var)
if var in self.formats.keys() and optionalPos[1] != sys.maxsize:
print('multiple format values given for variable',var)
#TODO allow the user to specify take-last or take-first?
if var not in self.defaults.keys() and optionalPos[0] != sys.maxsize:
self.defaults[var] = {}
if var not in self.formats.keys() and optionalPos[1] != sys.maxsize:
self.formats[var ] = {}
if optionalPos[0] != sys.maxsize:
self.defaults[var][infileName]=defval
if optionalPos[1] != sys.maxsize:
# check if the format is valid
if not any(formVal in varformat for formVal in self.acceptFormats.keys()):
try:
int(varformat)
except ValueError:
raise ValueError("the format specified for wildcard "+ line[start+len(self.prefixKey):end] +
" is unknown. Available are either a plain integer or the following "+" ".join(self.acceptFormats.keys()))
self.formats[var][infileName ]=varformat,int
else:
for formVal in self.acceptFormats.keys():
if formVal in varformat:
self.formats[var][infileName ]=varformat,self.acceptFormats[formVal]; break
self.segments[infileName].append(line[:start])
self.segments[infileName].append(var)
if var not in self.varPlaces.keys():
self.varPlaces[var] = {infileName:[len(self.segments[infileName])-1]}
elif infileName not in self.varPlaces[var].keys():
self.varPlaces[var][infileName]=[len(self.segments[infileName])-1]
else:
self.varPlaces[var][infileName].append(len(self.segments[infileName])-1)
#self.segments.append(line[end+1:])
line=line[end+1:]
seg = ''
else:
seg+=line
self.segments[infileName].append(seg)
def modifyInternalDictionary(self,**Kwargs):
"""
      Edits the parsed file stored in self.segments to enter new variable values preparatory to a new run.
      @ In, **Kwargs, dict, dict including SampledVars (the dictionary of variable:value pairs to replace) and additionalEdits.
@ Out, None
"""
modDict = Kwargs['SampledVars']
self.adlDict = Kwargs.get('additionalEdits',{})
ioVars = []
for value in self.adlDict.values():
if type(value)==dict:
for k in value.keys():
ioVars.append(k)
elif type(value)==list:
for v in value:
ioVars.append(v)
else:
ioVars.append(value)
for var in self.varPlaces.keys():
for inputFile in self.segments.keys():
for place in self.varPlaces[var][inputFile] if inputFile in self.varPlaces[var].keys() else []:
if var in modDict.keys():
if var in self.formats.keys():
if inputFile in self.formats[var].keys():
if any(formVal in self.formats[var][inputFile][0] for formVal in self.acceptFormats.keys()):
formatstringc = "{:"+self.formats[var][inputFile][0].strip()+"}"
self.segments[inputFile][place] = formatstringc.format(self.formats[var][inputFile][1](modDict[var]))
else:
self.segments[inputFile][place] = _reprIfFloat(modDict[var]).strip().rjust(self.formats[var][inputFile][1](self.formats[var][inputFile][0]))
else:
self.segments[inputFile][place] = _reprIfFloat(modDict[var])
elif var in self.defaults.keys():
if var in self.formats.keys():
if inputFile in self.formats[var].keys():
if any(formVal in self.formats[var][inputFile][0] for formVal in self.acceptFormats.keys()):
formatstringc = "{:"+self.formats[var][inputFile][0].strip()+"}"
self.segments[inputFile][place] = formatstringc.format(self.formats[var][inputFile][1](self.defaults[var][inputFile]))
else:
self.segments[inputFile][place] = _reprIfFloat(self.defaults[var][inputFile]).strip().rjust(self.formats[var][inputFile][1](self.formats[var][inputFile][0]))
else:
self.segments[inputFile][place] = self.defaults[var][inputFile]
elif var in ioVars:
continue #this gets handled in writeNewInput
else:
raise IOError('Generic Parser: Variable '+var+' was not sampled and no default given!')
def writeNewInput(self,inFiles,origFiles):
"""
Generates a new input file with the existing parsed dictionary.
@ In, inFiles, list, Files list of new input files to return
@ In, origFiles, list, the original list of Files, used for key names
@ Out, None
"""
#get the right IO names put in
case = 'out~'+inFiles[0].getBase() #FIXME the first entry? This is bad! Forces order somewhere in input file
# however, I can't seem to generate an error with this, so maybe it's okay
def getFileWithExtension(fileList,ext):
"""
Just a script to get the file with extension ext from the fileList.
@ In, fileList, list, the Files list of files to pick from.
@ In, ext, string, the string extension that the desired filename ends with.
        @ Out, (index, inputFile), tuple, the index and File object whose extension matches ext
"""
found=False
for index,inputFile in enumerate(fileList):
if inputFile.getExt() == ext:
found=True
break
if not found:
raise IOError('No InputFile with extension '+ext+' found!')
return index,inputFile
for var in self.varPlaces.keys():
for inputFile in self.segments.keys():
for place in self.varPlaces[var][inputFile] if inputFile in self.varPlaces[var].keys() else []:
for iotype,adlvar in self.adlDict.items():
if iotype=='output':
if var==self.adlDict[iotype]:
self.segments[inputFile][place] = case
break
elif iotype=='input':
if var in self.adlDict[iotype].keys():
self.segments[inputFile][place] = getFileWithExtension(inFiles,self.adlDict[iotype][var][0].strip('.'))[1].getAbsFile()
break
#now just write the files.
for f,inFile in enumerate(origFiles):
outfile = inFiles[f]
#if os.path.isfile(outfile.getAbsFile()): os.remove(outfile.getAbsFile())
outfile.open('w')
outfile.writelines(''.join(self.segments[inFile.getFilename()]))
outfile.close()
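# --- Editorial illustration (not part of the original module) -----------------
# Hedged examples of the wildcard syntax parsed above: '$RAVEN-' prefix, '$'
# postfix, ':' introduces a default value and '|' introduces a format. The
# variable name "velocity" is a hypothetical placeholder.
EXAMPLE_WILDCARDS = [
    "$RAVEN-velocity$",            # plain wildcard; the value must be sampled
    "$RAVEN-velocity:1.0$",        # default of 1.0 used when not sampled
    "$RAVEN-velocity|10$",         # value right-justified in a 10-character field
    "$RAVEN-velocity|10.3e:1.0$",  # scientific-notation format plus a default
]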
| 48.995392
| 175
| 0.632336
|
f8e25a6b3f08b8748538280fe147d362c6aa35c4
| 3,516
|
py
|
Python
|
plot/microbench/distribution/dis_cpr.py
|
XinYao1994/HOPE
|
99b41b457b67d3e5d6dd182f8aa2ce4ea66e4a68
|
[
"Apache-2.0"
] | 108
|
2020-04-23T19:06:51.000Z
|
2022-02-23T20:05:09.000Z
|
plot/microbench/distribution/dis_cpr.py
|
XinYao1994/HOPE
|
99b41b457b67d3e5d6dd182f8aa2ce4ea66e4a68
|
[
"Apache-2.0"
] | 1
|
2021-07-07T05:58:57.000Z
|
2021-07-07T05:58:57.000Z
|
plot/microbench/distribution/dis_cpr.py
|
XinYao1994/HOPE
|
99b41b457b67d3e5d6dd182f8aa2ce4ea66e4a68
|
[
"Apache-2.0"
] | 11
|
2020-04-24T01:53:50.000Z
|
2022-01-21T07:36:14.000Z
|
import sys
import os
sys.path.append(os.path.abspath('./plot/'))
from option import *
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plot
import matplotlib.ticker as ticker
import numpy as np
import csv
def autolabel(rects):
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., height + 0.01,
'%0.1f' % float(height),
# '%d' % int(height),
ha='center', va='bottom')
NAMES = ["Single-Char", "Double-Char", "3-Grams", "4-Grams", "ALM", "ALM-Improved"]
TYPES = ["Dict-A, Email-A", "Dict-B, Email-B", "Dict-A, Email-B", "Dict-B, Email-A"]
REPEAT_TIMES = 2
GROUP_SIZE = 6
COLORS = ['#fee8c8', '#fc8d59', '#d7301f', '#7f0000']
Y_LABEL = "Compression Rate"
X_TICK_FONT_SIZE = 14
Y_TICK_FONT_SIZE = 16
LEGEND_FONT_SIZE = 14
LEGEND_POS = 'upper left'
CSV_X_FILE_PATH = "results/microbench/distribution/dis_cpr.csv"
GRAPH_OUTPUT_PATH = "figures/microbench/distribution/dis_cpr.pdf"
f_in_x = open(CSV_X_FILE_PATH)
reader = csv.reader(f_in_x)
csvrows = list(reader)
data = []
for row in csvrows :
for item in row :
data.append(float(item))
single = data[0:4]
double = data[4:8]
three_gram = data[8:12]
four_gram = data[12:16]
alm = data[16:20]
alm_improve = data[20:24]
all_data = [single, double, three_gram, four_gram, alm, alm_improve]
#========================================================================================
mpl.rcParams['ps.useafm'] = True
mpl.rcParams['pdf.use14corefonts'] = True
mpl.rcParams['text.usetex'] = False
mpl.rcParams['text.latex.preamble'] = [
r'\usepackage{siunitx}', # i need upright \micro symbols, but you need...
r'\sisetup{detect-all}', # ...this to force siunitx to actually use your fonts
r'\usepackage{helvet}', # set the normal font here
r'\usepackage{sansmath}', # load up the sansmath so that math -> helvet
r'\sansmath' # <- tricky! -- gotta actually tell tex to use!
]
#========================================================================================
fig = plot.figure(figsize=(8.5, 4))
ax = fig.add_subplot(111)
ax.set_xlim([0,1])
width = 1.0 / (GROUP_SIZE * 4 + GROUP_SIZE + 2)
recs = []
pos = []
for i in range(GROUP_SIZE):
recs.append(ax.bar(width * 1.5 + i * (4 + 1) * width, all_data[i][0], width, color=COLORS[0], linewidth = BORDER_SIZE, edgecolor = BORDER_COLOR))
recs.append(ax.bar(width * 1.5 + i * (4 + 1) * width + width, all_data[i][1], width, color=COLORS[1], linewidth = BORDER_SIZE, edgecolor = BORDER_COLOR))
pos.append(width * 1.5 + i * (4 + 1) * width + width * 2)
recs.append(ax.bar(width * 1.5 + i * (4 + 1) * width + width * 2, all_data[i][2], width, color=COLORS[2], linewidth = BORDER_SIZE, edgecolor = BORDER_COLOR, ))
recs.append(ax.bar(width * 1.5 + i * (4 + 1) * width + width * 3, all_data[i][3], width, color=COLORS[3], linewidth = BORDER_SIZE, edgecolor = BORDER_COLOR, ))
for rec in recs:
autolabel(rec)
y_ticks = [0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5]
ax.set_yticks(y_ticks)
ax.set_ylim(0, 3.5)
ax.set_ylabel(Y_LABEL, fontsize=Y_LABEL_FONT_SIZE - 4)
ax.legend(recs[0:4], ncol=len(TYPES) // 2, loc='upper left', labels=TYPES, fontsize='medium')  # one handle per label; ncol must be an integer
ax.set_xticks(pos)
ax.set_xticklabels(NAMES, rotation=20)
for label in ax.get_xticklabels():
label.set_fontsize(X_TICK_FONT_SIZE)
for label in ax.get_yticklabels():
label.set_fontsize(Y_TICK_FONT_SIZE)
plot.savefig(GRAPH_OUTPUT_PATH, bbox_inches='tight')
| 34.811881
| 163
| 0.628271
|
ca91fdac2b105147a518343fa7f74fc905b3eece
| 14,030
|
py
|
Python
|
vaserving/pipeline_manager.py
|
whbruce/video-analytics-serving
|
dee544d528122548f14a3993703182e8ab05ef45
|
[
"BSD-3-Clause"
] | null | null | null |
vaserving/pipeline_manager.py
|
whbruce/video-analytics-serving
|
dee544d528122548f14a3993703182e8ab05ef45
|
[
"BSD-3-Clause"
] | null | null | null |
vaserving/pipeline_manager.py
|
whbruce/video-analytics-serving
|
dee544d528122548f14a3993703182e8ab05ef45
|
[
"BSD-3-Clause"
] | null | null | null |
'''
* Copyright (C) 2019-2020 Intel Corporation.
*
* SPDX-License-Identifier: BSD-3-Clause
'''
import os
import json
import traceback
from collections import deque
from collections import defaultdict
import jsonschema
from vaserving.common.utils import logging
from vaserving.pipeline import Pipeline
import vaserving.schema as schema
class PipelineManager:
def __init__(self, model_manager, pipeline_dir, max_running_pipelines=-1,
ignore_init_errors=False):
self.max_running_pipelines = max_running_pipelines
self.model_manager = model_manager
self.running_pipelines = 0
self.pipeline_types = {}
self.pipeline_instances = {}
self.pipeline_state = {}
self.pipeline_id = 0
self.pipelines = {}
self.pipeline_queue = deque()
self.pipeline_dir = pipeline_dir
self.logger = logging.get_logger('PipelineManager', is_static=True)
success = self._load_pipelines()
if (not ignore_init_errors) and (not success):
raise Exception("Error Initializing Pipelines")
def _import_pipeline_types(self):
pipeline_types = {}
try:
from vaserving.gstreamer_pipeline import GStreamerPipeline # pylint: disable=import-error
pipeline_types['GStreamer'] = GStreamerPipeline
except Exception as error:
pipeline_types['GStreamer'] = None
self.logger.info(
"GStreamer Pipelines Not Enabled: %s\n", error)
try:
from vaserving.ffmpeg_pipeline import FFmpegPipeline # pylint: disable=import-error
pipeline_types['FFmpeg'] = FFmpegPipeline
except Exception as error:
pipeline_types['FFmpeg'] = None
self.logger.info(
"FFmpeg Pipelines Not Enabled: %s\n", error)
pipeline_types = {key: value for key,
value in pipeline_types.items() if value}
return pipeline_types
def _load_pipelines(self):
# TODO: refactor
# pylint: disable=R0912,R1702
heading = "Loading Pipelines"
banner = "="*len(heading)
self.logger.info(banner)
self.logger.info(heading)
self.logger.info(banner)
error_occurred = False
self.pipeline_types = self._import_pipeline_types()
self.logger.info("Loading Pipelines from Config Path {path}".format(
path=self.pipeline_dir))
if os.path.islink(self.pipeline_dir):
self.logger.warning(
"Pipelines directory is symbolic link")
if os.path.ismount(self.pipeline_dir):
self.logger.warning(
"Pipelines directory is mount point")
pipelines = defaultdict(dict)
for root, subdirs, files in os.walk(self.pipeline_dir):
if os.path.abspath(root) == os.path.abspath(self.pipeline_dir):
for subdir in subdirs:
pipelines[subdir] = {}
else:
if len(files) == 0:
pipeline = os.path.basename(root)
pipelines[pipeline] = {}
for subdir in subdirs:
pipelines[pipeline][subdir] = {}
else:
pipeline = os.path.basename(os.path.dirname(root))
version = os.path.basename(root)
for file in files:
path = os.path.join(root, file)
if path.endswith(".json"):
try:
with open(path, 'r') as jsonfile:
config = json.load(jsonfile)
if ('type' not in config) or ('description' not in config):
del pipelines[pipeline][version]
self.logger.error(
"Pipeline %s"
" is missing type or description", pipeline)
error_occurred = True
continue
if "template" in config:
if isinstance(config["template"], list):
config["template"] = "".join(
config["template"])
if config['type'] in self.pipeline_types:
pipelines[pipeline][version] = config
config['name'] = pipeline
config['version'] = version
# validate_config will throw warning of
# missing elements but continue execution
self.pipeline_types[config['type']].validate_config(
config)
self.logger.info("Loading Pipeline: {} version: "
"{} type: {} from {}".format(
pipeline,
version,
config['type'],
path))
else:
del pipelines[pipeline][version]
self.logger.error("Pipeline %s with type %s not supported",
pipeline, config['type'])
error_occurred = True
except Exception as error:
if (pipeline in pipelines) and (version in pipelines[pipeline]):
del pipelines[pipeline][version]
self.logger.error(
"Failed to Load Pipeline from: {}".format(path))
self.logger.error(
"Exception: {}".format(error))
self.logger.error(traceback.format_exc())
error_occurred = True
# Remove pipelines with no valid versions
pipelines = {pipeline: versions for pipeline,
versions in pipelines.items() if len(versions) > 0}
self.pipelines = pipelines
heading = "Completed Loading Pipelines"
banner = "="*len(heading)
self.logger.info(banner)
self.logger.info(heading)
self.logger.info(banner)
return not error_occurred
def get_loaded_pipelines(self):
results = []
for pipeline in self.pipelines:
for version in self.pipelines[pipeline]:
result = self.get_pipeline_parameters(
pipeline, version)
if result:
results.append(result)
return results
def get_pipeline_parameters(self, name, version):
if not self.pipeline_exists(name, version):
return None
params_obj = {
"name": name,
"version": version
}
if "type" in self.pipelines[name][version]:
params_obj["type"] = self.pipelines[name][version]["type"]
if "description" in self.pipelines[name][version]:
params_obj["description"] = self.pipelines[name][version]["description"]
if "parameters" in self.pipelines[name][version]:
params_obj["parameters"] = self.pipelines[name][version]["parameters"]
return params_obj
def is_input_valid(self, request, pipeline_config, section):
config = pipeline_config.get(section, {})
try:
if (section in request):
input_validator = jsonschema.Draft4Validator(
schema=config, format_checker=jsonschema.draft4_format_checker)
input_validator.validate(request.get(section, {}))
self.logger.debug(
"{} Validation successful".format(section))
return True
except Exception as error:
self.logger.debug(
"Validation error in request section {}, error: {}".format(section, error))
return False
def set_section_defaults(self, request, config, request_section, config_section):
section, config = Pipeline.get_section_and_config(
request, config, request_section, config_section)
for key in config:
if (key not in section) and ("default" in config[key]):
section[key] = config[key]["default"]
if (len(section) != 0):
result = request
for key in request_section[0:-1]:
result = result.setdefault(key, {})
result[request_section[-1]] = section
def set_defaults(self, request, config):
if ("destination" not in config):
config["destination"] = schema.destination
if ("source" not in config):
config["source"] = schema.source
if ("tags" not in config):
config["tags"] = schema.tags
self.set_section_defaults(request, config, ["parameters"],
["parameters", "properties"])
if "destination" in request:
if "type" in request["destination"]:
metadata = {"metadata": request["destination"]}
request["destination"] = metadata
for dest_type in request["destination"]:
if "type" in request["destination"][dest_type]:
self.set_section_defaults(request, config, ["destination", dest_type],
["destination", dest_type,
request["destination"][dest_type]["type"],
"properties"])
if ("source" in request) and ("type" in request["source"]):
self.set_section_defaults(request, config, ["source"],
["source",
request["source"]["type"],
"properties"])
self.set_section_defaults(request, config, ["tags"],
["tags", "properties"])
def create_instance(self, name, version, request_original, options):
self.logger.info(
"Creating Instance of Pipeline {name}/{v}".format(name=name, v=version))
if not self.pipeline_exists(name, version):
return None, "Invalid Pipeline or Version"
pipeline_type = self.pipelines[name][str(version)]['type']
pipeline_config = self.pipelines[name][str(version)]
request = request_original.copy()
self.set_defaults(request, pipeline_config)
if not self.is_input_valid(request, pipeline_config, "parameters"):
return None, "Invalid Parameters"
if not self.is_input_valid(request, pipeline_config, "destination"):
return None, "Invalid Destination"
if not self.is_input_valid(request, pipeline_config, "source"):
return None, "Invalid Source"
if not self.is_input_valid(request, pipeline_config, "tags"):
return None, "Invalid Tags"
self.pipeline_id += 1
self.pipeline_instances[self.pipeline_id] = self.pipeline_types[pipeline_type](
self.pipeline_id,
pipeline_config,
self.model_manager,
request,
self._pipeline_finished,
options)
self.pipeline_queue.append(self.pipeline_id)
self._start()
return self.pipeline_id, None
def _get_next_pipeline_identifier(self):
if (self.max_running_pipelines > 0):
if (self.running_pipelines >= self.max_running_pipelines):
return None
try:
if (self.pipeline_queue):
return self.pipeline_queue.popleft()
except Exception:
pass
return None
def _start(self):
pipeline_identifier = self._get_next_pipeline_identifier()
if (pipeline_identifier):
pipeline_to_start = self.pipeline_instances[pipeline_identifier]
self.running_pipelines += 1
pipeline_to_start.start()
def _pipeline_finished(self):
self.running_pipelines -= 1
self._start()
def get_instance_parameters(self, name, version, instance_id):
if self.instance_exists(name, version, instance_id):
return self.pipeline_instances[instance_id].params()
return None
def get_instance_status(self, name, version, instance_id):
if self.instance_exists(name, version, instance_id):
return self.pipeline_instances[instance_id].status()
return None
def stop_instance(self, name, version, instance_id):
if self.instance_exists(name, version, instance_id):
try:
self.pipeline_queue.remove(instance_id)
except Exception:
pass
return self.pipeline_instances[instance_id].stop()
return None
def instance_exists(self, name, version, instance_id):
if (self.pipeline_exists(name, version) and
instance_id in self.pipeline_instances):
return True
self.logger.warning("Invalid Instance ID")
return False
def pipeline_exists(self, name, version):
if name in self.pipelines and str(version) in self.pipelines[name]:
return True
self.logger.warning("Invalid pipeline or version")
return False
| 42.905199
| 102
| 0.533428
|
e3af5b9815f264063cbaf9456763b8244c629463
| 1,239
|
py
|
Python
|
src/gamesbyexample/milliondicestats.py
|
Hacker-Boy9999/PythonStdioGames
|
be96635a3a54e0ca32f2658f9e7089b173db5fa2
|
[
"Python-2.0"
] | null | null | null |
src/gamesbyexample/milliondicestats.py
|
Hacker-Boy9999/PythonStdioGames
|
be96635a3a54e0ca32f2658f9e7089b173db5fa2
|
[
"Python-2.0"
] | null | null | null |
src/gamesbyexample/milliondicestats.py
|
Hacker-Boy9999/PythonStdioGames
|
be96635a3a54e0ca32f2658f9e7089b173db5fa2
|
[
"Python-2.0"
] | null | null | null |
"""Million Dice Roll Statistics Simulator
By Al Sweigart al@inventwithpython.com
A simulation of one million dice rolls.
This and other games are available at https://nostarch.com/XX
Tags: tiny, beginner, math, simulation"""
__version__ = 0
import random, time
print('''Million Dice Roll Statistics Simulator
By Al Sweigart al@inventwithpython.com
Enter how many six-sided dice you want to roll:''')
numberOfDice = int(input('> '))
# Set up a dictionary to store the results of each dice roll:
results = {}
for i in range(numberOfDice, (numberOfDice * 6) + 1):
results[i] = 0
# Simulate dice rolls:
print('Simulating 1,000,000 rolls of {} dice...'.format(numberOfDice))
lastPrintTime = time.time()
for i in range(1000000):
if time.time() > lastPrintTime + 1:
print('{}% done...'.format(round(i / 10000, 1)))
lastPrintTime = time.time()
total = 0
for j in range(numberOfDice):
total = total + random.randint(1, 6)
results[total] = results[total] + 1
# Display results:
print('TOTAL - ROLLS - PERCENTAGE')
for i in range(numberOfDice, (numberOfDice * 6) + 1):
roll = results[i]
percentage = round(results[i] / 10000, 1)
print(' {} - {} rolls - {}%'.format(i, roll, percentage))
| 31.769231
| 70
| 0.678773
|
41827c49b1aef05220a55ce85c77bf51520cc56d
| 1,148
|
py
|
Python
|
domains/fetch/problems/training/problem1081_CR.py
|
patras91/rae_release
|
0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30
|
[
"BSD-3-Clause"
] | 1
|
2021-09-28T12:56:56.000Z
|
2021-09-28T12:56:56.000Z
|
domains/fetch/problems/training/problem1081_CR.py
|
patras91/rae_release
|
0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30
|
[
"BSD-3-Clause"
] | null | null | null |
domains/fetch/problems/training/problem1081_CR.py
|
patras91/rae_release
|
0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30
|
[
"BSD-3-Clause"
] | 1
|
2022-03-31T16:30:39.000Z
|
2022-03-31T16:30:39.000Z
|
__author__ = 'patras'
from domain_chargeableRobot import *
from timer import DURATION
from state import state
DURATION.TIME = {
'put': 2,
'take': 2,
'perceive': 2,
'charge': 2,
'move': 2,
'moveToEmergency': 2,
'moveCharger': 2,
'addressEmergency': 2,
'wait': 2,
}
DURATION.COUNTER = {
'put': 2,
'take': 2,
'perceive': 2,
'charge': 2,
'move': 2,
'moveToEmergency': 2,
'moveCharger': 2,
'addressEmergency': 2,
'wait': 2,
}
rv.LOCATIONS = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
rv.EDGES = {1: [2], 2: [1, 3], 3: [2, 4], 4: [5, 3, 6, 7], 5: [4, 9], 6: [4, 10], 7: [4, 8], 8: [7], 9: [5], 10: [6]}
rv.OBJECTS=['o1']
rv.ROBOTS=['r1','r2']
def ResetState():
state.loc = {'r1': 2, 'r2': 1}
state.charge = {'r1': 3, 'r2': 3}
state.load = {'r1': NIL, 'r2': NIL}
state.pos = {'c1': 'r2', 'o1': 6}
state.containers = { 1:[],2:[],3:[],4:[],5:[],6:['o1'],7:[],8:[],9:[],10:[],}
state.emergencyHandling = {'r1': False, 'r2': False}
state.view = {}
for l in rv.LOCATIONS:
state.view[l] = False
tasks = {
2: [['fetch', 'r1', 'o1']],
}
eventsEnv = {
}
| 22.509804
| 117
| 0.493902
|
8d1210ac294cd1d36ac429a739b7b609e6ff67b4
| 2,906
|
py
|
Python
|
cob/utils/docker.py
|
oren0e/cob
|
f2a5d74a15f5262d7980e4cf1f1a20af29194ffb
|
[
"BSD-3-Clause"
] | 2
|
2019-04-07T20:19:55.000Z
|
2021-05-27T10:23:31.000Z
|
cob/utils/docker.py
|
oren0e/cob
|
f2a5d74a15f5262d7980e4cf1f1a20af29194ffb
|
[
"BSD-3-Clause"
] | 126
|
2016-08-10T19:59:45.000Z
|
2021-11-26T06:58:16.000Z
|
cob/utils/docker.py
|
oren0e/cob
|
f2a5d74a15f5262d7980e4cf1f1a20af29194ffb
|
[
"BSD-3-Clause"
] | 6
|
2017-11-16T12:05:47.000Z
|
2021-11-24T09:21:17.000Z
|
from mitba import cached_function
from ..exceptions import MissingDependency
import os
import shutil
import subprocess
__all__ = ['docker_cmd', 'docker_compose_cmd']
class Docker:
def __init__(self, command_name='docker'):
self._cmdname = command_name
self._cmd_full_path = None
def get_command_name(self):
return self._cmdname
@cached_function
def is_sudo_needed(self):
proc = subprocess.Popen(
'docker ps', shell=True, stderr=subprocess.PIPE, stdout=subprocess.DEVNULL)
err = proc.communicate()[1].decode('utf-8')
if proc.returncode == 0:
return False
assert 'permission denied' in err.lower(), f"'docker ps' failed on {err}"
return True
def get_full_command_name(self):
if self._cmd_full_path is None:
cmd = self._cmdname
if not os.path.isabs(cmd):
cmd = shutil.which(cmd)
if cmd is None:
raise MissingDependency(f'{self._cmdname} could not be found')
self._cmd_full_path = cmd
return self._cmd_full_path
def args(self, args):
return DockerCommand(self, args)
def __getattr__(self, attr):
if attr.startswith('_'):
raise AttributeError(attr)
def returned(args):
return self.args([attr]).args(args)
returned.__name__ = attr
return returned
class DockerCommand:
def __init__(self, docker, args):
self._docker = docker
self._force_sudo = False
self._args = list(args)
def args(self, args):
self._args.extend(args)
return self
def force_sudo(self, force=True):
self._force_sudo = force
return self
def to_split_command(self):
returned = []
if self._force_sudo or self._docker.is_sudo_needed():
returned.extend(
['sudo', '-p', 'Please enter your password to run docker: '])
returned.append(self._docker.get_full_command_name())
returned.extend(self._args)
return returned
def execv(self):
cmd = self.to_split_command()
os.execv(cmd[0], cmd)
def popen(self, *args, **kwargs):
return subprocess.Popen(self.to_split_command(), *args, **kwargs)
def check_output(self, *args, **kwargs):
return subprocess.check_output(self.to_split_command(), *args, **kwargs)
def run(self, use_exec=False):
if use_exec:
self.execv()
else:
returned = self.popen()
if returned.wait() != 0:
raise subprocess.CalledProcessError(
returned.returncode, self.to_split_command())
return returned
def __repr__(self):
return f"[docker] {' '.join(self._args)}"
docker_cmd = Docker()
docker_compose_cmd = Docker('docker-compose')
| 27.942308
| 87
| 0.608052
|
e29536b884c2c732f6620989d007f0a9e7eb181e
| 6,231
|
py
|
Python
|
docusign_esign/models/bulk_recipients_summary_response.py
|
pivotal-energy-solutions/docusign-python-client
|
f3edd0b82e57999bc8848a63a0477712714ee437
|
[
"MIT"
] | null | null | null |
docusign_esign/models/bulk_recipients_summary_response.py
|
pivotal-energy-solutions/docusign-python-client
|
f3edd0b82e57999bc8848a63a0477712714ee437
|
[
"MIT"
] | null | null | null |
docusign_esign/models/bulk_recipients_summary_response.py
|
pivotal-energy-solutions/docusign-python-client
|
f3edd0b82e57999bc8848a63a0477712714ee437
|
[
"MIT"
] | 1
|
2021-04-26T20:52:45.000Z
|
2021-04-26T20:52:45.000Z
|
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign.
OpenAPI spec version: v2
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class BulkRecipientsSummaryResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, bulk_recipients=None, bulk_recipients_count=None, bulk_recipients_uri=None, error_details=None):
"""
BulkRecipientsSummaryResponse - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'bulk_recipients': 'list[BulkRecipient]',
'bulk_recipients_count': 'str',
'bulk_recipients_uri': 'str',
'error_details': 'list[ErrorDetails]'
}
self.attribute_map = {
'bulk_recipients': 'bulkRecipients',
'bulk_recipients_count': 'bulkRecipientsCount',
'bulk_recipients_uri': 'bulkRecipientsUri',
'error_details': 'errorDetails'
}
self._bulk_recipients = bulk_recipients
self._bulk_recipients_count = bulk_recipients_count
self._bulk_recipients_uri = bulk_recipients_uri
self._error_details = error_details
@property
def bulk_recipients(self):
"""
Gets the bulk_recipients of this BulkRecipientsSummaryResponse.
A complex type containing information about the bulk recipients in the response.
:return: The bulk_recipients of this BulkRecipientsSummaryResponse.
:rtype: list[BulkRecipient]
"""
return self._bulk_recipients
@bulk_recipients.setter
def bulk_recipients(self, bulk_recipients):
"""
Sets the bulk_recipients of this BulkRecipientsSummaryResponse.
A complex type containing information about the bulk recipients in the response.
:param bulk_recipients: The bulk_recipients of this BulkRecipientsSummaryResponse.
:type: list[BulkRecipient]
"""
self._bulk_recipients = bulk_recipients
@property
def bulk_recipients_count(self):
"""
Gets the bulk_recipients_count of this BulkRecipientsSummaryResponse.
The number of items returned in this response.
:return: The bulk_recipients_count of this BulkRecipientsSummaryResponse.
:rtype: str
"""
return self._bulk_recipients_count
@bulk_recipients_count.setter
def bulk_recipients_count(self, bulk_recipients_count):
"""
Sets the bulk_recipients_count of this BulkRecipientsSummaryResponse.
The number of items returned in this response.
:param bulk_recipients_count: The bulk_recipients_count of this BulkRecipientsSummaryResponse.
:type: str
"""
self._bulk_recipients_count = bulk_recipients_count
@property
def bulk_recipients_uri(self):
"""
Gets the bulk_recipients_uri of this BulkRecipientsSummaryResponse.
Contains a URI for an endpoint that allows you to easily retrieve bulk recipient information.
:return: The bulk_recipients_uri of this BulkRecipientsSummaryResponse.
:rtype: str
"""
return self._bulk_recipients_uri
@bulk_recipients_uri.setter
def bulk_recipients_uri(self, bulk_recipients_uri):
"""
Sets the bulk_recipients_uri of this BulkRecipientsSummaryResponse.
Contains a URI for an endpoint that allows you to easily retrieve bulk recipient information.
:param bulk_recipients_uri: The bulk_recipients_uri of this BulkRecipientsSummaryResponse.
:type: str
"""
self._bulk_recipients_uri = bulk_recipients_uri
@property
def error_details(self):
"""
Gets the error_details of this BulkRecipientsSummaryResponse.
        Array of errors.
:return: The error_details of this BulkRecipientsSummaryResponse.
:rtype: list[ErrorDetails]
"""
return self._error_details
@error_details.setter
def error_details(self, error_details):
"""
Sets the error_details of this BulkRecipientsSummaryResponse.
        Array of errors.
:param error_details: The error_details of this BulkRecipientsSummaryResponse.
:type: list[ErrorDetails]
"""
self._error_details = error_details
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
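# Illustrative usage sketch (not part of the generated model code); the names
# below follow the attribute definitions above, the URI value is made up:
#
#     summary = BulkRecipientsSummaryResponse(bulk_recipients_count='2')
#     summary.bulk_recipients_uri = '/envelopes/123/bulk_recipients'
#     print(summary.to_dict())   # nested models are expanded via to_dict()
#     print(summary)             # __repr__ delegates to pformat(self.to_dict())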
| avg_line_length: 32.284974 | max_line_length: 126 | alphanum_fraction: 0.634409 |

hexsha: 58bc6a8fd32b76a2e45ab7a829426ea25d2d8d9e | size: 42493 | ext: py | lang: Python
max_stars:  path: src/emuvim/dcemulator/net.py | repo: StefanUPB/vim-emu | head: aacef8f694b996db8fea123024e9a91368f287d2 | licenses: ["Apache-2.0"] | count: null | dates: null .. null
max_issues: path: src/emuvim/dcemulator/net.py | repo: StefanUPB/vim-emu | head: aacef8f694b996db8fea123024e9a91368f287d2 | licenses: ["Apache-2.0"] | count: null | dates: null .. null
max_forks:  path: src/emuvim/dcemulator/net.py | repo: StefanUPB/vim-emu | head: aacef8f694b996db8fea123024e9a91368f287d2 | licenses: ["Apache-2.0"] | count: null | dates: null .. null
"""
Copyright (c) 2015 SONATA-NFV and Paderborn University
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the SONATA-NFV, Paderborn University
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
"""
import logging
import site
import time
from subprocess import Popen
import re
import requests
import os
import json
from mininet.net import Containernet
from mininet.node import Controller, DefaultController, OVSSwitch, OVSKernelSwitch, Docker, RemoteController
from mininet.cli import CLI
from mininet.link import TCLink
from mininet.clean import cleanup
import networkx as nx
from emuvim.dcemulator.monitoring import DCNetworkMonitor
from emuvim.dcemulator.node import Datacenter, EmulatorCompute, EmulatorExtSAP
from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
LOG = logging.getLogger("dcemulator.net")
LOG.setLevel(logging.DEBUG)
# default CPU period used for cpu percentage-based cfs values (microseconds)
CPU_PERIOD = 1000000
# default priority setting for added flow-rules
DEFAULT_PRIORITY = 1000
# default cookie number for new flow-rules
DEFAULT_COOKIE = 10
class DCNetwork(Containernet):
"""
Wraps the original Mininet/Containernet class and provides
methods to add data centers, switches, etc.
This class is used by topology definition scripts.
"""
def __init__(self, controller=RemoteController, monitor=False,
                 enable_learning=False,  # learning-switch behavior of the default OVS switches (in combination with the Ryu controller) can be turned off/on; needed for E-LAN functionality
dc_emulation_max_cpu=1.0, # fraction of overall CPU time for emulation
dc_emulation_max_mem=512, # emulation max mem in MB
**kwargs):
"""
Create an extended version of a Containernet network
:param dc_emulation_max_cpu: max. CPU time used by containers in data centers
:param kwargs: path through for Mininet parameters
:return:
"""
# members
self.dcs = {}
self.ryu_process = None
        # list of deployed NSDs, E-Lines and E-LANs (uploaded from the dummy gatekeeper)
self.deployed_nsds = []
self.deployed_elines = []
self.deployed_elans = []
self.installed_chains = []
# always cleanup environment before we start the emulator
self.killRyu()
cleanup()
# call original Docker.__init__ and setup default controller
Containernet.__init__(
self, switch=OVSKernelSwitch, controller=controller, **kwargs)
# default switch configuration
enable_ryu_learning = False
if enable_learning :
self.failMode = 'standalone'
enable_ryu_learning = True
else:
self.failMode = 'secure'
# Ryu management
if controller == RemoteController:
# start Ryu controller
self.startRyu(learning_switch=enable_ryu_learning)
# add the specified controller
self.addController('c0', controller=controller)
# graph of the complete DC network
self.DCNetwork_graph = nx.MultiDiGraph()
# initialize pool of vlan tags to setup the SDN paths
self.vlans = range(1, 4095)[::-1]
# link to Ryu REST_API
ryu_ip = 'localhost'
ryu_port = '8080'
self.ryu_REST_api = 'http://{0}:{1}'.format(ryu_ip, ryu_port)
self.RyuSession = requests.Session()
# monitoring agent
if monitor:
self.monitor_agent = DCNetworkMonitor(self)
else:
self.monitor_agent = None
# initialize resource model registrar
self.rm_registrar = ResourceModelRegistrar(
dc_emulation_max_cpu, dc_emulation_max_mem)
self.cpu_period = CPU_PERIOD
def addDatacenter(self, label, metadata={}, resource_log_path=None):
"""
Create and add a logical cloud data center to the network.
"""
if label in self.dcs:
raise Exception("Data center label already exists: %s" % label)
dc = Datacenter(label, metadata=metadata, resource_log_path=resource_log_path)
dc.net = self # set reference to network
self.dcs[label] = dc
dc.create() # finally create the data center in our Mininet instance
LOG.info("added data center: %s" % label)
return dc
def addLink(self, node1, node2, **params):
"""
Able to handle Datacenter objects as link
end points.
"""
assert node1 is not None
assert node2 is not None
# ensure type of node1
if isinstance( node1, basestring ):
if node1 in self.dcs:
node1 = self.dcs[node1].switch
if isinstance( node1, Datacenter ):
node1 = node1.switch
# ensure type of node2
if isinstance( node2, basestring ):
if node2 in self.dcs:
node2 = self.dcs[node2].switch
if isinstance( node2, Datacenter ):
node2 = node2.switch
# try to give containers a default IP
if isinstance( node1, Docker ):
if "params1" not in params:
params["params1"] = {}
if "ip" not in params["params1"]:
params["params1"]["ip"] = self.getNextIp()
if isinstance( node2, Docker ):
if "params2" not in params:
params["params2"] = {}
if "ip" not in params["params2"]:
params["params2"]["ip"] = self.getNextIp()
# ensure that we allow TCLinks between data centers
# TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
# see Containernet issue: https://github.com/mpeuster/containernet/issues/3
if "cls" not in params:
params["cls"] = TCLink
link = Containernet.addLink(self, node1, node2, **params)
# try to give container interfaces a default id
node1_port_id = node1.ports[link.intf1]
if isinstance(node1, Docker):
if "id" in params["params1"]:
node1_port_id = params["params1"]["id"]
node1_port_name = link.intf1.name
node2_port_id = node2.ports[link.intf2]
if isinstance(node2, Docker):
if "id" in params["params2"]:
node2_port_id = params["params2"]["id"]
node2_port_name = link.intf2.name
# add edge and assigned port number to graph in both directions between node1 and node2
# port_id: id given in descriptor (if available, otherwise same as port)
# port: portnumber assigned by Containernet
attr_dict = {}
# possible weight metrics allowed by TClink class:
weight_metrics = ['bw', 'delay', 'jitter', 'loss']
edge_attributes = [p for p in params if p in weight_metrics]
for attr in edge_attributes:
# if delay: strip ms (need number as weight in graph)
match = re.search('([0-9]*\.?[0-9]+)', str(params[attr]))
if match:
attr_number = match.group(1)
else:
attr_number = None
attr_dict[attr] = attr_number
attr_dict2 = {'src_port_id': node1_port_id, 'src_port_nr': node1.ports[link.intf1],
'src_port_name': node1_port_name,
'dst_port_id': node2_port_id, 'dst_port_nr': node2.ports[link.intf2],
'dst_port_name': node2_port_name}
attr_dict2.update(attr_dict)
self.DCNetwork_graph.add_edge(node1.name, node2.name, attr_dict=attr_dict2)
attr_dict2 = {'src_port_id': node2_port_id, 'src_port_nr': node2.ports[link.intf2],
'src_port_name': node2_port_name,
'dst_port_id': node1_port_id, 'dst_port_nr': node1.ports[link.intf1],
'dst_port_name': node1_port_name}
attr_dict2.update(attr_dict)
self.DCNetwork_graph.add_edge(node2.name, node1.name, attr_dict=attr_dict2)
LOG.debug("addLink: n1={0} intf1={1} -- n2={2} intf2={3}".format(
str(node1),node1_port_name, str(node2), node2_port_name))
return link
def removeLink(self, link=None, node1=None, node2=None):
"""
Remove the link from the Containernet and the networkx graph
"""
if link is not None:
node1 = link.intf1.node
node2 = link.intf2.node
assert node1 is not None
assert node2 is not None
Containernet.removeLink(self, link=link, node1=node1, node2=node2)
# TODO we might decrease the loglevel to debug:
try:
self.DCNetwork_graph.remove_edge(node2.name, node1.name)
except:
LOG.warning("%s, %s not found in DCNetwork_graph." % ((node2.name, node1.name)))
try:
self.DCNetwork_graph.remove_edge(node1.name, node2.name)
except:
LOG.warning("%s, %s not found in DCNetwork_graph." % ((node1.name, node2.name)))
def addDocker( self, label, **params ):
"""
Wrapper for addDocker method to use custom container class.
"""
self.DCNetwork_graph.add_node(label, type=params.get('type', 'docker'))
return Containernet.addDocker(self, label, cls=EmulatorCompute, **params)
def removeDocker( self, label, **params):
"""
Wrapper for removeDocker method to update graph.
"""
self.DCNetwork_graph.remove_node(label)
return Containernet.removeDocker(self, label, **params)
def addExtSAP(self, sap_name, sap_ip, **params):
"""
Wrapper for addExtSAP method to store SAP also in graph.
"""
# make sure that 'type' is set
params['type'] = params.get('type','sap_ext')
self.DCNetwork_graph.add_node(sap_name, type=params['type'])
return Containernet.addExtSAP(self, sap_name, sap_ip, **params)
def removeExtSAP(self, sap_name, **params):
"""
Wrapper for removeExtSAP method to remove SAP also from graph.
"""
self.DCNetwork_graph.remove_node(sap_name)
return Containernet.removeExtSAP(self, sap_name)
def addSwitch( self, name, add_to_graph=True, **params ):
"""
Wrapper for addSwitch method to store switch also in graph.
"""
# add this switch to the global topology overview
if add_to_graph:
self.DCNetwork_graph.add_node(name, type=params.get('type','switch'))
# set the learning switch behavior
if 'failMode' in params :
failMode = params['failMode']
else :
failMode = self.failMode
s = Containernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', failMode=failMode, **params)
return s
def getAllContainers(self):
"""
Returns a list with all containers within all data centers.
"""
all_containers = []
for dc in self.dcs.itervalues():
all_containers += dc.listCompute()
return all_containers
def start(self):
# start
for dc in self.dcs.itervalues():
dc.start()
Containernet.start(self)
def stop(self):
# stop the monitor agent
if self.monitor_agent is not None:
self.monitor_agent.stop()
# stop emulator net
Containernet.stop(self)
# stop Ryu controller
self.killRyu()
def CLI(self):
CLI(self)
def setLAN(self, vnf_list):
"""
        Set up an E-LAN network by assigning the same VLAN tag to the DC interface of each VNF in the E-LAN.
:param vnf_list: names of the VNFs in this E-LAN [{name:,interface:},...]
:return:
"""
src_sw = None
src_sw_inport_nr = 0
src_sw_inport_name = None
# get a vlan tag for this E-LAN
vlan = self.vlans.pop()
for vnf in vnf_list:
vnf_src_name = vnf['name']
vnf_src_interface = vnf['interface']
# check if port is specified (vnf:port)
if vnf_src_interface is None:
# take first interface by default
connected_sw = self.DCNetwork_graph.neighbors(vnf_src_name)[0]
link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
vnf_src_interface = link_dict[0]['src_port_id']
for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):
link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
for link in link_dict:
if (link_dict[link]['src_port_id'] == vnf_src_interface or
link_dict[link]['src_port_name'] == vnf_src_interface): # Fix: we might also get interface names, e.g, from a son-emu-cli call
# found the right link and connected switch
src_sw = connected_sw
src_sw_inport_nr = link_dict[link]['dst_port_nr']
src_sw_inport_name = link_dict[link]['dst_port_name']
break
# set the tag on the dc switch interface
LOG.debug('set E-LAN: vnf name: {0} interface: {1} tag: {2}'.format(vnf_src_name, vnf_src_interface,vlan))
switch_node = self.getNodeByName(src_sw)
self._set_vlan_tag(switch_node, src_sw_inport_name, vlan)
def _addMonitorFlow(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None,
tag=None, **kwargs):
"""
        Add a monitoring flow entry that installs a dedicated flow entry/counter at the beginning or end of a chain.
So this monitoring flowrule exists on top of a previously defined chain rule and uses the same vlan tag/routing.
:param vnf_src_name:
:param vnf_dst_name:
:param vnf_src_interface:
:param vnf_dst_interface:
:param tag: vlan tag to be used for this chain (same tag as existing chain)
:param monitor_placement: 'tx' or 'rx' indicating to place the extra flowentry resp. at the beginning or end of the chain
:return:
"""
src_sw = None
src_sw_inport_nr = 0
src_sw_inport_name = None
dst_sw = None
dst_sw_outport_nr = 0
dst_sw_outport_name = None
LOG.debug("call AddMonitorFlow vnf_src_name=%r, vnf_src_interface=%r, vnf_dst_name=%r, vnf_dst_interface=%r",
vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface)
#check if port is specified (vnf:port)
if vnf_src_interface is None:
# take first interface by default
connected_sw = self.DCNetwork_graph.neighbors(vnf_src_name)[0]
link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
vnf_src_interface = link_dict[0]['src_port_id']
for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):
link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
for link in link_dict:
if (link_dict[link]['src_port_id'] == vnf_src_interface or
link_dict[link]['src_port_name'] == vnf_src_interface): # Fix: we might also get interface names, e.g, from a son-emu-cli call
# found the right link and connected switch
src_sw = connected_sw
src_sw_inport_nr = link_dict[link]['dst_port_nr']
src_sw_inport_name = link_dict[link]['dst_port_name']
break
if vnf_dst_interface is None:
# take first interface by default
connected_sw = self.DCNetwork_graph.neighbors(vnf_dst_name)[0]
link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
vnf_dst_interface = link_dict[0]['dst_port_id']
vnf_dst_name = vnf_dst_name.split(':')[0]
for connected_sw in self.DCNetwork_graph.neighbors(vnf_dst_name):
link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
for link in link_dict:
if link_dict[link]['dst_port_id'] == vnf_dst_interface or \
link_dict[link]['dst_port_name'] == vnf_dst_interface: # Fix: we might also get interface names, e.g, from a son-emu-cli call
# found the right link and connected switch
dst_sw = connected_sw
dst_sw_outport_nr = link_dict[link]['src_port_nr']
dst_sw_outport_name = link_dict[link]['src_port_name']
break
if not tag >= 0:
LOG.exception('tag not valid: {0}'.format(tag))
# get shortest path
try:
# returns the first found shortest path
# if all shortest paths are wanted, use: all_shortest_paths
path = nx.shortest_path(self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))
except:
LOG.exception("No path could be found between {0} and {1} using src_sw={2} and dst_sw={3}".format(
vnf_src_name, vnf_dst_name, src_sw, dst_sw))
LOG.debug("Graph nodes: %r" % self.DCNetwork_graph.nodes())
LOG.debug("Graph edges: %r" % self.DCNetwork_graph.edges())
for e, v in self.DCNetwork_graph.edges():
LOG.debug("%r" % self.DCNetwork_graph[e][v])
return "No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name)
LOG.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))
current_hop = src_sw
switch_inport_nr = src_sw_inport_nr
cmd = kwargs.get('cmd')
#iterate through the path to install the flow-entries
for i in range(0,len(path)):
current_node = self.getNodeByName(current_hop)
if path.index(current_hop) < len(path)-1:
next_hop = path[path.index(current_hop)+1]
else:
#last switch reached
next_hop = vnf_dst_name
next_node = self.getNodeByName(next_hop)
if next_hop == vnf_dst_name:
switch_outport_nr = dst_sw_outport_nr
LOG.info("end node reached: {0}".format(vnf_dst_name))
elif not isinstance( next_node, OVSSwitch ):
LOG.info("Next node: {0} is not a switch".format(next_hop))
return "Next node: {0} is not a switch".format(next_hop)
else:
# take first link between switches by default
index_edge_out = 0
switch_outport_nr = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']
# set of entry via ovs-ofctl
if isinstance( current_node, OVSSwitch ):
kwargs['vlan'] = tag
kwargs['path'] = path
kwargs['current_hop'] = current_hop
kwargs['switch_inport_name'] = src_sw_inport_name
kwargs['switch_outport_name'] = dst_sw_outport_name
kwargs['skip_vlan_tag'] = True
kwargs['pathindex'] = i
monitor_placement = kwargs.get('monitor_placement').strip()
                # 'tx': put the monitoring flow at the first (source-side) switch of the chain
insert_flow = False
if monitor_placement == 'tx' and path.index(current_hop) == 0: # first node:
insert_flow = True
                # 'rx': put the monitoring flow at the last (destination-side) switch of the chain
elif monitor_placement == 'rx' and path.index(current_hop) == len(path) - 1: # last node:
insert_flow = True
elif monitor_placement not in ['rx', 'tx']:
LOG.exception('invalid monitor command: {0}'.format(monitor_placement))
if self.controller == RemoteController and insert_flow:
## set flow entry via ryu rest api
self._set_flow_entry_ryu_rest(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
break
elif insert_flow:
## set flow entry via ovs-ofctl
self._set_flow_entry_dpctl(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
break
# take first link between switches by default
if isinstance( next_node, OVSSwitch ):
switch_inport_nr = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']
current_hop = next_hop
return "path {2} between {0} and {1}".format(vnf_src_name, vnf_dst_name, cmd)
def setChain(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
"""
Chain 2 vnf interfaces together by installing the flowrules in the switches along their path.
Currently the path is found using the default networkx shortest path function.
        Each chain gets a unique VLAN id, so different chains will not interfere.
:param vnf_src_name: vnf name (string)
:param vnf_dst_name: vnf name (string)
:param vnf_src_interface: source interface name (string)
:param vnf_dst_interface: destination interface name (string)
:param cmd: 'add-flow' (default) to add a chain, 'del-flows' to remove a chain
:param cookie: cookie for the installed flowrules (can be used later as identifier for a set of installed chains)
:param match: custom match entry to be added to the flowrules (default: only in_port and vlan tag)
:param priority: custom flowrule priority
:param monitor: boolean to indicate whether this chain is a monitoring chain
:param tag: vlan tag to be used for this chain (pre-defined or new one if none is specified)
        :param skip_vlan_tag: boolean to indicate whether a vlan tag should be assigned to this flow or not
:param path: custom path between the two VNFs (list of switches)
:return: output log string
"""
# special procedure for monitoring flows
if kwargs.get('monitor'):
# check if chain already exists
found_chains = [chain_dict for chain_dict in self.installed_chains if
(chain_dict['vnf_src_name'] == vnf_src_name and chain_dict['vnf_src_interface'] == vnf_src_interface
and chain_dict['vnf_dst_name'] == vnf_dst_name and chain_dict['vnf_dst_interface'] == vnf_dst_interface)]
if len(found_chains) > 0:
# this chain exists, so need an extra monitoring flow
# assume only 1 chain per vnf/interface pair
LOG.debug('*** installing monitoring chain on top of pre-defined chain from {0}:{1} -> {2}:{3}'.
format(vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface))
tag = found_chains[0]['tag']
ret = self._addMonitorFlow(vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface,
tag=tag, table_id=0, **kwargs)
return ret
else:
# no chain existing (or E-LAN) -> install normal chain
LOG.warning('*** installing monitoring chain without pre-defined NSD chain from {0}:{1} -> {2}:{3}'.
format(vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface))
pass
cmd = kwargs.get('cmd', 'add-flow')
if cmd == 'add-flow' or cmd == 'del-flows':
ret = self._chainAddFlow(vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface, **kwargs)
if kwargs.get('bidirectional'):
if kwargs.get('path') is not None:
kwargs['path'] = list(reversed(kwargs.get('path')))
ret = ret +'\n' + self._chainAddFlow(vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs)
else:
ret = "Command unknown"
return ret
def _chainAddFlow(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
src_sw = None
src_sw_inport_nr = 0
src_sw_inport_name = None
dst_sw = None
dst_sw_outport_nr = 0
dst_sw_outport_name = None
LOG.debug("call chainAddFlow vnf_src_name=%r, vnf_src_interface=%r, vnf_dst_name=%r, vnf_dst_interface=%r",
vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface)
#check if port is specified (vnf:port)
if vnf_src_interface is None:
# take first interface by default
connected_sw = self.DCNetwork_graph.neighbors(vnf_src_name)[0]
link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
vnf_src_interface = link_dict[0]['src_port_id']
for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):
link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
for link in link_dict:
if (link_dict[link]['src_port_id'] == vnf_src_interface or
link_dict[link]['src_port_name'] == vnf_src_interface): # Fix: we might also get interface names, e.g, from a son-emu-cli call
# found the right link and connected switch
src_sw = connected_sw
src_sw_inport_nr = link_dict[link]['dst_port_nr']
src_sw_inport_name = link_dict[link]['dst_port_name']
break
if vnf_dst_interface is None:
# take first interface by default
connected_sw = self.DCNetwork_graph.neighbors(vnf_dst_name)[0]
link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
vnf_dst_interface = link_dict[0]['dst_port_id']
vnf_dst_name = vnf_dst_name.split(':')[0]
for connected_sw in self.DCNetwork_graph.neighbors(vnf_dst_name):
link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
for link in link_dict:
if link_dict[link]['dst_port_id'] == vnf_dst_interface or \
link_dict[link]['dst_port_name'] == vnf_dst_interface: # Fix: we might also get interface names, e.g, from a son-emu-cli call
# found the right link and connected switch
dst_sw = connected_sw
dst_sw_outport_nr = link_dict[link]['src_port_nr']
dst_sw_outport_name = link_dict[link]['src_port_name']
break
path = kwargs.get('path')
if path is None:
# get shortest path
try:
# returns the first found shortest path
# if all shortest paths are wanted, use: all_shortest_paths
# path = nx.shortest_path(self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))
# adjustment: transform delays to int (on copied graph) and use them for computing shortest paths
g = self.DCNetwork_graph.copy()
edge_delays = nx.get_edge_attributes(g, 'delay')
int_delays = {k: int(v) for (k, v) in edge_delays.items()}
nx.set_edge_attributes(g, 'delay', int_delays)
path = nx.shortest_path(g, src_sw, dst_sw, weight='delay')
print('Calculated shortest path (for delay): {}'.format(path))
except:
LOG.exception("No path could be found between {0} and {1} using src_sw={2} and dst_sw={3}".format(
vnf_src_name, vnf_dst_name, src_sw, dst_sw))
LOG.debug("Graph nodes: %r" % self.DCNetwork_graph.nodes())
LOG.debug("Graph edges: %r" % self.DCNetwork_graph.edges())
for e, v in self.DCNetwork_graph.edges():
LOG.debug("%r" % self.DCNetwork_graph[e][v])
return "No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name)
LOG.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))
current_hop = src_sw
switch_inport_nr = src_sw_inport_nr
# choose free vlan
cmd = kwargs.get('cmd')
vlan = None
if cmd == 'add-flow':
if kwargs.get('tag'):
# use pre-defined tag
vlan = kwargs.get('tag')
else:
vlan = self.vlans.pop()
# store the used vlan tag to identify this chain
if not kwargs.get('monitor'):
chain_dict = {}
chain_dict['vnf_src_name'] = vnf_src_name
chain_dict['vnf_dst_name'] = vnf_dst_name
chain_dict['vnf_src_interface'] = vnf_src_interface
chain_dict['vnf_dst_interface'] = vnf_dst_interface
chain_dict['tag'] = vlan
self.installed_chains.append(chain_dict)
#iterate through the path to install the flow-entries
for i in range(0,len(path)):
current_node = self.getNodeByName(current_hop)
if i < len(path) - 1:
next_hop = path[i + 1]
else:
# last switch reached
next_hop = vnf_dst_name
next_node = self.getNodeByName(next_hop)
if next_hop == vnf_dst_name:
switch_outport_nr = dst_sw_outport_nr
LOG.info("end node reached: {0}".format(vnf_dst_name))
elif not isinstance( next_node, OVSSwitch ):
LOG.info("Next node: {0} is not a switch".format(next_hop))
return "Next node: {0} is not a switch".format(next_hop)
else:
# take first link between switches by default
index_edge_out = 0
switch_outport_nr = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']
# set OpenFlow entry
if isinstance( current_node, OVSSwitch ):
kwargs['vlan'] = vlan
kwargs['path'] = path
kwargs['current_hop'] = current_hop
kwargs['switch_inport_name'] = src_sw_inport_name
kwargs['switch_outport_name'] = dst_sw_outport_name
kwargs['pathindex'] = i
if self.controller == RemoteController:
## set flow entry via ryu rest api
self._set_flow_entry_ryu_rest(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
else:
## set flow entry via ovs-ofctl
self._set_flow_entry_dpctl(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
# take first link between switches by default
if isinstance( next_node, OVSSwitch ):
switch_inport_nr = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']
current_hop = next_hop
flow_options = {
'priority':kwargs.get('priority', DEFAULT_PRIORITY),
'cookie':kwargs.get('cookie', DEFAULT_COOKIE),
'vlan':kwargs['vlan'],
'path':kwargs['path'],
'match_input':kwargs.get('match')
}
flow_options_str = json.dumps(flow_options, indent=1)
return "success: {2} between {0} and {1} with options: {3}".format(vnf_src_name, vnf_dst_name, cmd, flow_options_str)
def _set_flow_entry_ryu_rest(self, node, switch_inport_nr, switch_outport_nr, **kwargs):
match = 'in_port=%s' % switch_inport_nr
cookie = kwargs.get('cookie')
match_input = kwargs.get('match')
cmd = kwargs.get('cmd')
path = kwargs.get('path')
index = kwargs.get('pathindex')
vlan = kwargs.get('vlan')
priority = kwargs.get('priority', DEFAULT_PRIORITY)
# flag to not set the ovs port vlan tag
skip_vlan_tag = kwargs.get('skip_vlan_tag')
# table id to put this flowentry
table_id = kwargs.get('table_id')
if not table_id:
table_id = 0
s = ','
if match_input:
match = s.join([match, match_input])
flow = {}
flow['dpid'] = int(node.dpid, 16)
if cookie:
flow['cookie'] = int(cookie)
if priority:
flow['priority'] = int(priority)
flow['table_id'] = table_id
flow['actions'] = []
# possible Ryu actions, match fields:
# http://ryu.readthedocs.io/en/latest/app/ofctl_rest.html#add-a-flow-entry
if cmd == 'add-flow':
prefix = 'stats/flowentry/add'
if vlan != None:
if index == 0: # first node
# set vlan tag in ovs instance (to isolate E-LANs)
if not skip_vlan_tag:
in_port_name = kwargs.get('switch_inport_name')
self._set_vlan_tag(node, in_port_name, vlan)
# set vlan push action if more than 1 switch in the path
if len(path) > 1:
action = {}
action['type'] = 'PUSH_VLAN' # Push a new VLAN tag if a input frame is non-VLAN-tagged
action['ethertype'] = 33024 # Ethertype 0x8100(=33024): IEEE 802.1Q VLAN-tagged frame
flow['actions'].append(action)
action = {}
action['type'] = 'SET_FIELD'
action['field'] = 'vlan_vid'
# ryu expects the field to be masked
action['value'] = vlan | 0x1000
flow['actions'].append(action)
elif index == len(path) - 1: # last node
# set vlan tag in ovs instance (to isolate E-LANs)
if not skip_vlan_tag:
out_port_name = kwargs.get('switch_outport_name')
self._set_vlan_tag(node, out_port_name, vlan)
# set vlan pop action if more than 1 switch in the path
if len(path) > 1:
match += ',dl_vlan=%s' % vlan
action = {}
action['type'] = 'POP_VLAN'
flow['actions'].append(action)
else: # middle nodes
match += ',dl_vlan=%s' % vlan
# output action must come last
action = {}
action['type'] = 'OUTPUT'
action['port'] = switch_outport_nr
flow['actions'].append(action)
elif cmd == 'del-flows':
prefix = 'stats/flowentry/delete'
if cookie:
# TODO: add cookie_mask as argument
flow['cookie_mask'] = int('0xffffffffffffffff', 16) # need full mask to match complete cookie
action = {}
action['type'] = 'OUTPUT'
action['port'] = switch_outport_nr
flow['actions'].append(action)
flow['match'] = self._parse_match(match)
self.ryu_REST(prefix, data=flow)
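        # Example of a resulting payload for a middle hop of an 'add-flow' chain
        # (all values illustrative only):
        #   {"dpid": 1, "cookie": 10, "priority": 1000, "table_id": 0,
        #    "match": {"in_port": 1, "dl_vlan": 42},
        #    "actions": [{"type": "OUTPUT", "port": 2}]}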
def _set_vlan_tag(self, node, switch_port, tag):
node.vsctl('set', 'port {0} tag={1}'.format(switch_port,tag))
LOG.debug("set vlan in switch: {0} in_port: {1} vlan tag: {2}".format(node.name, switch_port, tag))
def _set_flow_entry_dpctl(self, node, switch_inport_nr, switch_outport_nr, **kwargs):
match = 'in_port=%s' % switch_inport_nr
cookie = kwargs.get('cookie')
match_input = kwargs.get('match')
cmd = kwargs.get('cmd')
path = kwargs.get('path')
index = kwargs.get('pathindex')
vlan = kwargs.get('vlan')
s = ','
if cookie:
cookie = 'cookie=%s' % cookie
match = s.join([cookie, match])
if match_input:
match = s.join([match, match_input])
if cmd == 'add-flow':
action = 'action=%s' % switch_outport_nr
if vlan != None:
if index == 0: # first node
action = ('action=mod_vlan_vid:%s' % vlan) + (',output=%s' % switch_outport_nr)
match = '-O OpenFlow13 ' + match
elif index == len(path) - 1: # last node
match += ',dl_vlan=%s' % vlan
action = 'action=strip_vlan,output=%s' % switch_outport_nr
else: # middle nodes
match += ',dl_vlan=%s' % vlan
ofcmd = s.join([match, action])
elif cmd == 'del-flows':
ofcmd = match
else:
ofcmd = ''
node.dpctl(cmd, ofcmd)
LOG.info("{3} in switch: {0} in_port: {1} out_port: {2}".format(node.name, switch_inport_nr,
switch_outport_nr, cmd))
# start Ryu Openflow controller as Remote Controller for the DCNetwork
def startRyu(self, learning_switch=True):
# start Ryu controller with rest-API
python_install_path = site.getsitepackages()[0]
# ryu default learning switch
#ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
#custom learning switch that installs a default NORMAL action in the ovs switches
dir_path = os.path.dirname(os.path.realpath(__file__))
ryu_path = dir_path + '/son_emu_simple_switch_13.py'
ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py'
# change the default Openflow controller port to 6653 (official IANA-assigned port number), as used by Mininet
# Ryu still uses 6633 as default
ryu_option = '--ofp-tcp-listen-port'
ryu_of_port = '6653'
ryu_cmd = 'ryu-manager'
FNULL = open("/tmp/ryu.log", 'w')
if learning_switch:
self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
LOG.debug('starting ryu-controller with {0}'.format(ryu_path))
LOG.debug('starting ryu-controller with {0}'.format(ryu_path2))
else:
# no learning switch, but with rest api
self.ryu_process = Popen([ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
LOG.debug('starting ryu-controller with {0}'.format(ryu_path2))
time.sleep(1)
def killRyu(self):
"""
Stop the Ryu controller that might be started by son-emu.
:return:
"""
# try it nicely
if self.ryu_process is not None:
self.ryu_process.terminate()
self.ryu_process.kill()
# ensure its death ;-)
Popen(['pkill', '-f', 'ryu-manager'])
def ryu_REST(self, prefix, dpid=None, data=None):
if dpid:
url = self.ryu_REST_api + '/' + str(prefix) + '/' + str(dpid)
else:
url = self.ryu_REST_api + '/' + str(prefix)
if data:
req = self.RyuSession.post(url, json=data)
else:
req = self.RyuSession.get(url)
# do extra logging if status code is not 200 (OK)
        if req.status_code != requests.codes.ok:
logging.info(
'type {0} encoding: {1} text: {2} headers: {3} history: {4}'.format(req.headers['content-type'],
req.encoding, req.text,
req.headers, req.history))
LOG.info('url: {0}'.format(str(url)))
if data: LOG.info('POST: {0}'.format(str(data)))
LOG.info('status: {0} reason: {1}'.format(req.status_code, req.reason))
if 'json' in req.headers['content-type']:
ret = req.json()
return ret
ret = req.text.rstrip()
return ret
# need to respect that some match fields must be integers
# http://ryu.readthedocs.io/en/latest/app/ofctl_rest.html#description-of-match-and-actions
    def _parse_match(self, match):
        matches = match.split(',')
        match_dict = {}
        for m in matches:
            kv = m.split('=')
            if len(kv) == 2:
                try:
                    value = int(kv[1], 0)
                except ValueError:
                    value = kv[1]
                match_dict[kv[0]] = value
        return match_dict
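        # e.g. _parse_match('in_port=2,dl_vlan=42,dl_type=0x0800')
        #   -> {'in_port': 2, 'dl_vlan': 42, 'dl_type': 2048}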
def find_connected_dc_interface(self, vnf_src_name, vnf_src_interface=None):
if vnf_src_interface is None:
# take first interface by default
connected_sw = self.DCNetwork_graph.neighbors(vnf_src_name)[0]
link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
vnf_src_interface = link_dict[0]['src_port_id']
for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):
link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
for link in link_dict:
if (link_dict[link]['src_port_id'] == vnf_src_interface or
link_dict[link]['src_port_name'] == vnf_src_interface): # Fix: we might also get interface names, e.g, from a son-emu-cli call
# found the right link and connected switch
src_sw = connected_sw
src_sw_inport_nr = link_dict[link]['dst_port_nr']
src_sw_inport_name = link_dict[link]['dst_port_name']
return src_sw_inport_name
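

# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): builds a minimal
# two data center topology with the API defined above. It assumes a host with
# Containernet/OVS installed and root privileges and is never called on import.
def _example_two_dc_topology():
    net = DCNetwork(controller=RemoteController, monitor=False, enable_learning=True)
    dc1 = net.addDatacenter("dc1")
    dc2 = net.addDatacenter("dc2")
    # addLink accepts Datacenter objects and wires up their internal switches
    net.addLink(dc1, dc2, delay="10ms")
    net.start()
    net.CLI()
    net.stop()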
| avg_line_length: 43.493347 | max_line_length: 166 | alphanum_fraction: 0.595557 |

hexsha: 12b3356ae54cb1ed487b5c741cc953173b24796e | size: 11578 | ext: py | lang: Python
max_stars:  path: train.py | repo: abdullahozer11/Segmentation-and-Classification-of-Objects-in-Point-Clouds | head: 08126539226dece8718e6a3c0092992b1666e702 | licenses: ["MIT"] | count: 5 | dates: 2019-03-12T19:06:21.000Z .. 2021-07-11T19:57:29.000Z
max_issues: path: log/train.py | repo: abdullahozer11/Segmentation-and-Classification-of-Objects-in-Point-Clouds | head: 08126539226dece8718e6a3c0092992b1666e702 | licenses: ["MIT"] | count: null | dates: null .. null
max_forks:  path: log/train.py | repo: abdullahozer11/Segmentation-and-Classification-of-Objects-in-Point-Clouds | head: 08126539226dece8718e6a3c0092992b1666e702 | licenses: ["MIT"] | count: 2 | dates: 2019-02-03T09:20:12.000Z .. 2021-05-02T08:19:56.000Z
import argparse
import math
import h5py
import numpy as np
import tensorflow as tf
import socket
import importlib
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, 'models'))
sys.path.append(os.path.join(BASE_DIR, 'utils'))
import provider
import tf_util
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='pointnet_cls', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]')
parser.add_argument('--max_epoch', type=int, default=250, help='Epoch to run [default: 250]')
#parser.add_argument('--batch_size', type=int, default=8, help='Batch Size during training [default: 16]')
parser.add_argument('--batch_size', type=int, default=32, help='Batch Size during training [default: 32]')
parser.add_argument('--learning_rate', type=float, default=0.0005, help='Initial learning rate [default: 0.0005]')
#parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for the momentum optimizer [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.7]')
FLAGS = parser.parse_args()
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model+'.py')
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
# NOTE: dataset-specific values; these may need to be changed for other data sets
MAX_NUM_POINT = 2048
NUM_CLASSES = 2
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
HOSTNAME = socket.gethostname()
flag=0
# ModelNet40 official train/test split
TRAIN_FILES = provider.getDataFiles( \
os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt'))
TEST_FILES = provider.getDataFiles(\
os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/test_files.txt'))
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def get_learning_rate(batch):
learning_rate = tf.train.exponential_decay(
BASE_LEARNING_RATE, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
DECAY_STEP, # Decay step.
DECAY_RATE, # Decay rate.
staircase=True)
learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
return learning_rate
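# With staircase=True the decay above is equivalent to the closed form
#   lr = max(BASE_LEARNING_RATE * DECAY_RATE ** ((batch * BATCH_SIZE) // DECAY_STEP), 0.00001)
# i.e. the learning rate is multiplied by DECAY_RATE once every DECAY_STEP processed samples.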
def get_bn_decay(batch):
bn_momentum = tf.train.exponential_decay(
BN_INIT_DECAY,
batch*BATCH_SIZE,
BN_DECAY_DECAY_STEP,
BN_DECAY_DECAY_RATE,
staircase=True)
bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
return bn_decay
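# Equivalently: bn_decay = min(0.99, 1 - 0.5 * 0.5 ** ((batch * BATCH_SIZE) // BN_DECAY_DECAY_STEP)),
# i.e. the batch-norm decay ramps from 0.5 toward the clip value 0.99 as training progresses.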
def train():
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
print(is_training_pl)
# Note the global_step=batch parameter to minimize.
# That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
batch = tf.Variable(0)
bn_decay = get_bn_decay(batch)
tf.summary.scalar('bn_decay', bn_decay)
# Get model and loss
pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
loss = MODEL.get_loss(pred, labels_pl, end_points)
tf.summary.scalar('loss', loss)
correct = tf.equal(tf.argmax(pred, 1), tf.to_int64(labels_pl))
accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE)
tf.summary.scalar('accuracy', accuracy)
# Get training operator
learning_rate = get_learning_rate(batch)
tf.summary.scalar('learning_rate', learning_rate)
if OPTIMIZER == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
elif OPTIMIZER == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss, global_step=batch)
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
# Add summary writers
#merged = tf.merge_all_summaries()
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
sess.graph)
test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'))
# Init variables
init = tf.global_variables_initializer()
# To fix the bug introduced in TF 0.12.1 as in
# http://stackoverflow.com/questions/41543774/invalidargumenterror-for-tensor-bool-tensorflow-0-12-1
#sess.run(init)
sess.run(init, {is_training_pl: True})
ops = {'pointclouds_pl': pointclouds_pl,
'labels_pl': labels_pl,
'is_training_pl': is_training_pl,
'pred': pred,
'loss': loss,
'train_op': train_op,
'merged': merged,
'step': batch}
for epoch in range(MAX_EPOCH):
log_string('**** EPOCH %03d ****' % (epoch))
sys.stdout.flush()
log_string("Training for one epoch starting now")
train_one_epoch(sess, ops, train_writer)
log_string("Training for one epoch ended")
log_string("Evaluation for one epoch starting now")
eval_one_epoch(sess, ops, test_writer)
if(flag==1):
save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
log_string("Model saved in file: %s" % save_path)
break
log_string("Evaluation for one epoch ended")
# Save the variables to disk.
'''if epoch % 10 == 0:
save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
log_string("Model saved in file: %s" % save_path)'''
def train_one_epoch(sess, ops, train_writer):
""" ops: dict mapping from string to tf ops """
is_training = True
# Shuffle train files
train_file_idxs = np.arange(0, len(TRAIN_FILES))
np.random.shuffle(train_file_idxs)
print("len(TRAIN_FILES) is \n",len(TRAIN_FILES))
for fn in range(len(TRAIN_FILES)):
log_string('----' + str(fn) + '-----')
current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
current_data = current_data[:,0:NUM_POINT,:]
current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
current_label = np.squeeze(current_label)
file_size = current_data.shape[0]
num_batches = file_size // BATCH_SIZE
total_correct = 0
total_seen = 0
loss_sum = 0
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
# Augment batched point clouds by rotation and jittering
rotated_data = provider.rotate_point_cloud(current_data[start_idx:end_idx, :, :])
jittered_data = provider.jitter_point_cloud(rotated_data)
feed_dict = {ops['pointclouds_pl']: jittered_data,
ops['labels_pl']: current_label[start_idx:end_idx],
ops['is_training_pl']: is_training,}
summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)
train_writer.add_summary(summary, step)
pred_val = np.argmax(pred_val, 1)
correct = np.sum(pred_val == current_label[start_idx:end_idx])
total_correct += correct
total_seen += BATCH_SIZE
loss_sum += loss_val
log_string('mean loss: %f' % (loss_sum / float(num_batches)))
log_string('accuracy: %f' % (total_correct / float(total_seen)))
def eval_one_epoch(sess, ops, test_writer):
    """ ops: dict mapping from string to tf ops """
    global flag
is_training = False
total_correct = 0
total_seen = 0
loss_sum = 0
total_seen_class = [0 for _ in range(NUM_CLASSES)]
total_correct_class = [0 for _ in range(NUM_CLASSES)]
for fn in range(len(TEST_FILES)):
log_string('----' + str(fn) + '-----')
current_data, current_label = provider.loadDataFile(TEST_FILES[fn])
current_data = current_data[:,0:NUM_POINT,:]
current_label = np.squeeze(current_label)
file_size = current_data.shape[0]
num_batches = file_size // BATCH_SIZE
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
ops['labels_pl']: current_label[start_idx:end_idx],
ops['is_training_pl']: is_training}
summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],
ops['loss'], ops['pred']], feed_dict=feed_dict)
pred_val = np.argmax(pred_val, 1)
correct = np.sum(pred_val == current_label[start_idx:end_idx])
total_correct += correct
total_seen += BATCH_SIZE
loss_sum += (loss_val*BATCH_SIZE)
for i in range(start_idx, end_idx):
l = int(current_label[i])
total_seen_class[l] += 1
total_correct_class[l] += (pred_val[i-start_idx] == l)
log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))
log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))
if ((total_correct/total_seen)==1.0):
flag=1
if __name__ == "__main__":
train()
LOG_FOUT.close()
| avg_line_length: 41.797834 | max_line_length: 133 | alphanum_fraction: 0.639143 |

hexsha: 1ecaa6bea3828721f8a601db451a61f0b335f16e | size: 5409 | ext: py | lang: Python
max_stars:  path: hw1/behavior_cloning.py | repo: xiaonanchong/DeepRL_CS294 | head: 67a0833b657b33ad7f7ad6de30e270847575828d | licenses: ["MIT"] | count: null | dates: null .. null
max_issues: path: hw1/behavior_cloning.py | repo: xiaonanchong/DeepRL_CS294 | head: 67a0833b657b33ad7f7ad6de30e270847575828d | licenses: ["MIT"] | count: null | dates: null .. null
max_forks:  path: hw1/behavior_cloning.py | repo: xiaonanchong/DeepRL_CS294 | head: 67a0833b657b33ad7f7ad6de30e270847575828d | licenses: ["MIT"] | count: null | dates: null .. null
"""
Example usage:
python behavior_cloning.py Hopper-v1 --render --hidden_dim 100 --weight_decay 1e-4 \
--batchsize 100 --epoch 50 --test_rollouts 5
"""
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import gym
from torch.optim import Adam
from torch.autograd import Variable
from sklearn.utils import shuffle
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('task', type=str)
parser.add_argument('--render', action='store_true')
parser.add_argument('--hidden_dim', type=int, default=100,
help='dim of hidden layer')
parser.add_argument('--weight_decay', type=float, default=1e-4)
parser.add_argument('--batchsize', type=int, default=100)
parser.add_argument('--epoch', type=int, default=50)
parser.add_argument('--test_rollouts', type=int, default=5,
help='number of rollouts when test policy')
args = parser.parse_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
data = pickle.load(open("data/"+args.task+".p", "rb"))
x = data['observations']
y = data['actions']
print ('dimension of obs :', x.shape)
print('dimension of actions :', y.shape)
x,y = shuffle(x,y)
    # split train/validation set
num_train = int(len(x)*0.9)
num_val = int(len(x)*0.1)
x_train, y_train = x[:num_train],y[:num_train]
x_val,y_val = x[num_train:],y[num_train:]
class simple_model(nn.Module):
def __init__(self,indim=100, hidden = 100,outdim = 100):
super(simple_model, self).__init__()
self.fc1 = nn.Sequential(
nn.Linear(indim, hidden),
nn.ReLU(),
nn.Linear(hidden, hidden//2),
nn.ReLU(),
nn.Linear(hidden//2, hidden//4),
nn.ReLU(),
nn.Linear(hidden//4,outdim)
)
def obs_norm(self, obs_bo, obsnorm_mean, obsnorm_meansq):
obsnorm_mean = torch.FloatTensor(obsnorm_mean)
obsnorm_meansq = torch.FloatTensor(obsnorm_meansq)
obsnorm_stdev = torch.sqrt(torch.max(torch.zeros(obsnorm_mean.size()), obsnorm_meansq - obsnorm_mean**2)).to(device)
normedobs_bo = (obs_bo - obsnorm_mean.to(device)) / (obsnorm_stdev + 1e-6)
return normedobs_bo
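            # i.e. observations are standardized with the stored standardizer statistics:
            #   normed = (obs - mean) / (sqrt(max(0, E[obs^2] - mean^2)) + 1e-6)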
def forward(self, obs_bo,obsnorm_mean, obsnorm_meansq):
return self.fc1(self.obs_norm(obs_bo,obsnorm_mean, obsnorm_meansq))
#training simple_model
model = simple_model(x.shape[1], #input dim
args.hidden_dim, #hidden dim
y.shape[1]).to(device) #output dim
optimizer = Adam(filter(lambda p: p.requires_grad, model.parameters()),weight_decay = args.weight_decay)
for i in range(args.epoch):
epoch_loss = 0
for idx in range(0,num_train,args.batchsize):
x_batch, y_batch = torch.FloatTensor(x_train[idx:idx+args.batchsize]).to(device), torch.FloatTensor(y_train[idx:idx+args.batchsize]).to(device)
optimizer.zero_grad()
y_ = model(x_batch,data['standardizer_mean'],data['standardizer_meansq'])
loss = F.mse_loss(y_,y_batch)
epoch_loss = epoch_loss + loss
loss.backward()
optimizer.step()
print ("epoch :", i+1)
print ("train loss: ", (epoch_loss/num_train).cpu().detach().numpy())
epoch_loss = 0
for idx in range(0,num_val,args.batchsize):
x_batch, y_batch = x_val[idx:idx+args.batchsize],y_val[idx:idx+args.batchsize]
x_batch, y_batch = torch.FloatTensor(x_batch), torch.FloatTensor(y_batch)
            x_batch, y_batch = x_batch.to(device), y_batch.to(device)
y_ = model(x_batch,data['standardizer_mean'],data['standardizer_meansq'])
loss = F.mse_loss(y_,y_batch)
epoch_loss = epoch_loss + loss
print ("val loss: ", (epoch_loss/num_val).cpu().detach().numpy())
print ("\n")
#evaluate trained policy
env = gym.make(args.task)
returns = []
observations = []
actions = []
max_steps = env.spec.timestep_limit
for i in range(args.test_rollouts):
print('iter', i)
obs = env.reset()
done = False
totalr = 0.
steps = 0
while not done:
            action = model(torch.FloatTensor(obs).to(device), data['standardizer_mean'], data['standardizer_meansq']).cpu().detach().numpy()
obs, r, done, _ = env.step(action)
totalr += r
steps += 1
if args.render:
env.render()
if steps >= max_steps:
break
returns.append(totalr)
bc_mean=np.mean(returns)
bc_std=np.std(returns)
print ('\n' + args.task)
print('\n <bc policy>')
print('mean return', bc_mean)
print('std of return', bc_std)
print('\n <expert policy>')
print('mean return', np.mean(data['returns']))
print('std of return', np.std(data['returns']))
if __name__ == '__main__':
main()
| avg_line_length: 34.018868 | max_line_length: 155 | alphanum_fraction: 0.577556 |

hexsha: a73a1aede40008f0e01cf27b6f89e802aa585e8a | size: 5583 | ext: py | lang: Python
max_stars:  path: mongo/tests/conftest.py | repo: glasser/integrations-core | head: 1dd515d49b1690a1369ee5195713605b1b072b1f | licenses: ["BSD-3-Clause"] | count: null | dates: null .. null
max_issues: path: mongo/tests/conftest.py | repo: glasser/integrations-core | head: 1dd515d49b1690a1369ee5195713605b1b072b1f | licenses: ["BSD-3-Clause"] | count: 4 | dates: 2019-07-03T02:53:19.000Z .. 2019-07-10T14:52:14.000Z
max_forks:  path: mongo/tests/conftest.py | repo: glasser/integrations-core | head: 1dd515d49b1690a1369ee5195713605b1b072b1f | licenses: ["BSD-3-Clause"] | count: 1 | dates: 2019-12-23T13:35:17.000Z .. 2019-12-23T13:35:17.000Z
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import time
import pymongo
import pytest
from datadog_checks.dev import LazyFunction, WaitFor, docker_run, run_command
from datadog_checks.mongo import MongoDb
from . import common
@pytest.fixture(scope='session')
def dd_environment(instance):
compose_file = os.path.join(common.HERE, 'compose', 'docker-compose.yml')
with docker_run(
compose_file, conditions=[WaitFor(setup_sharding, args=(compose_file,), attempts=5, wait=5), InitializeDB()]
):
yield instance
@pytest.fixture(scope='session')
def instance():
return {'server': common.MONGODB_SERVER}
@pytest.fixture(scope='session')
def instance_user():
return {'server': 'mongodb://testUser2:testPass2@{}:{}/test'.format(common.HOST, common.PORT1)}
@pytest.fixture(scope='session')
def instance_authdb():
return {'server': 'mongodb://testUser:testPass@{}:{}/test?authSource=authDB'.format(common.HOST, common.PORT1)}
@pytest.fixture(scope='session')
def instance_custom_queries():
return {
'server': 'mongodb://testUser2:testPass2@{}:{}/test'.format(common.HOST, common.PORT1),
'custom_queries': [
{
"metric_prefix": "dd.custom.mongo.query_a",
"query": {'find': "orders", 'filter': {'amount': {'$gt': 25}}, 'sort': {'amount': -1}},
"fields": [
{"field_name": "cust_id", "name": "cluster_id", "type": "tag"},
{"field_name": "status", "name": "status_tag", "type": "tag"},
{"field_name": "amount", "name": "amount", "type": "count"},
{"field_name": "elements", "name": "el", "type": "count"},
],
"tags": ['tag1:val1', 'tag2:val2'],
},
{
"query": {'count': "foo", 'query': {'1': {'$type': 16}}},
"metric_prefix": "dd.custom.mongo.count",
"tags": ['tag1:val1', 'tag2:val2'],
"count_type": 'gauge',
},
{
"query": {
'aggregate': "orders",
'pipeline': [
{"$match": {"status": "A"}},
{"$group": {"_id": "$cust_id", "total": {"$sum": "$amount"}}},
{"$sort": {"total": -1}},
],
'cursor': {},
},
"fields": [
{"field_name": "total", "name": "total", "type": "count"},
{"field_name": "_id", "name": "cluster_id", "type": "tag"},
],
"metric_prefix": "dd.custom.mongo.aggregate",
"tags": ['tag1:val1', 'tag2:val2'],
},
],
}
@pytest.fixture(scope='session')
def instance_1valid_and_1invalid_custom_queries():
return {
'server': 'mongodb://testUser2:testPass2@{}:{}/test'.format(common.HOST, common.PORT1),
'custom_queries': [
{
"metric_prefix": "dd.custom.mongo.count",
# invalid query with missing query, skipped with error/warning logs
},
{
"query": {'count': "foo", 'query': {'1': {'$type': 16}}},
"metric_prefix": "dd.custom.mongo.count",
"tags": ['tag1:val1', 'tag2:val2'],
"count_type": 'gauge',
},
],
}
@pytest.fixture
def check():
return MongoDb('mongo', {}, {})
def setup_sharding(compose_file):
service_commands = [
('config01', 'mongo --port 27017 < /scripts/init-configserver.js'),
('shard01a', 'mongo --port 27018 < /scripts/init-shard01.js'),
('shard02a', 'mongo --port 27019 < /scripts/init-shard02.js'),
('shard03a', 'mongo --port 27020 < /scripts/init-shard03.js'),
('router', 'mongo < /scripts/init-router.js'),
]
for i, (service, command) in enumerate(service_commands, 1):
# Wait before router init
if i == len(service_commands):
time.sleep(20)
run_command(['docker-compose', '-f', compose_file, 'exec', '-T', service, 'sh', '-c', command], check=True)
class InitializeDB(LazyFunction):
def __call__(self):
cli = pymongo.mongo_client.MongoClient(
common.MONGODB_SERVER, socketTimeoutMS=30000, read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED
)
foos = []
for i in range(70):
foos.append({'1': []})
foos.append({'1': i})
foos.append({})
bars = []
for _ in range(50):
bars.append({'1': []})
bars.append({})
orders = [
{"cust_id": "abc1", "status": "A", "amount": 50, "elements": 3},
{"cust_id": "xyz1", "status": "A", "amount": 100},
{"cust_id": "abc1", "status": "D", "amount": 50, "elements": 1},
{"cust_id": "abc1", "status": "A", "amount": 25},
{"cust_id": "xyz1", "status": "A", "amount": 25},
{"cust_id": "abc1", "status": "A", "amount": 300, "elements": 10},
]
db = cli['test']
db.foo.insert_many(foos)
db.bar.insert_many(bars)
db.orders.insert_many(orders)
auth_db = cli['authDB']
auth_db.command("createUser", 'testUser', pwd='testPass', roles=[{'role': 'read', 'db': 'test'}])
db.command("createUser", 'testUser2', pwd='testPass2', roles=[{'role': 'read', 'db': 'test'}])
| avg_line_length: 34.89375 | max_line_length: 116 | alphanum_fraction: 0.516747 |

hexsha: b4a6552c3e9c361e15efa6abdb41ac6c100c44b1 | size: 25463 | ext: py | lang: Python
max_stars:  path: server/tests-py/test_graphql_queries.py | repo: open-source-explorer/graphql-engine | head: d14300e9c917ed433eebd99d2ef1096951bfe840 | licenses: ["Apache-2.0", "MIT"] | count: null | dates: null .. null
max_issues: path: server/tests-py/test_graphql_queries.py | repo: open-source-explorer/graphql-engine | head: d14300e9c917ed433eebd99d2ef1096951bfe840 | licenses: ["Apache-2.0", "MIT"] | count: null | dates: null .. null
max_forks:  path: server/tests-py/test_graphql_queries.py | repo: open-source-explorer/graphql-engine | head: d14300e9c917ed433eebd99d2ef1096951bfe840 | licenses: ["Apache-2.0", "MIT"] | count: null | dates: null .. null
import yaml
import pytest
from validate import check_query_f
from super_classes import DefaultTestSelectQueries
@pytest.mark.parametrize("transport", ['http', 'websocket'])
class TestGraphQLQueryBasic(DefaultTestSelectQueries):
def test_select_query_author(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_query_author.yaml', transport)
def test_select_various_postgres_types(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_query_test_types.yaml', transport)
def test_select_query_author_quoted_col(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_query_author_col_quoted.yaml', transport)
def test_select_query_author_pk(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_query_author_by_pkey.yaml', transport)
def test_select_query_where(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_query_author_where.yaml', transport)
def test_nested_select_query_article_author(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/nested_select_query_article_author.yaml', transport)
def test_nested_select_query_deep(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/nested_select_query_deep.yaml', transport)
def test_nested_select_query_where(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/nested_select_where_query_author_article.yaml', transport)
def test_nested_select_query_where_on_relationship(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/nested_select_query_article_author_where_on_relationship.yaml', transport)
def test_select_query_user(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + "/select_query_user.yaml", transport)
def test_select_query_non_tracked_table(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + "/select_query_non_tracked_table_err.yaml", transport)
def test_select_query_col_not_present_err(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + "/select_query_author_col_not_present_err.yaml", transport)
def test_select_query_user_col_change(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + "/select_query_user_col_change.yaml")
def test_nested_select_with_foreign_key_alter(self, hge_ctx, transport):
transport = 'http'
check_query_f(hge_ctx, self.dir() + "/nested_select_with_foreign_key_alter.yaml", transport)
def test_select_query_invalid_escape_sequence(self, hge_ctx, transport):
transport = 'http'
check_query_f(hge_ctx, self.dir() + "/select_query_invalid_escape_sequence.yaml", transport)
@classmethod
def dir(cls):
return 'queries/graphql_query/basic'
@pytest.mark.parametrize("transport", ['http', 'websocket'])
class TestGraphQLQueryAgg(DefaultTestSelectQueries):
def test_article_agg_count_sum_avg_max_min_with_aliases(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/article_agg_count_sum_avg_max_min_with_aliases.yaml', transport)
def test_article_agg_where(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/article_agg_where.yaml', transport)
def test_author_agg_with_articles(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/author_agg_with_articles.yaml', transport)
def test_author_agg_with_articles_where(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/author_agg_with_articles_where.yaml', transport)
def test_article_deeply_nested_aggregate(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/article_deeply_nested_aggregate.yaml', transport)
@classmethod
def dir(cls):
return 'queries/graphql_query/aggregations'
@pytest.mark.parametrize("transport", ['http', 'websocket'])
class TestGraphQLQueryAggPerm(DefaultTestSelectQueries):
def test_author_agg_articles(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/author_agg_articles.yaml', transport)
def test_article_agg_fail(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/article_agg_fail.yaml', transport)
def test_author_articles_agg_fail(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/author_articles_agg_fail.yaml', transport)
def test_author_post_agg_order_by(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/author_post_agg_order_by.yaml', transport)
@classmethod
def dir(cls):
return 'queries/graphql_query/agg_perm'
class TestGraphQLQueryLimits(DefaultTestSelectQueries):
@pytest.mark.parametrize("transport", ['http', 'websocket'])
def test_limit_1(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_query_article_limit_1.yaml', transport)
@pytest.mark.parametrize("transport", ['http', 'websocket'])
def test_limit_2(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_query_article_limit_2.yaml', transport)
def test_limit_null(self, hge_ctx):
check_query_f(hge_ctx, self.dir() + '/select_query_article_limit_null.yaml')
@pytest.mark.parametrize("transport", ['http', 'websocket'])
def test_err_str_limit_error(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_query_article_string_limit_error.yaml', transport)
@pytest.mark.parametrize("transport", ['http', 'websocket'])
def test_err_neg_limit_error(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_query_article_neg_limit_error.yaml', transport)
@classmethod
def dir(cls):
return 'queries/graphql_query/limits'
class TestGraphQLQueryOffsets(DefaultTestSelectQueries):
@pytest.mark.parametrize("transport", ['http', 'websocket'])
def test_offset_1_limit_2(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_query_article_offset_1_limit_2.yaml', transport)
@pytest.mark.parametrize("transport", ['http', 'websocket'])
def test_offset_2_limit_1(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_query_article_offset_2_limit_1.yaml', transport)
@pytest.mark.parametrize("transport", ['http', 'websocket'])
def test_int_as_string_offset(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_query_article_string_offset.yaml', transport)
def test_err_neg_offset_error(self, hge_ctx):
check_query_f(hge_ctx, self.dir() + '/select_query_article_neg_offset_error.yaml')
@classmethod
def dir(cls):
return 'queries/graphql_query/offset'
@pytest.mark.parametrize("transport", ['http', 'websocket'])
class TestGraphQLQueryBoolExpBasic(DefaultTestSelectQueries):
def test_author_article_where_not_equal(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_author_article_where_neq.yaml', transport)
def test_author_article_operator_ne_not_found_err(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_author_article_operator_ne_not_found_err.yaml', transport)
def test_author_article_where_greater_than(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_author_article_where_gt.yaml', transport)
def test_author_article_where_greater_than_or_equal(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_author_article_where_gte.yaml', transport)
def test_author_article_where_less_than(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_author_article_where_lt.yaml', transport)
def test_author_article_where_less_than_or_equal(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_author_article_where_lte.yaml', transport)
def test_author_article_where_in(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_author_article_where_in.yaml', transport)
def test_author_article_where_in_empty_array(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_author_article_where_in_empty_array.yaml', transport)
def test_author_article_where_nin_empty_array(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_author_article_where_nin_empty_array.yaml', transport)
def test_author_article_where_nin(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_author_article_where_nin.yaml', transport)
def test_uuid_test_in_uuid_col(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_uuid_test_in_uuid_col.yaml', transport)
def test_order_delivered_at_is_null(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_query_order_delivered_at_is_null.yaml', transport)
def test_order_delivered_at_is_not_null(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_query_order_delivered_at_is_not_null.yaml', transport)
def test_author_article_where_not_less_than(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_author_article_where_not_lt.yaml', transport)
def test_article_author_is_published_and_registered(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_article_author_is_published_and_registered.yaml', transport)
def test_article_author_not_published_nor_registered(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_article_author_not_published_or_not_registered.yaml', transport)
def test_article_author_unexpected_operator_in_where_err(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_author_article_unexpected_operator_in_where_err.yaml', transport)
def test_self_referential_relationships(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/self_referential_relationships.yaml', transport)
def test_query_account_permission_success(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/query_account_permission_success.yaml', transport)
def test_query_account_permission_fail(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/query_account_permission_fail.yaml', transport)
@classmethod
def dir(cls):
return 'queries/graphql_query/boolexp/basic'
@pytest.mark.parametrize("transport", ['http', 'websocket'])
class TestGraphqlQueryPermissions(DefaultTestSelectQueries):
def test_user_select_unpublished_articles(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/user_select_query_unpublished_articles.yaml', transport)
def test_user_only_other_users_published_articles(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/user_can_query_other_users_published_articles.yaml', transport)
def test_anonymous_only_published_articles(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/anonymous_can_only_get_published_articles.yaml', transport)
def test_user_cannot_access_remarks_col(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/user_cannot_access_remarks_col.yaml', transport)
def test_user_can_query_geometry_values_filter(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/user_can_query_geometry_values_filter.yaml', transport)
def test_user_can_query_geometry_values_filter_session_vars(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/user_can_query_geometry_values_filter_session_vars.yaml', transport)
def test_user_can_query_jsonb_values_filter(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/user_can_query_jsonb_values_filter.yaml', transport)
def test_user_can_query_jsonb_values_filter_session_vars(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/user_can_query_jsonb_values_filter_session_vars.yaml', transport)
def test_artist_select_query_Track_fail(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/artist_select_query_Track_fail.yaml', transport)
def test_artist_select_query_Track(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/artist_select_query_Track.yaml', transport)
def test_artist_search_tracks(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/artist_search_tracks.yaml', transport)
def test_artist_search_tracks_aggregate(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/artist_search_tracks_aggregate.yaml', transport)
def test_staff_passed_students(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/staff_passed_students.yaml', transport)
def test_user_query_auction(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/user_query_auction.yaml', transport)
@pytest.mark.xfail(reason="Refer https://github.com/hasura/graphql-engine-internal/issues/252")
def test_jsonb_has_all(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/jsonb_has_all.yaml', transport)
def test_jsonb_has_any(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/jsonb_has_any.yaml', transport)
def test_in_and_nin(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/in_and_nin.yaml', transport)
@classmethod
def dir(cls):
return 'queries/graphql_query/permissions'
@pytest.mark.parametrize("transport", ['http', 'websocket'])
class TestGraphQLQueryBoolExpSearch(DefaultTestSelectQueries):
def test_city_where_like(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_city_where_like.yaml', transport)
def test_city_where_not_like(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_city_where_nlike.yaml', transport)
def test_city_where_ilike(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_city_where_ilike.yaml', transport)
def test_city_where_not_ilike(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_city_where_nilike.yaml', transport)
def test_city_where_similar(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_city_where_similar.yaml', transport)
def test_city_where_not_similar(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_city_where_not_similar.yaml', transport)
@classmethod
def dir(cls):
return 'queries/graphql_query/boolexp/search'
@pytest.mark.parametrize("transport", ['http', 'websocket'])
class TestGraphQLQueryBoolExpJsonB(DefaultTestSelectQueries):
def test_jsonb_contains_article_latest(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_article_author_jsonb_contains_latest.yaml', transport)
def test_jsonb_contains_article_beststeller(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_author_article_jsonb_contains_bestseller.yaml', transport)
def test_jsonb_contained_in_latest(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_article_author_jsonb_contained_in_latest.yaml', transport)
def test_jsonb_contained_in_bestseller_latest(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_article_author_jsonb_contained_in_bestseller_latest.yaml', transport)
def test_jsonb_has_key_sim_type(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_product_jsonb_has_key_sim_type.yaml', transport)
def test_jsonb_has_keys_any_os_operating_system(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_product_jsonb_has_keys_any_os_operating_system.yaml', transport)
def test_jsonb_has_keys_all_touchscreen_ram(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_product_jsonb_has_keys_all_ram_touchscreen.yaml', transport)
@classmethod
def dir(cls):
return 'queries/graphql_query/boolexp/jsonb'
@pytest.mark.parametrize("transport", ['http', 'websocket'])
class TestGraphQLQueryBoolExpPostGIS(DefaultTestSelectQueries):
def test_query_using_point(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/query_using_point.yaml', transport)
def test_query_using_line(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/query_using_line.yaml', transport)
def test_query_using_polygon(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/query_using_polygon.yaml', transport)
def test_query_geography_spatial_ops(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/query_geography_spatial_ops.yaml', transport)
def test_query_cast_geometry_to_geography(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/query_cast_geometry_to_geography.yaml', transport)
def test_query_cast_geography_to_geometry(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/query_cast_geography_to_geometry.yaml', transport)
def test_query_illegal_cast_is_not_allowed(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/query_illegal_cast_is_not_allowed.yaml', transport)
@classmethod
def dir(cls):
return 'queries/graphql_query/boolexp/postgis'
@pytest.mark.parametrize("transport", ['http', 'websocket'])
class TestGraphQLQueryBoolExpRaster(DefaultTestSelectQueries):
def test_query_st_intersects_geom_nband(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/query_st_intersects_geom_nband.yaml', transport)
def test_query_st_intersects_geom_nband_no_rows(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/query_st_intersects_geom_nband_no_rows.yaml', transport)
def test_query_st_intersects_rast(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/query_st_intersects_rast.yaml', transport)
def test_query_st_intersects_rast_no_rows(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/query_st_intersects_rast_no_rows.yaml', transport)
def test_query_st_intersects_rast_fail(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/query_st_intersects_rast_fail.yaml', transport)
@classmethod
def dir(cls):
return 'queries/graphql_query/boolexp/raster'
@pytest.mark.parametrize("transport", ['http', 'websocket'])
class TestGraphQLQueryOrderBy(DefaultTestSelectQueries):
def test_articles_order_by_without_id(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/articles_order_by_without_id.yaml', transport)
def test_articles_order_by_rel_author_id(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/articles_order_by_rel_author_id.yaml', transport)
def test_articles_order_by_rel_author_rel_contact_phone(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/articles_order_by_rel_author_rel_contact_phone.yaml', transport)
def test_articles_order_by_null(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/articles_order_by_null.yaml', transport)
def test_album_order_by_tracks_count(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/album_order_by_tracks_count.yaml', transport)
def test_album_order_by_tracks_duration_avg(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/album_order_by_tracks_duration_avg.yaml', transport)
def test_album_order_by_tracks_max_name(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/album_order_by_tracks_max_name.yaml', transport)
def test_album_order_by_tracks_bytes_stddev(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/album_order_by_tracks_bytes_stddev.yaml', transport)
def test_employee_distinct_department_order_by_salary_desc(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/employee_distinct_department_order_by_salary_desc.yaml', transport)
def test_employee_distinct_department_order_by_salary_asc(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/employee_distinct_department_order_by_salary_asc.yaml', transport)
def test_employee_distinct_fail(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/employee_distinct_fail.yaml', transport)
def test_album_order_by_tracks_tags(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/album_order_by_tracks_tags.yaml', transport)
@classmethod
def dir(cls):
return 'queries/graphql_query/order_by'
class TestGraphQLQueryFunctions(DefaultTestSelectQueries):
@pytest.mark.parametrize("transport", ['http', 'websocket'])
def test_search_posts(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + "/query_search_posts.yaml")
@pytest.mark.parametrize("transport", ['http', 'websocket'])
def test_search_posts_aggregate(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + "/query_search_posts_aggregate.yaml")
@pytest.mark.parametrize("transport", ['http', 'websocket'])
def test_query_get_users(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + "/query_get_users.yaml", transport)
@pytest.mark.parametrize("transport", ['http', 'websocket'])
def test_query_get_users_arguments_error(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + "/query_get_users_arguments_error.yaml", transport)
@pytest.mark.parametrize("transport", ['http', 'websocket'])
def test_query_get_users_default_arguments_error(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + "/query_get_users_default_arguments_error.yaml", transport)
def test_alter_function_error(self, hge_ctx):
check_query_f(hge_ctx, self.dir() + '/alter_function_error.yaml')
def test_overloading_function_error(self, hge_ctx):
check_query_f(hge_ctx, self.dir() + '/overloading_function_error.yaml')
def test_query_get_test_uuid(self, hge_ctx):
check_query_f(hge_ctx, self.dir() + '/query_get_test_uuid.yaml')
def test_query_my_add(self, hge_ctx):
check_query_f(hge_ctx, self.dir() + '/query_my_add.yaml')
@classmethod
def dir(cls):
return 'queries/graphql_query/functions'
@pytest.mark.parametrize("transport", ['http', 'websocket'])
class TestGraphQLQueryCustomSchema(DefaultTestSelectQueries):
def test_author(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/author.yaml', transport)
def test_article(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/article.yaml', transport)
@classmethod
def dir(cls):
return 'queries/graphql_query/custom_schema'
@pytest.mark.parametrize('transport', ['http', 'websocket'])
class TestGraphQLQueryEnums(DefaultTestSelectQueries):
@classmethod
def dir(cls):
return 'queries/graphql_query/enums'
def test_introspect(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/introspect.yaml', transport)
def test_select_enum_field(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_enum_field.yaml', transport)
def test_select_where_enum_eq(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_where_enum_eq.yaml', transport)
def test_select_where_enum_eq_bad_value(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_where_enum_eq_bad_value.yaml', transport)
def test_select_where_enum_eq_string(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_where_enum_eq_string.yaml', transport)
def test_select_where_enum_eq_variable(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_where_enum_eq_variable.yaml', transport)
def test_select_where_enum_eq_variable_bad_value(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_where_enum_eq_variable_bad_value.yaml', transport)
def test_select_where_enum_eq_without_enum_table_visibility(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/select_where_enum_eq_without_enum_table_visibility.yaml', transport)
@pytest.mark.parametrize('transport', ['http', 'websocket'])
class TestGraphQLQueryComputedFields(DefaultTestSelectQueries):
@classmethod
def dir(cls):
return 'queries/graphql_query/computed_fields'
def test_computed_fields(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/computed_fields.yaml', transport)
def test_computed_fields_permission(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/computed_fields_permission.yaml', transport)
@pytest.mark.parametrize('transport', ['http', 'websocket'])
class TestGraphQLQueryCaching(DefaultTestSelectQueries):
@classmethod
def dir(cls):
return 'queries/graphql_query/caching'
def test_include_directive(self, hge_ctx, transport):
check_query_f(hge_ctx, self.dir() + '/include_directive.yaml', transport)
| 48.225379
| 122
| 0.756392
|
932fb4975d926b5d0482909258077d62cbdf308b
| 12,700
|
py
|
Python
|
model_compression_toolkit/keras/keras_implementation.py
|
eladc-git/model_optimization
|
46d1c893ca23e61d8ef7597184ad2ba6e2ae6e7a
|
[
"Apache-2.0"
] | null | null | null |
model_compression_toolkit/keras/keras_implementation.py
|
eladc-git/model_optimization
|
46d1c893ca23e61d8ef7597184ad2ba6e2ae6e7a
|
[
"Apache-2.0"
] | null | null | null |
model_compression_toolkit/keras/keras_implementation.py
|
eladc-git/model_optimization
|
46d1c893ca23e61d8ef7597184ad2ba6e2ae6e7a
|
[
"Apache-2.0"
] | null | null | null |
from typing import List, Any, Tuple, Callable
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
from model_compression_toolkit import QuantizationConfig, FrameworkInfo, common, GradientPTQConfig, \
MixedPrecisionQuantizationConfig
from model_compression_toolkit.common import Graph, BaseNode
from model_compression_toolkit.common.collectors.statistics_collector import BaseStatsCollector
from model_compression_toolkit.common.framework_implementation import FrameworkImplementation
from model_compression_toolkit.common.model_builder_mode import ModelBuilderMode
from model_compression_toolkit.common.node_prior_info import NodePriorInfo
from model_compression_toolkit.common.user_info import UserInformation
from model_compression_toolkit.keras.back2framework.model_builder import model_builder
from model_compression_toolkit.keras.default_framework_info import DEFAULT_KERAS_INFO
from model_compression_toolkit.keras.gradient_ptq.training_wrapper import gptq_training_wrapper
from model_compression_toolkit.keras.graph_substitutions.substitutions.activation_decomposition import \
ActivationDecomposition
from model_compression_toolkit.keras.graph_substitutions.substitutions.batchnorm_folding import \
keras_batchnorm_folding
from model_compression_toolkit.keras.graph_substitutions.substitutions.input_scaling import InputScaling, \
InputScalingWithPad
from model_compression_toolkit.keras.graph_substitutions.substitutions.mark_activation import MarkActivation
from model_compression_toolkit.keras.graph_substitutions.substitutions.relu_bound_correction import \
ReLUBoundCorrection
from model_compression_toolkit.keras.graph_substitutions.substitutions.remove_relu_upper_bound import \
RemoveReLUUpperBound
from model_compression_toolkit.keras.graph_substitutions.substitutions.scale_equalization import \
ScaleEqualization, ScaleEqualizationWithPad, ScaleEqualizationMidActivation, ScaleEqualizationMidActivationWithPad
from model_compression_toolkit.keras.graph_substitutions.substitutions.separableconv_decomposition import \
SeparableConvDecomposition
from model_compression_toolkit.keras.graph_substitutions.substitutions.shift_negative_activation import \
keras_apply_shift_negative_correction
from model_compression_toolkit.keras.keras_node_prior_info import create_node_prior_info
from model_compression_toolkit.keras.mixed_precision.sensitivity_evaluation import get_sensitivity_evaluation
from model_compression_toolkit.keras.reader.reader import model_reader
from model_compression_toolkit.common.collectors.statistics_collector_generator import create_stats_collector_for_node
import model_compression_toolkit.keras.constants as keras_constants
from model_compression_toolkit.keras.tf_tensor_numpy import tf_tensor_to_numpy, to_tf_tensor
class KerasImplementation(FrameworkImplementation):
"""
    A class with implemented methods to support optimizing Keras models.
"""
def __init__(self):
super().__init__()
@property
def constants(self):
"""
Returns: Module of Keras constants.
"""
return keras_constants
def model_reader(self,
model: Model,
representative_data_gen: Callable) -> Graph:
"""
Convert a framework's model into a graph.
Args:
model: Framework's model.
representative_data_gen (Callable): Dataset used for calibration.
Returns:
Graph representing the input model.
"""
return model_reader(model)
def to_numpy(self, tensor: tf.Tensor) -> np.ndarray:
"""
Convert framework's tensor to a Numpy array.
Args:
tensor: Framework's tensor.
Returns:
Numpy array converted from the input tensor.
"""
return tf_tensor_to_numpy(tensor)
    def to_tensor(self, tensor: np.ndarray) -> tf.Tensor:
"""
Convert a Numpy array to a framework's tensor.
Args:
tensor: Numpy array.
Returns:
Framework's tensor converted from the input Numpy array.
"""
return to_tf_tensor(tensor)
def model_builder(self,
graph: Graph,
mode: ModelBuilderMode,
append2output: List[Any] = None,
fw_info: FrameworkInfo = DEFAULT_KERAS_INFO) -> Tuple[Model, UserInformation]:
"""
Build a Keras model from a graph.
        The mode determines how the model should be built. append2output is a list of Nodes
to set as the model outputs.
Args:
            graph: Graph to build the model from.
mode: Mode for how to build the model.
append2output: List of Nodes to set as the model's outputs.
fw_info: FrameworkInfo object with information about the specific framework's model
Returns:
            A tuple of the Keras model that was built and a UserInformation object.
"""
return model_builder(graph,
mode,
append2output,
fw_info)
def run_model_inference(self,
model: Any,
input_list: List[Any]) -> Tuple[Any]:
"""
        Run the model logic on the given inputs.
Args:
model: Keras model.
input_list: List of inputs for the model.
Returns:
The Keras model's output.
"""
return model(input_list)
def shift_negative_correction(self,
graph: Graph,
qc: QuantizationConfig,
fw_info: FrameworkInfo) -> Graph:
"""
Apply shift negative correction (SNC) on a graph.
Args:
graph: Graph to apply SNC on.
qc: Quantization configuration.
fw_info: FrameworkInfo object with information about the specific framework's model.
Returns:
Graph after SNC.
"""
return keras_apply_shift_negative_correction(graph,
qc,
fw_info)
def attach_sc_to_node(self,
node: BaseNode,
output_channel_index: int) -> BaseStatsCollector:
"""
Return a statistics collector that should be attached to a node's output
during statistics collection.
Args:
node: Node to return its collector.
output_channel_index: Index of output channels of layers in the model's framework.
Returns:
Statistics collector for the node.
"""
return create_stats_collector_for_node(node,
output_channel_index=output_channel_index)
def get_substitutions_marking(self) -> List[common.BaseSubstitution]:
"""
Returns: A list of the framework substitutions used for marking
points we fuse.
"""
return [MarkActivation()]
def get_substitutions_pre_statistics_collection(self) -> List[common.BaseSubstitution]:
"""
        Returns: A list of the framework substitutions used before statistics collection.
"""
return [SeparableConvDecomposition(),
ActivationDecomposition(),
keras_batchnorm_folding()]
def get_substitutions_post_statistics_collection(self, quant_config: QuantizationConfig) -> List[
common.BaseSubstitution]:
"""
Return a list of the framework substitutions used after we collect statistics.
Args:
quant_config: QuantizationConfig to determine which substitutions to return.
Returns:
A list of the framework substitutions used after we collect statistics.
"""
substitutions_list = []
if quant_config.input_scaling:
substitutions_list.append(InputScaling())
substitutions_list.append(InputScalingWithPad())
if quant_config.relu_unbound_correction:
substitutions_list.append(ReLUBoundCorrection())
return substitutions_list
def get_substitutions_channel_equalization(self,
quant_config: QuantizationConfig,
fw_info: FrameworkInfo) -> List[common.BaseSubstitution]:
"""
Return a list of the framework substitutions used for channel equalization.
Args:
quant_config: QuantizationConfig to determine which substitutions to return.
fw_info: FrameworkInfo object with information about the specific framework's model.
Returns:
            A list of the framework substitutions used for channel equalization.
"""
substitutions_list = []
if quant_config.activation_channel_equalization:
substitutions_list.extend([ScaleEqualization(quant_config, fw_info),
ScaleEqualizationWithPad(quant_config, fw_info),
ScaleEqualizationMidActivation(quant_config, fw_info),
ScaleEqualizationMidActivationWithPad(quant_config, fw_info)])
return substitutions_list
def get_substitutions_pre_build(self) -> List[common.BaseSubstitution]:
"""
Returns: A list of the framework substitutions used before we build a quantized model.
"""
return [RemoveReLUUpperBound()]
def gptq_training(self,
graph: Graph,
representative_data_gen: Callable,
gptq_config: GradientPTQConfig,
fw_info: FrameworkInfo) -> Graph:
"""
        Update a graph using GPTQ after minimizing the loss between the float model's outputs
and the quantized model's outputs.
Args:
graph: Graph to fine-tune.
representative_data_gen: Dataset to use for inputs of the models.
gptq_config: GradientPTQConfig with configuration for the fine-tuning process.
fw_info: FrameworkInfo object with information about the specific framework's model.
Returns:
Updated graph after GPTQ.
"""
return gptq_training_wrapper(graph,
representative_data_gen,
gptq_config,
fw_info)
def get_sensitivity_evaluation_fn(self,
graph: Graph,
quant_config: MixedPrecisionQuantizationConfig,
metrics_weights: np.ndarray,
representative_data_gen: Callable,
fw_info: FrameworkInfo) -> Callable:
"""
Create and return a function to compute a sensitivity metric for a mixed-precision
        configuration (compared to the float Keras model).
Args:
            graph: Graph to build its float and mixed-precision Keras models.
quant_config: QuantizationConfig of how the model should be quantized.
metrics_weights: Array of weights to weight the sensitivity among different layers.
            representative_data_gen: Dataset to use for retrieving images for the model's inputs.
fw_info: FrameworkInfo object with information about the specific framework's model.
Returns:
A function that computes the metric.
"""
return get_sensitivity_evaluation(graph,
quant_config,
metrics_weights,
representative_data_gen,
fw_info)
def get_node_prior_info(self,
node: BaseNode,
fw_info: FrameworkInfo) -> NodePriorInfo:
"""
Get a NodePriorInfo object for a node that represents a Keras layer.
Args:
node: Node to get its prior info.
fw_info: Framework specific information needed to create the prior info of the node.
Returns:
NodePriorInfo with information about the node.
"""
return create_node_prior_info(node=node,
fw_info=fw_info)
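# Minimal usage sketch (an illustrative addition, not part of the original module): a round
# trip through the to_tensor/to_numpy helpers defined above, run only when executed directly.
if __name__ == "__main__":
    _impl = KerasImplementation()
    _arr = np.ones((1, 3), dtype=np.float32)
    assert np.allclose(_impl.to_numpy(_impl.to_tensor(_arr)), _arr)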
| 41.368078
| 118
| 0.642677
|
d572d6b5b7aec9fbc05f3252f633f14a53ddedb2
| 5,536
|
py
|
Python
|
dist_covid2.py
|
StraigenDaigen/social_distancing_ai
|
226ff20c0343d02280461527c9ff3c6fbf6dfcf5
|
[
"MIT"
] | 8
|
2021-08-21T09:39:15.000Z
|
2021-08-22T09:35:51.000Z
|
dist_covid2.py
|
StraigenDaigen/social_distancing_ai
|
226ff20c0343d02280461527c9ff3c6fbf6dfcf5
|
[
"MIT"
] | null | null | null |
dist_covid2.py
|
StraigenDaigen/social_distancing_ai
|
226ff20c0343d02280461527c9ff3c6fbf6dfcf5
|
[
"MIT"
] | 1
|
2021-08-30T20:40:10.000Z
|
2021-08-30T20:40:10.000Z
|
import jetson.inference
import jetson.utils
import cv2
import argparse
import sys
import numpy as np
#import imutils
import itertools
def ResizeWithAspectRatio(image, width=None, height=None, inter=cv2.INTER_AREA):
dim = None
(h, w) = image.shape[:2]
if width is None and height is None:
return image
if width is None:
r = height / float(h)
dim = (int(w * r), height)
else:
r = width / float(w)
dim = (width, int(h * r))
return cv2.resize(image, dim, interpolation=inter)
# parse the command line
parser = argparse.ArgumentParser(description="Monitoreo de distanciamiento social por COVID-19")
parser.add_argument("input_URI", type=str, default="", nargs='?', help="Directorio del video de entrada")
parser.add_argument("output_URI", type=str, default="", nargs='?', help="Directorio para guargar video")
parser.add_argument("--network", type=str, default="ssd-mobilenet-v2", help="Escoger modelo pre-entrenado (las opciones están abajo)")
parser.add_argument("--overlay", type=str, default="box,labels,conf", help="detection overlay flags (e.g. --overlay=box,labels,conf)\nvalid combinations are: 'box', 'labels', 'conf', 'none'")
parser.add_argument("--threshold", type=float, default=0.15, help="valor minimo de precisión para las detecciones")
try:
opt = parser.parse_known_args()[0]
except:
print("")
parser.print_help()
sys.exit(0)
# load the object detection network
net = jetson.inference.detectNet(opt.network, sys.argv, opt.threshold)
# create video sources & outputs
input = jetson.utils.videoSource(opt.input_URI, argv=sys.argv)
output = jetson.utils.videoOutput(opt.output_URI, argv=sys.argv)
cap = cv2.VideoCapture(opt.input_URI)
area_real_pts = np.array([[420,110],[642,100],[1069,594],[73,636]])
# Values used to center the map in the window
w_mapa_1 = int((800/2)+(200))#(1266/4))
w_mapa_2 = int((800/2)-(200))#(1266/4))
area_mapa_pts = np.array([[w_mapa_2,0],[w_mapa_1,0],[w_mapa_1,448],[w_mapa_2,448]])
# PERSPECTIVE TRANSFORM AND WARP PERSPECTIVE
src_pts = np.array([[425,110],[642,100],[1069,594],[73,636]], dtype=np.float32)
dst_pts = np.array([[w_mapa_2,0],[w_mapa_1,0],[w_mapa_1,710],[w_mapa_2,710]], dtype=np.float32)
M = cv2.getPerspectiveTransform(src_pts, dst_pts)
# process frames until the user exits
while True:
ret,frame=cap.read()
# capture the next image
img = input.Capture()
#frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
#frame_rgba = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
#width = frame.shape[1]
#height = frame.shape[0]
#cuda_mem = jetson.utils.cudaFromNumpy(frame)
# detect objects in the image (with overlay)
detections = net.Detect(img, overlay=opt.overlay)
#detections = net.Detect(cuda_mem, width, height)
#_,_,center,_,_,_,_,_,_ = net.Detect(img, overlay=opt.overlay)
#print("<<<<<DESDE AQUI>>>>")
#print(detections[0].Center)
#print("<<<<<HASTA AQUI>>>>")
# print the detections
print("detected {:d} objects in image".format(len(detections)))
frame = np.array(img)
#frame = imutils.resize(frame, width = 800)
#frame = imutils.resize(frame, width = 800)
imgb = cv2.cvtColor(frame,cv2.COLOR_RGBA2BGR)
imgAux = np.zeros(shape=(imgb.shape[:2]),dtype=np.uint8)
#imgAux = cv2.drawContours(imgAux,[area_mapa_pts],-1,(255),-1)
imgAux = cv2.drawContours(imgAux,[area_real_pts],-1,(255),-1)
warp = cv2.warpPerspective(imgAux, M, (w_mapa_1+w_mapa_2, 710))
puntos_real=[]
puntos_p=[]
for detection in detections:
pto_x = (detection.Center)[0]
pto_y = ((detection.Center)[1])+((detection.Height)/2)
p_real=(int(pto_x),int(pto_y))
puntos_real.append(p_real)
cv2.circle(imgb,(int(pto_x),int(pto_y)),5,(0,255,0),-1)
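        # Map the foot point into the top view by applying the homography M by hand
        # (equivalent to cv2.perspectiveTransform for a single point).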
p_mapa_x = (M[0][0]*pto_x + M[0][1]*pto_y + M[0][2]) / ((M[2][0]*pto_x + M[2][1]*pto_y + M[2][2]))
p_mapa_y = (M[1][0]*pto_x + M[1][1]*pto_y + M[1][2]) / ((M[2][0]*pto_x + M[2][1]*pto_y + M[2][2]))
p_mapa = (int(p_mapa_x),int(p_mapa_y))
cv2.circle(warp, p_mapa,5,(0,255,0),-1)
puntos_p.append(p_mapa)
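    # With the perspective removed in the top view, a fixed pixel threshold between projected
    # points approximates a fixed real-world distance, so pairs closer than 75 px are flagged.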
if len(puntos_p)>1:
for punto1, punto2 in itertools.combinations(puntos_p, 2):
x_p_trans = puntos_p.index(punto1)
y_p_trans = puntos_p.index(punto2)
cv2.line(imgb, puntos_real[x_p_trans], puntos_real[y_p_trans], [133, 133, 133], 1)
#distancia=dist.euclidean(punto1,punto2)
distancia = np.linalg.norm(np.array(punto1)-np.array(punto2))
if distancia < 75:
cv2.line(imgb, puntos_real[x_p_trans], puntos_real[y_p_trans], [0, 0, 255], 2)
(alto, ancho)=imgb.shape[:2]
peligro="PELIGRO DE CONTAGIO"
cv2.putText(imgb, peligro, (int(alto*0.55), int(ancho*0.55)),cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
aforo="Aforo: " + str(len(detections))
(alto, ancho)=imgb.shape[:2]
cv2.putText(imgb, aforo, (int(alto*0.1), int(ancho*0.55)),cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
# render the image
#output.Render(img)
#output.SetStatus("{:s} | Rendimiento {:.0f} FPS".format("DISTANCIAMIENTO DE PERSONAS", net.GetNetworkFPS()))
cv2.drawContours(imgb,[area_real_pts],-1,(0,0,0),2)
#image_rgb=cv2.cvtColor(imgb,cv2.COLOR_BGR2RGB)
cv2.putText(imgb,"| Rendimiento {:.0f} FPS".format(net.GetNetworkFPS()) , (int(alto*1.2), int(ancho*0.55)),cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2)
cv2.imshow("DISTANCIAMIENTO SOCIAL",imgb)
warpr = ResizeWithAspectRatio(warp, width=420)
cv2.imshow("MAPA DEL PLANO (Top view)",warpr)
key = cv2.waitKey(1) & 0xFF
if key == ord("q") or key==27:
break
# print out performance info
net.PrintProfilerTimes()
# exit on input/output EOS
if not input.IsStreaming() or not output.IsStreaming():
break
cap.release()
cv2.destroyAllWindows()
| 41.007407
| 192
| 0.70466
|
352f27cabaeb8124ff95bf2b8277a19f50119fae
| 215
|
py
|
Python
|
pyf/_has_sticky.py
|
snoopyjc/pythonizer
|
6b3683084f41f0aa06b1b4e652a0f00b19cceac1
|
[
"Artistic-2.0"
] | 1
|
2022-03-13T22:08:25.000Z
|
2022-03-13T22:08:25.000Z
|
pyf/_has_sticky.py
|
snoopyjc/pythonizer
|
6b3683084f41f0aa06b1b4e652a0f00b19cceac1
|
[
"Artistic-2.0"
] | 21
|
2022-03-17T16:53:04.000Z
|
2022-03-31T23:55:24.000Z
|
pyf/_has_sticky.py
|
snoopyjc/pythonizer
|
6b3683084f41f0aa06b1b4e652a0f00b19cceac1
|
[
"Artistic-2.0"
] | null | null | null |
import os
import stat
def _has_sticky(path): # -k
if not path:
return False
if hasattr(path, '_mode'):
return (path._mode & stat.S_ISVTX) != 0
return (os.stat(path).st_mode & stat.S_ISVTX) != 0
| 26.875
| 55
| 0.553488
|
e252a227ad5a83af1ff6e2c39d1e7ce07fc5502b
| 180
|
py
|
Python
|
salon/urls.py
|
amirRamirfatahi/beautstertest
|
53f6e515903fd11992fd1eb70760318b17896437
|
[
"MIT"
] | null | null | null |
salon/urls.py
|
amirRamirfatahi/beautstertest
|
53f6e515903fd11992fd1eb70760318b17896437
|
[
"MIT"
] | 5
|
2021-03-30T13:08:19.000Z
|
2021-09-22T18:54:13.000Z
|
salon/urls.py
|
amirRamirfatahi/beautstertest
|
53f6e515903fd11992fd1eb70760318b17896437
|
[
"MIT"
] | null | null | null |
from django.urls import path
from salon.api import views
urlpatterns = [
path(r'', views.listsalon_view, name='salons'),
path(r'<pk>', views.salon_view, name='salon')
]
| 18
| 51
| 0.683333
|
45a045f14cc57bb6270b293ae0ef7c2e7f09cb89
| 7,410
|
py
|
Python
|
autobuilder/workers/config.py
|
madisongh/autobuilder
|
5290f904334e0236d9c45ed6cf5334fed00d9660
|
[
"MIT"
] | null | null | null |
autobuilder/workers/config.py
|
madisongh/autobuilder
|
5290f904334e0236d9c45ed6cf5334fed00d9660
|
[
"MIT"
] | null | null | null |
autobuilder/workers/config.py
|
madisongh/autobuilder
|
5290f904334e0236d9c45ed6cf5334fed00d9660
|
[
"MIT"
] | 1
|
2018-12-07T16:20:57.000Z
|
2018-12-07T16:20:57.000Z
|
import os
import socket
import string
from random import SystemRandom
import jinja2
from buildbot.plugins import worker
from autobuilder.workers.ec2 import MyEC2LatentWorker
RNG = SystemRandom()
default_svp = {'name': '/dev/xvdf', 'size': 200,
'type': 'standard', 'iops': None}
class AutobuilderWorker(worker.Worker):
def __init__(self, name, password, conftext=None, max_builds=1):
if conftext:
conftext = [conftext] if isinstance(conftext, str) else conftext
else:
conftext = []
if max_builds > 1:
conftext += ['BB_NUMBER_THREADS = "${@oe.utils.cpu_count() // %d}"' % max_builds,
'PARALLEL_MAKE = "-j ${@oe.utils.cpu_count() // %d}"' % max_builds]
super().__init__(name, password, max_builds=max_builds, properties={'worker_extraconf': conftext})
class EC2Params(object):
def __init__(self, instance_type, ami, secgroup_ids, keypair=None,
region=None, subnet=None, elastic_ip=None, tags=None,
scratchvol=False, scratchvol_params=None,
instance_profile_name=None, spot_instance=False,
max_spot_price=None, price_multiplier=None,
instance_types=None, build_wait_timeout=None,
subnets=None):
self.instance_type = instance_type
self.instance_types = instance_types
self.ami = ami
self.keypair = keypair
self.region = region
self.secgroup_ids = secgroup_ids
self.subnet = subnet
self.subnets = subnets
self.elastic_ip = elastic_ip
self.tags = tags
if build_wait_timeout:
self.build_wait_timeout = build_wait_timeout
else:
self.build_wait_timeout = 0 if spot_instance else 300
if scratchvol:
self.scratchvolparams = scratchvol_params or default_svp
else:
self.scratchvolparams = None
self.instance_profile_name = instance_profile_name
self.spot_instance = spot_instance
if self.spot_instance:
if max_spot_price is None and price_multiplier is None:
raise ValueError('You must provide either max_spot_price, or '
'price_multiplier, or both, to use spot instances')
if instance_type:
if instance_types:
raise ValueError('Specify only one of instance_type, instance_types '
'for spot instances')
self.instance_types = [instance_type]
self.instance_type = None
else:
if not instance_types:
raise ValueError('Missing instance_types for spot instance worker config')
if subnet:
if subnets:
raise ValueError('Specify only one of subnet, subnets for spot instances')
self.subnets = [subnet]
self.subnet = None
elif not subnets:
raise ValueError('Missing subnets for spot instance worker config')
else:
if instance_types:
raise ValueError('instance_types only valid for spot instance worker configs')
if subnets:
raise ValueError('subnets only valid for spot instance worker configs')
if not instance_type:
raise ValueError('Invalid instance_type')
self.max_spot_price = max_spot_price
self.price_multiplier = price_multiplier
class AutobuilderEC2Worker(MyEC2LatentWorker):
master_hostname = socket.gethostname()
master_ip_address = os.getenv('MASTER_IP_ADDRESS') or socket.gethostbyname(master_hostname)
master_fqdn = socket.getaddrinfo(master_hostname, 0, flags=socket.AI_CANONNAME)[0][3]
def __init__(self, name, password, ec2params, conftext=None, max_builds=1,
userdata_template_dir=None, userdata_template_file='cloud-init.txt',
userdata_dict=None):
if not password:
password = ''.join(RNG.choice(string.ascii_letters + string.digits) for _ in range(16))
if conftext:
conftext = [conftext] if isinstance(conftext, str) else conftext
else:
conftext = []
if max_builds > 1:
conftext += ['BB_NUMBER_THREADS = "${@oe.utils.cpu_count() // %d}"' % max_builds,
'PARALLEL_MAKE = "-j ${@oe.utils.cpu_count() // %d}"' % max_builds]
ec2tags = ec2params.tags
if ec2tags:
if 'Name' not in ec2tags:
tagscopy = ec2tags.copy()
tagscopy['Name'] = name
ec2tags = tagscopy
else:
ec2tags = {'Name': name}
ec2_dev_mapping = None
svp = ec2params.scratchvolparams
if svp:
ebs = {
'VolumeType': svp['type'],
'VolumeSize': svp['size'],
'DeleteOnTermination': True
}
if 'encrypted' in svp:
ebs['Encrypted'] = svp['encrypted']
if svp['type'] == 'io1':
if svp['iops']:
ebs['Iops'] = svp['iops']
else:
ebs['Iops'] = 1000
ec2_dev_mapping = [
{'DeviceName': svp['name'], 'Ebs': ebs}
]
ctx = {'workername': name,
'workersecret': password,
'master_ip': self.master_ip_address,
'master_hostname': self.master_hostname,
'master_fqdn': self.master_fqdn,
'extra_packages': [],
'extra_cmds': []}
if userdata_dict:
ctx.update(userdata_dict)
if userdata_template_file:
if userdata_template_dir is None:
userdata_template_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), "templates")
loader = jinja2.FileSystemLoader(userdata_template_dir)
env = jinja2.Environment(loader=loader, undefined=jinja2.StrictUndefined)
userdata = env.get_template(userdata_template_file).render(ctx)
else:
userdata = '\n'.join(['WORKERNAME={}',
'WORKERSECRET={}',
'MASTER={}']).format(name, password, self.master_ip_address)
self.userdata_extra_context = userdata_dict
super().__init__(name=name, password=password, max_builds=max_builds,
instance_type=ec2params.instance_type, ami=ec2params.ami,
keypair_name=ec2params.keypair, instance_profile_name=ec2params.instance_profile_name,
security_group_ids=ec2params.secgroup_ids, region=ec2params.region,
subnet_id=ec2params.subnet, subnet_ids=ec2params.subnets,
user_data=userdata, elastic_ip=ec2params.elastic_ip,
tags=ec2tags, block_device_map=ec2_dev_mapping,
spot_instance=ec2params.spot_instance, build_wait_timeout=ec2params.build_wait_timeout,
max_spot_price=ec2params.max_spot_price, price_multiplier=ec2params.price_multiplier,
instance_types=ec2params.instance_types,
properties={'worker_extraconf': conftext})
| 46.024845
| 112
| 0.588124
|
b63e9413ca9050802ac886fd1f16709eb0277273
| 758
|
py
|
Python
|
MyPyQt5LearnExamples/SepUiBack/calc_ui.py
|
prayjourney/on_the_way_ing
|
88d04752b7b18c6d60d74b18357f6b2c09c9748e
|
[
"MIT"
] | null | null | null |
MyPyQt5LearnExamples/SepUiBack/calc_ui.py
|
prayjourney/on_the_way_ing
|
88d04752b7b18c6d60d74b18357f6b2c09c9748e
|
[
"MIT"
] | null | null | null |
MyPyQt5LearnExamples/SepUiBack/calc_ui.py
|
prayjourney/on_the_way_ing
|
88d04752b7b18c6d60d74b18357f6b2c09c9748e
|
[
"MIT"
] | 1
|
2020-09-29T14:17:39.000Z
|
2020-09-29T14:17:39.000Z
|
# -*- coding: utf-8 -*-
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Calc(object):
def setupUi(self, Form):
self.inputSpinBox1 = QtWidgets.QSpinBox(Form)
self.inputSpinBox1.setGeometry(QtCore.QRect(1, 26, 46, 25))
        self.inputSpinBox1.setObjectName("inputSpinBox1")  # required
self.inputSpinBox2 = QtWidgets.QSpinBox(Form)
self.inputSpinBox2.setGeometry(QtCore.QRect(70, 26, 46, 25))
        self.inputSpinBox2.setObjectName("inputSpinBox2")  # required
self.outputWidget = QtWidgets.QLabel(Form)
self.outputWidget.setGeometry(QtCore.QRect(140, 24, 36, 27))
        self.outputWidget.setObjectName("outputWidget")  # required
        QtCore.QMetaObject.connectSlotsByName(Form)  # required
| 36.095238
| 69
| 0.667546
|
e5542e89c5dbf782ee5a8dfb1aabef98c2af381e
| 1,010
|
py
|
Python
|
isi_sdk_8_2_0/test/test_snapshot_schedule_create_params.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_2_0/test/test_snapshot_schedule_create_params.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_2_0/test/test_snapshot_schedule_create_params.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 7
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_0
from isi_sdk_8_2_0.models.snapshot_schedule_create_params import SnapshotScheduleCreateParams # noqa: E501
from isi_sdk_8_2_0.rest import ApiException
class TestSnapshotScheduleCreateParams(unittest.TestCase):
"""SnapshotScheduleCreateParams unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testSnapshotScheduleCreateParams(self):
"""Test SnapshotScheduleCreateParams"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_2_0.models.snapshot_schedule_create_params.SnapshotScheduleCreateParams() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 24.634146
| 115
| 0.736634
|
80b5b718bbb622ce95fe7db66602cb6d5a76c279
| 626
|
py
|
Python
|
homeassistant/components/upnp/const.py
|
guiguid/core
|
d43617c41d6507f2d2b77aadf4fa1ebaf0058b14
|
[
"Apache-2.0"
] | 1
|
2020-04-07T15:44:54.000Z
|
2020-04-07T15:44:54.000Z
|
homeassistant/components/upnp/const.py
|
tomachristian/core
|
71c8fcee20c55536b33c3ee774c76c1795f37cd2
|
[
"Apache-2.0"
] | 3
|
2021-09-08T03:31:36.000Z
|
2022-03-12T01:00:03.000Z
|
homeassistant/components/upnp/const.py
|
tomachristian/core
|
71c8fcee20c55536b33c3ee774c76c1795f37cd2
|
[
"Apache-2.0"
] | 2
|
2020-04-19T13:35:24.000Z
|
2020-04-19T13:35:51.000Z
|
"""Constants for the IGD component."""
from datetime import timedelta
import logging
from homeassistant.const import TIME_SECONDS
CONF_ENABLE_PORT_MAPPING = "port_mapping"
CONF_ENABLE_SENSORS = "sensors"
CONF_HASS = "hass"
CONF_LOCAL_IP = "local_ip"
CONF_PORTS = "ports"
DOMAIN = "upnp"
LOGGER = logging.getLogger(__package__)
BYTES_RECEIVED = "bytes_received"
BYTES_SENT = "bytes_sent"
PACKETS_RECEIVED = "packets_received"
PACKETS_SENT = "packets_sent"
TIMESTAMP = "timestamp"
DATA_PACKETS = "packets"
DATA_RATE_PACKETS_PER_SECOND = f"{DATA_PACKETS}/{TIME_SECONDS}"
KIBIBYTE = 1024
UPDATE_INTERVAL = timedelta(seconds=30)
| 27.217391
| 63
| 0.798722
|
5b7dccbcd817ad33f9601e9de811bd7a14766fc8
| 1,399
|
py
|
Python
|
server/tests/factories/workflow_task_instance_factory.py
|
CloudReactor/task_manager
|
464ca74371064fabb9a21b1f5bacba30360932ab
|
[
"Fair"
] | null | null | null |
server/tests/factories/workflow_task_instance_factory.py
|
CloudReactor/task_manager
|
464ca74371064fabb9a21b1f5bacba30360932ab
|
[
"Fair"
] | 6
|
2021-11-01T01:35:40.000Z
|
2022-02-11T03:33:06.000Z
|
server/tests/factories/workflow_task_instance_factory.py
|
CloudReactor/task_manager
|
464ca74371064fabb9a21b1f5bacba30360932ab
|
[
"Fair"
] | null | null | null |
import factory
from faker import Factory as FakerFactory
from processes.models import WorkflowTaskInstance
from .task_factory import TaskFactory
from .workflow_factory import WorkflowFactory
faker = FakerFactory.create()
class WorkflowTaskInstanceFactory(factory.django.DjangoModelFactory):
class Meta:
model = WorkflowTaskInstance
name = factory.Sequence(lambda n: f'wti_{n}')
description = ''
workflow = factory.SubFactory(WorkflowFactory)
task = factory.SubFactory(TaskFactory)
start_transition_condition = WorkflowTaskInstance.START_TRANSITION_CONDITION_ANY
max_complete_executions = 1
should_eval_transitions_after_first_execution = False
condition_count_threshold = None
condition_ratio_threshold = None
max_age_seconds = None
default_max_retries = 0
environment_variables_overrides = None
allocated_cpu_units = None
allocated_memory_mb = None
use_task_alert_methods = False
failure_behavior = WorkflowTaskInstance.FAILURE_BEHAVIOR_FAIL_WORKFLOW_IF_UNHANDLED
allow_workflow_execution_after_failure = False
timeout_behavior = WorkflowTaskInstance.TIMEOUT_BEHAVIOR_TIMEOUT_WORKFLOW_IF_UNHANDLED
allow_workflow_execution_after_timeout = False
ui_color = ''
ui_center_margin_top = 0
ui_center_margin_left = 0
ui_icon_type = ''
ui_scale = 1.0
| 34.121951
| 91
| 0.77055
|
d7fd91d3d5be8f0f4c213dd95ce6e2a45d112b0a
| 6,193
|
py
|
Python
|
homeassistant/components/geonetnz_quakes/geo_location.py
|
tubalainen/core
|
2ba514253c4981062853f3e1981903d3e4aa803c
|
[
"Apache-2.0"
] | 2
|
2020-03-02T19:17:52.000Z
|
2020-03-02T19:17:53.000Z
|
homeassistant/components/geonetnz_quakes/geo_location.py
|
tubalainen/core
|
2ba514253c4981062853f3e1981903d3e4aa803c
|
[
"Apache-2.0"
] | 6
|
2021-02-08T21:05:36.000Z
|
2022-03-12T00:54:00.000Z
|
homeassistant/components/geonetnz_quakes/geo_location.py
|
tubalainen/core
|
2ba514253c4981062853f3e1981903d3e4aa803c
|
[
"Apache-2.0"
] | null | null | null |
"""Geolocation support for GeoNet NZ Quakes Feeds."""
import logging
from typing import Optional
from homeassistant.components.geo_location import GeolocationEvent
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_TIME,
CONF_UNIT_SYSTEM_IMPERIAL,
LENGTH_KILOMETERS,
LENGTH_MILES,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.util.unit_system import IMPERIAL_SYSTEM
from .const import DOMAIN, FEED
_LOGGER = logging.getLogger(__name__)
ATTR_DEPTH = "depth"
ATTR_EXTERNAL_ID = "external_id"
ATTR_LOCALITY = "locality"
ATTR_MAGNITUDE = "magnitude"
ATTR_MMI = "mmi"
ATTR_PUBLICATION_DATE = "publication_date"
ATTR_QUALITY = "quality"
SOURCE = "geonetnz_quakes"
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the GeoNet NZ Quakes Feed platform."""
manager = hass.data[DOMAIN][FEED][entry.entry_id]
@callback
def async_add_geolocation(feed_manager, external_id, unit_system):
"""Add gelocation entity from feed."""
new_entity = GeonetnzQuakesEvent(feed_manager, external_id, unit_system)
_LOGGER.debug("Adding geolocation %s", new_entity)
async_add_entities([new_entity], True)
manager.listeners.append(
async_dispatcher_connect(
hass, manager.async_event_new_entity(), async_add_geolocation
)
)
hass.async_create_task(manager.async_update())
_LOGGER.debug("Geolocation setup done")
class GeonetnzQuakesEvent(GeolocationEvent):
"""This represents an external event with GeoNet NZ Quakes feed data."""
def __init__(self, feed_manager, external_id, unit_system):
"""Initialize entity with data from feed entry."""
self._feed_manager = feed_manager
self._external_id = external_id
self._unit_system = unit_system
self._title = None
self._distance = None
self._latitude = None
self._longitude = None
self._attribution = None
self._depth = None
self._locality = None
self._magnitude = None
self._mmi = None
self._quality = None
self._time = None
self._remove_signal_delete = None
self._remove_signal_update = None
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self._remove_signal_delete = async_dispatcher_connect(
self.hass,
f"geonetnz_quakes_delete_{self._external_id}",
self._delete_callback,
)
self._remove_signal_update = async_dispatcher_connect(
self.hass,
f"geonetnz_quakes_update_{self._external_id}",
self._update_callback,
)
async def async_will_remove_from_hass(self) -> None:
"""Call when entity will be removed from hass."""
self._remove_signal_delete()
self._remove_signal_update()
@callback
def _delete_callback(self):
"""Remove this entity."""
self.hass.async_create_task(self.async_remove())
@callback
def _update_callback(self):
"""Call update method."""
self.async_schedule_update_ha_state(True)
@property
def should_poll(self):
"""No polling needed for GeoNet NZ Quakes feed location events."""
return False
async def async_update(self):
"""Update this entity from the data held in the feed manager."""
_LOGGER.debug("Updating %s", self._external_id)
feed_entry = self._feed_manager.get_entry(self._external_id)
if feed_entry:
self._update_from_feed(feed_entry)
def _update_from_feed(self, feed_entry):
"""Update the internal state from the provided feed entry."""
self._title = feed_entry.title
# Convert distance if not metric system.
if self._unit_system == CONF_UNIT_SYSTEM_IMPERIAL:
self._distance = IMPERIAL_SYSTEM.length(
feed_entry.distance_to_home, LENGTH_KILOMETERS
)
else:
self._distance = feed_entry.distance_to_home
self._latitude = feed_entry.coordinates[0]
self._longitude = feed_entry.coordinates[1]
self._attribution = feed_entry.attribution
self._depth = feed_entry.depth
self._locality = feed_entry.locality
self._magnitude = feed_entry.magnitude
self._mmi = feed_entry.mmi
self._quality = feed_entry.quality
self._time = feed_entry.time
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return "mdi:pulse"
@property
def source(self) -> str:
"""Return source value of this external event."""
return SOURCE
@property
def name(self) -> Optional[str]:
"""Return the name of the entity."""
return self._title
@property
def distance(self) -> Optional[float]:
"""Return distance value of this external event."""
return self._distance
@property
def latitude(self) -> Optional[float]:
"""Return latitude value of this external event."""
return self._latitude
@property
def longitude(self) -> Optional[float]:
"""Return longitude value of this external event."""
return self._longitude
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
if self._unit_system == CONF_UNIT_SYSTEM_IMPERIAL:
return LENGTH_MILES
return LENGTH_KILOMETERS
@property
def device_state_attributes(self):
"""Return the device state attributes."""
attributes = {}
for key, value in (
(ATTR_EXTERNAL_ID, self._external_id),
(ATTR_ATTRIBUTION, self._attribution),
(ATTR_DEPTH, self._depth),
(ATTR_LOCALITY, self._locality),
(ATTR_MAGNITUDE, self._magnitude),
(ATTR_MMI, self._mmi),
(ATTR_QUALITY, self._quality),
(ATTR_TIME, self._time),
):
if value or isinstance(value, bool):
attributes[key] = value
return attributes
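# --- Hedged example (not in the original file) --------------------------------
# The entity above subscribes to per-event dispatcher signals; this sketch shows
# how a feed manager could fire one of them. The helper name is hypothetical;
# only the signal name is taken from the code above.
def _notify_geonetnz_quakes_entity(hass, external_id):
    """Ask the matching geolocation entity to refresh itself (hypothetical helper)."""
    from homeassistant.helpers.dispatcher import async_dispatcher_send
    async_dispatcher_send(hass, f"geonetnz_quakes_update_{external_id}")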
| 32.941489
| 80
| 0.6601
|
9af248c25669af4a79e647e599cc613abc75a29f
| 412
|
py
|
Python
|
python/sentiments_analyse/Pre_Processing.py
|
donaldonana/sentiment_analyse
|
478953fbd0f612a8e20e1f95933466d429626ed2
|
[
"MIT"
] | null | null | null |
python/sentiments_analyse/Pre_Processing.py
|
donaldonana/sentiment_analyse
|
478953fbd0f612a8e20e1f95933466d429626ed2
|
[
"MIT"
] | null | null | null |
python/sentiments_analyse/Pre_Processing.py
|
donaldonana/sentiment_analyse
|
478953fbd0f612a8e20e1f95933466d429626ed2
|
[
"MIT"
] | null | null | null |
""" We’ll have to do some pre-processing to get the data into a usable format. To start,
we’ll construct a vocabulary of all words that exist in our data"""
from data_set import train_data, test_data
items = list(train_data.items())
# Write the integer class label of each training sentence to target.txt,
# one label per line (y is cast to int, so boolean labels become 1/0).
with open('../../target.txt', 'w') as mon_fichier:
    for x, y in items:
        target = int(y)
        print("\n", target, "\n")  # debug output
        mon_fichier.write(str(target) + "\n")
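# --- Hedged example (not in the original script) ------------------------------
# The docstring above mentions building a vocabulary of all words in the data;
# a minimal sketch, assuming each key of train_data is a whitespace-separated
# sentence:
vocab = sorted({word for sentence in train_data for word in sentence.split()})
word_to_index = {word: i for i, word in enumerate(vocab)}
print(len(vocab), "unique words found in the training data")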
| 29.428571
| 91
| 0.65534
|
1c057d114c1e40e49d83f4b6ac10ef053dfef693
| 11,727
|
py
|
Python
|
nltk/corpus/reader/comparative_sents.py
|
Bharat123rox/nltk
|
ba989e5d568f007718cbe69272c885b4a7d1168d
|
[
"Apache-2.0"
] | 9,747
|
2015-01-01T05:47:33.000Z
|
2022-03-31T19:36:28.000Z
|
nltk/corpus/reader/comparative_sents.py
|
Bharat123rox/nltk
|
ba989e5d568f007718cbe69272c885b4a7d1168d
|
[
"Apache-2.0"
] | 2,155
|
2015-01-01T15:16:55.000Z
|
2022-03-31T07:44:43.000Z
|
nltk/corpus/reader/comparative_sents.py
|
Bharat123rox/nltk
|
ba989e5d568f007718cbe69272c885b4a7d1168d
|
[
"Apache-2.0"
] | 2,846
|
2015-01-03T23:16:34.000Z
|
2022-03-29T17:54:34.000Z
|
# Natural Language Toolkit: Comparative Sentence Corpus Reader
#
# Copyright (C) 2001-2021 NLTK Project
# Author: Pierpaolo Pantone <24alsecondo@gmail.com>
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
CorpusReader for the Comparative Sentence Dataset.
- Comparative Sentence Dataset information -
Annotated by: Nitin Jindal and Bing Liu, 2006.
              Department of Computer Science
University of Illinois at Chicago
Contact: Nitin Jindal, njindal@cs.uic.edu
Bing Liu, liub@cs.uic.edu (https://www.cs.uic.edu/~liub)
Distributed with permission.
Related papers:
- Nitin Jindal and Bing Liu. "Identifying Comparative Sentences in Text Documents".
Proceedings of the ACM SIGIR International Conference on Information Retrieval
(SIGIR-06), 2006.
- Nitin Jindal and Bing Liu. "Mining Comparative Sentences and Relations".
Proceedings of Twenty First National Conference on Artificial Intelligence
(AAAI-2006), 2006.
- Murthy Ganapathibhotla and Bing Liu. "Mining Opinions in Comparative Sentences".
Proceedings of the 22nd International Conference on Computational Linguistics
(Coling-2008), Manchester, 18-22 August, 2008.
"""
import re
from nltk.corpus.reader.api import *
from nltk.tokenize import *
# Regular expressions for dataset components
STARS = re.compile(r"^\*+$")
COMPARISON = re.compile(r"<cs-[1234]>")
CLOSE_COMPARISON = re.compile(r"</cs-[1234]>")
GRAD_COMPARISON = re.compile(r"<cs-[123]>")
NON_GRAD_COMPARISON = re.compile(r"<cs-4>")
ENTITIES_FEATS = re.compile(r"(\d)_((?:[\.\w\s/-](?!\d_))+)")
KEYWORD = re.compile(r"\(([^\(]*)\)$")
class Comparison:
"""
A Comparison represents a comparative sentence and its constituents.
"""
def __init__(
self,
text=None,
comp_type=None,
entity_1=None,
entity_2=None,
feature=None,
keyword=None,
):
"""
:param text: a string (optionally tokenized) containing a comparison.
:param comp_type: an integer defining the type of comparison expressed.
Values can be: 1 (Non-equal gradable), 2 (Equative), 3 (Superlative),
4 (Non-gradable).
:param entity_1: the first entity considered in the comparison relation.
:param entity_2: the second entity considered in the comparison relation.
:param feature: the feature considered in the comparison relation.
:param keyword: the word or phrase which is used for that comparative relation.
"""
self.text = text
self.comp_type = comp_type
self.entity_1 = entity_1
self.entity_2 = entity_2
self.feature = feature
self.keyword = keyword
def __repr__(self):
return (
'Comparison(text="{}", comp_type={}, entity_1="{}", entity_2="{}", '
'feature="{}", keyword="{}")'
).format(
self.text,
self.comp_type,
self.entity_1,
self.entity_2,
self.feature,
self.keyword,
)
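# Hedged example (not part of the original module): a Comparison built by hand,
# with an invented sentence, to illustrate the fields documented in __init__.
def _example_comparison():
    return Comparison(
        text="camera X has better zoom than camera Y".split(),
        comp_type=1,  # 1 = non-equal gradable, per the docstring above
        entity_1="camera X",
        entity_2="camera Y",
        feature="zoom",
        keyword="better",
    )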
class ComparativeSentencesCorpusReader(CorpusReader):
"""
Reader for the Comparative Sentence Dataset by Jindal and Liu (2006).
>>> from nltk.corpus import comparative_sentences
>>> comparison = comparative_sentences.comparisons()[0]
>>> comparison.text
['its', 'fast-forward', 'and', 'rewind', 'work', 'much', 'more', 'smoothly',
'and', 'consistently', 'than', 'those', 'of', 'other', 'models', 'i', "'ve",
'had', '.']
>>> comparison.entity_2
'models'
>>> (comparison.feature, comparison.keyword)
('rewind', 'more')
>>> len(comparative_sentences.comparisons())
853
"""
CorpusView = StreamBackedCorpusView
def __init__(
self,
root,
fileids,
word_tokenizer=WhitespaceTokenizer(),
sent_tokenizer=None,
encoding="utf8",
):
"""
:param root: The root directory for this corpus.
:param fileids: a list or regexp specifying the fileids in this corpus.
:param word_tokenizer: tokenizer for breaking sentences or paragraphs
into words. Default: `WhitespaceTokenizer`
:param sent_tokenizer: tokenizer for breaking paragraphs into sentences.
:param encoding: the encoding that should be used to read the corpus.
"""
CorpusReader.__init__(self, root, fileids, encoding)
self._word_tokenizer = word_tokenizer
self._sent_tokenizer = sent_tokenizer
self._readme = "README.txt"
def comparisons(self, fileids=None):
"""
Return all comparisons in the corpus.
:param fileids: a list or regexp specifying the ids of the files whose
comparisons have to be returned.
:return: the given file(s) as a list of Comparison objects.
:rtype: list(Comparison)
"""
if fileids is None:
fileids = self._fileids
elif isinstance(fileids, str):
fileids = [fileids]
return concat(
[
self.CorpusView(path, self._read_comparison_block, encoding=enc)
for (path, enc, fileid) in self.abspaths(fileids, True, True)
]
)
def keywords(self, fileids=None):
"""
Return a set of all keywords used in the corpus.
:param fileids: a list or regexp specifying the ids of the files whose
keywords have to be returned.
:return: the set of keywords and comparative phrases used in the corpus.
:rtype: set(str)
"""
all_keywords = concat(
[
self.CorpusView(path, self._read_keyword_block, encoding=enc)
for (path, enc, fileid) in self.abspaths(fileids, True, True)
]
)
keywords_set = {keyword.lower() for keyword in all_keywords if keyword}
return keywords_set
def keywords_readme(self):
"""
Return the list of words and constituents considered as clues of a
comparison (from listOfkeywords.txt).
"""
keywords = []
with self.open("listOfkeywords.txt") as fp:
raw_text = fp.read()
for line in raw_text.split("\n"):
if not line or line.startswith("//"):
continue
keywords.append(line.strip())
return keywords
def sents(self, fileids=None):
"""
Return all sentences in the corpus.
:param fileids: a list or regexp specifying the ids of the files whose
sentences have to be returned.
:return: all sentences of the corpus as lists of tokens (or as plain
strings, if no word tokenizer is specified).
:rtype: list(list(str)) or list(str)
"""
return concat(
[
self.CorpusView(path, self._read_sent_block, encoding=enc)
for (path, enc, fileid) in self.abspaths(fileids, True, True)
]
)
def words(self, fileids=None):
"""
Return all words and punctuation symbols in the corpus.
:param fileids: a list or regexp specifying the ids of the files whose
words have to be returned.
:return: the given file(s) as a list of words and punctuation symbols.
:rtype: list(str)
"""
return concat(
[
self.CorpusView(path, self._read_word_block, encoding=enc)
for (path, enc, fileid) in self.abspaths(fileids, True, True)
]
)
def _read_comparison_block(self, stream):
while True:
line = stream.readline()
if not line:
return [] # end of file.
comparison_tags = re.findall(COMPARISON, line)
if comparison_tags:
grad_comparisons = re.findall(GRAD_COMPARISON, line)
non_grad_comparisons = re.findall(NON_GRAD_COMPARISON, line)
# Advance to the next line (it contains the comparative sentence)
comparison_text = stream.readline().strip()
if self._word_tokenizer:
comparison_text = self._word_tokenizer.tokenize(comparison_text)
# Skip the next line (it contains closing comparison tags)
stream.readline()
# If gradable comparisons are found, create Comparison instances
# and populate their fields
comparison_bundle = []
if grad_comparisons:
# Each comparison tag has its own relations on a separate line
for comp in grad_comparisons:
comp_type = int(re.match(r"<cs-(\d)>", comp).group(1))
comparison = Comparison(
text=comparison_text, comp_type=comp_type
)
line = stream.readline()
entities_feats = ENTITIES_FEATS.findall(line)
if entities_feats:
for (code, entity_feat) in entities_feats:
if code == "1":
comparison.entity_1 = entity_feat.strip()
elif code == "2":
comparison.entity_2 = entity_feat.strip()
elif code == "3":
comparison.feature = entity_feat.strip()
keyword = KEYWORD.findall(line)
if keyword:
comparison.keyword = keyword[0]
comparison_bundle.append(comparison)
# If non-gradable comparisons are found, create a simple Comparison
# instance for each one
if non_grad_comparisons:
for comp in non_grad_comparisons:
# comp_type in this case should always be 4.
comp_type = int(re.match(r"<cs-(\d)>", comp).group(1))
comparison = Comparison(
text=comparison_text, comp_type=comp_type
)
comparison_bundle.append(comparison)
# Flatten the list of comparisons before returning them
# return concat([comparison_bundle])
return comparison_bundle
def _read_keyword_block(self, stream):
keywords = []
for comparison in self._read_comparison_block(stream):
keywords.append(comparison.keyword)
return keywords
def _read_sent_block(self, stream):
while True:
line = stream.readline()
if re.match(STARS, line):
while True:
line = stream.readline()
if re.match(STARS, line):
break
continue
if (
not re.findall(COMPARISON, line)
and not ENTITIES_FEATS.findall(line)
and not re.findall(CLOSE_COMPARISON, line)
):
if self._sent_tokenizer:
return [
self._word_tokenizer.tokenize(sent)
for sent in self._sent_tokenizer.tokenize(line)
]
else:
return [self._word_tokenizer.tokenize(line)]
def _read_word_block(self, stream):
words = []
for sent in self._read_sent_block(stream):
words.extend(sent)
return words
| 37.829032
| 87
| 0.573378
|
33617a30ea3130bf59ace2e309344764f1ed4f04
| 607
|
py
|
Python
|
setup.py
|
payamnj/django-simpletree
|
c8b6959f27a072a56a8fb3f451ad19bbd78165a7
|
[
"BSD-3-Clause"
] | 2
|
2016-06-13T09:21:24.000Z
|
2016-06-28T22:47:17.000Z
|
setup.py
|
payamnj/django-simpletree
|
c8b6959f27a072a56a8fb3f451ad19bbd78165a7
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
payamnj/django-simpletree
|
c8b6959f27a072a56a8fb3f451ad19bbd78165a7
|
[
"BSD-3-Clause"
] | null | null | null |
from setuptools import setup
setup(
name='django_simpletree',
version='0.1.2',
description='A django app with an abstract model which helps \
to define tree style models with unlimited sub branches',
url='https://github.com/payamnj/django-simpletree',
author='Payam Najafizadeh',
author_email='payam.nj@gmail.com',
license='New BSD',
packages=['simpletree', ],
classifiers=[
'Programming Language :: Python',
'Operating System :: OS Independent',
'Environment :: Web Environment',
'Framework :: Django'],
requires=['django(>=1.8)'],)
| 31.947368
| 66
| 0.655684
|
011e4889d94321f2fbf34465205e73474bb3ab16
| 2,983
|
py
|
Python
|
QWeb/internal/exceptions.py
|
sthagen/qentinelqi-qweb
|
e372729514e124a36cd41ee1ec0cff091e11ff8d
|
[
"Apache-2.0"
] | 1
|
2021-11-08T09:26:44.000Z
|
2021-11-08T09:26:44.000Z
|
QWeb/internal/exceptions.py
|
sthagen/qentinelqi-qweb
|
e372729514e124a36cd41ee1ec0cff091e11ff8d
|
[
"Apache-2.0"
] | null | null | null |
QWeb/internal/exceptions.py
|
sthagen/qentinelqi-qweb
|
e372729514e124a36cd41ee1ec0cff091e11ff8d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# --------------------------
# Copyright © 2014 - Qentinel Group.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ---------------------------
from __future__ import annotations
FATAL_MESSAGES: list[str] = [
"Failed to decode response", "chrome not reachable", "window was already closed",
"Unable to get browser", "session deleted", "0 tabs open"
]
class QWebException(Exception):
"""
Base class for other QWebExceptions
"""
class QWebSearchingMode(QWebException):
    """Raise when searching mode is on; prevents the keyword
    from executing."""
class QWebInstanceDoesNotExistError(QWebException):
    """Raise when, for example, a table instance is
    undefined while the user tries to use it."""
class QWebStalingElementError(QWebException):
"""Raise when Element is staling."""
class QWebElementNotFoundError(QWebException):
"""Raise when Element is not found
from document."""
class QWebValueError(QWebException):
"""Raise when there is mismatch between
expected condition/value and real situation."""
class QWebDriverError(QWebException):
"""Raise when element is not enabled
or whatever situation where webdriver prevents our
preferred action."""
class QWebTimeoutError(QWebException):
"""Raise when running out of time, preferred action is still
unfinished and no other exceptions exists."""
class QWebUnexpectedConditionError(QWebException):
"""Raise when expected condition is not true. This is
used by actions decorators during an execution."""
class QWebInvalidElementStateError(QWebDriverError):
"""Raise if element is in disabled state when trying
to trigger keyword action."""
class QWebValueMismatchError(QWebValueError):
""" Raise if real value is different than
expected value."""
class QWebFileNotFoundError(QWebValueError):
""" Raise if reference file is missing. """
class QWebTextNotFoundError(QWebException):
"""Raise when ScrollTo KW does not find searched text."""
class QWebUnexpectedAlert(QWebException):
"""Raise when actions are blocked by an alert box"""
class QWebEnvironmentError(QWebException):
"""Raise when actions do not work because of a faulty environment."""
class QWebBrowserError(QWebException):
"""Raise when connection to browser has been lost / browser crashed etc."""
class QWebIconNotFoundError(QWebException):
"""Raise when picture/icon is not found with image recognition."""
| 28.682692
| 85
| 0.723098
|
1f7ae662d4fcdd772330a3a4aecfeca0acafae4c
| 8,229
|
py
|
Python
|
build.py
|
jaraco/aspen
|
ec0f6d5bef5e7100ac2374b38349a51f8394eabb
|
[
"MIT"
] | 1
|
2016-08-23T16:13:22.000Z
|
2016-08-23T16:13:22.000Z
|
build.py
|
jaraco/aspen
|
ec0f6d5bef5e7100ac2374b38349a51f8394eabb
|
[
"MIT"
] | null | null | null |
build.py
|
jaraco/aspen
|
ec0f6d5bef5e7100ac2374b38349a51f8394eabb
|
[
"MIT"
] | null | null | null |
from __future__ import division, print_function, unicode_literals, with_statement
import os
import sys
import os.path
from fabricate import main, run, shell, autoclean
# Core Executables
# ================
# We satisfy dependencies using local tarballs, to ensure that we can build
# without a network connection. They're kept in our repo in ./vendor.
ASPEN_DEPS = [
'python-mimeparse>=0.1.4',
'first>=2.0.1',
'algorithm>=1.0.0',
'filesystem_tree>=1.0.1',
'dependency_injection>=1.1.0',
]
TEST_DEPS = [
'coverage>=3.7.1',
'cov-core>=1.7',
'py>=1.4.20',
'pytest>=2.5.2',
'pytest-cov>=1.6',
]
INSTALL_DIR = './vendor/install'
TEST_DIR = './vendor/test'
BOOTSTRAP_DIR = './vendor/bootstrap'
ENV_ARGS = [
'./vendor/virtualenv-13.0.3.py',
'--prompt=[aspen]',
'--extra-search-dir=' + BOOTSTRAP_DIR,
]
def _virt(cmd, envdir='env'):
envdir = _env(envdir)
if os.name == "nt":
return os.path.join(envdir, 'Scripts', cmd + '.exe')
else:
return os.path.join(envdir, 'bin', cmd)
def _virt_version(envdir):
v = shell(_virt('python', envdir), '-c',
'import sys; print(sys.version_info[:2])')
return eval(v)
def _env(envdir='env'):
# http://stackoverflow.com/a/1883251
if hasattr(sys, 'real_prefix'):
# We're already inside someone else's virtualenv.
return sys.prefix
elif hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix:
# We're already inside someone else's pyvenv.
return sys.prefix
elif os.path.exists(envdir):
# We've already built our own virtualenv.
return envdir
args = [sys.executable] + ENV_ARGS + [envdir]
run(*args)
return envdir
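# Hedged helper (not in the original build script): classify the current
# interpreter with the same checks _env() applies above.
def _env_kind():
    if hasattr(sys, 'real_prefix'):
        return 'virtualenv'
    if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix:
        return 'pyvenv'
    return 'system'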
def env():
"""set up a base virtual environment"""
_env()
def deps():
"""set up an environment able to run aspen"""
_deps()
def _deps(envdir='env'):
envdir = _env(envdir)
v = shell(_virt('python', envdir), '-c', 'import aspen; print("found")', ignore_status=True)
if b"found" in v:
return envdir
for dep in ASPEN_DEPS:
run(_virt('pip', envdir), 'install', '--no-index',
'--find-links=' + INSTALL_DIR, dep)
run(_virt('python', envdir), 'setup.py', 'develop')
return envdir
def _dev_deps(envdir='env'):
envdir = _deps(envdir)
# pytest will need argparse if it's running under 2.6
if _virt_version(envdir) < (2, 7):
TEST_DEPS.insert(0, 'argparse')
for dep in TEST_DEPS:
run(_virt('pip', envdir), 'install', '--no-index',
'--find-links=' + TEST_DIR, dep)
return envdir
def dev():
"""set up an environment able to run tests in env/"""
_dev_deps()
def clean_env():
"""clean env artifacts"""
shell('rm', '-rf', 'env')
def clean():
"""clean all artifacts"""
autoclean()
shell('find', '.', '-name', '*.pyc', '-delete')
clean_env()
clean_sphinx()
clean_jenv()
clean_test()
clean_build()
# Docs
# ====
def docserve():
"""run the aspen website"""
envdir = _deps()
run(_virt('pip', envdir), 'install', 'aspen-tornado')
run(_virt('pip', envdir), 'install', 'pygments')
shell(_virt('python', envdir), '-m', 'aspen_io', silent=False)
def _sphinx_cmd(packages, cmd):
envdir = _deps(envdir='denv')
for p in packages:
run(_virt('pip', envdir='denv'), 'install', p)
sphinxopts = []
builddir = 'docs/_build'
run('mkdir', '-p', builddir)
newenv = os.environ
newenv.update({'PYTHONPATH': 'denv/lib/python2.7/site-packages'})
args = ['-b', 'html', '-d', builddir + '/doctrees', sphinxopts,
'docs', builddir + '/html']
run(_virt(cmd, envdir=envdir), args, env=newenv)
def sphinx():
"""build sphinx documents"""
_sphinx_cmd(['sphinx'], "sphinx-build")
def autosphinx():
"""run sphinx-autobuild"""
_sphinx_cmd(['sphinx', 'sphinx-autobuild'], "sphinx-autobuild")
def clean_sphinx():
"""clean sphinx artifacts"""
shell('rm', '-rf', 'docs/_build')
shell('rm', '-rf', 'denv')
# Testing
# =======
def test():
"""run all tests"""
shell(_virt('py.test', _dev_deps()), 'tests/', ignore_status=True, silent=False)
def testf():
"""run tests, stopping at the first failure"""
shell(_virt('py.test', _dev_deps()), '-x', 'tests/', ignore_status=True, silent=False)
def pylint():
"""run lint"""
envdir = _env()
run(_virt('pip', envdir), 'install', 'pylint')
run(_virt('pylint', envdir), '--rcfile=.pylintrc',
'aspen', '|', 'tee', 'pylint.out', shell=True, ignore_status=True)
def test_cov():
"""run code coverage"""
run(_virt('py.test', _dev_deps()),
'--junitxml=testresults.xml',
'--cov-report', 'term',
'--cov-report', 'xml',
'--cov-report', 'html',
'--cov', 'aspen',
'tests/',
ignore_status=False)
def analyse():
"""run lint and coverage"""
pylint()
test_cov()
print('done!')
def clean_test():
"""clean test artifacts"""
clean_env()
shell('rm', '-rf', '.coverage', 'coverage.xml', 'testresults.xml', 'htmlcov', 'pylint.out')
# Build
# =====
def build():
"""build an egg"""
run(sys.executable, 'setup.py', 'bdist_egg')
def wheel():
"""build a wheel"""
run(sys.executable, 'setup.py', 'bdist_wheel')
def clean_build():
"""clean build artifacts"""
run('python', 'setup.py', 'clean', '-a')
run('rm', '-rf', 'dist')
# Jython
# ======
JYTHON_URL = "http://search.maven.org/remotecontent?filepath=org/python/jython-installer/2.7-b1/jython-installer-2.7-b1.jar"
def _jython_home():
if not os.path.exists('jython_home'):
local_jython = os.path.join('vendor', 'jython-installer.jar')
run('wget', JYTHON_URL, '-qO', local_jython)
run('java', '-jar', local_jython, '-s', '-d', 'jython_home')
def _jenv():
_jython_home()
jenv = dict(os.environ)
jenv['PATH'] = os.path.join('.', 'jython_home', 'bin') + ':' + jenv['PATH']
args = [ 'jython' ] + ENV_ARGS + [ '--python=jython', 'jenv' ]
run(*args, env=jenv)
def clean_jenv():
"""clean up the jython environment"""
shell('find', '.', '-name', '*.class', '-delete')
shell('rm', '-rf', 'jenv', 'vendor/jython-installer.jar', 'jython_home')
def jython_test():
"""install jython and run tests with coverage (requires java)"""
_jenv()
for dep in TEST_DEPS:
run(_virt('pip', 'jenv'), 'install', os.path.join('vendor', dep))
run(_virt('jython', 'jenv'), 'setup.py', 'develop')
run(_virt('jython', 'jenv'), _virt('py.test', 'jenv'),
'--junitxml=jython-testresults.xml', 'tests',
'--cov-report', 'term',
'--cov-report', 'xml',
'--cov', 'aspen',
ignore_status=True)
def clean_jtest():
"""clean jython test results"""
shell('find', '.', '-name', '*.class', '-delete')
shell('rm', '-rf', 'jython-testresults.xml')
def show_targets():
"""show the list of valid targets (this list)"""
print("Valid targets:\n")
# organize these however
targets = ['show_targets', None,
'env', 'deps', 'dev', 'testf', 'test', 'pylint', 'test_cov', 'analyse', None,
'build', 'wheel', None,
'docserve', 'sphinx', 'autosphinx', None,
'clean', 'clean_env', 'clean_test', 'clean_build', 'clean_sphinx', None,
'jython_test', None,
'clean_jenv', 'clean_jtest', None,
]
#docs = '\n'.join([" %s - %s" % (t, LOCALS[t].__doc__) for t in targets])
#print(docs)
for t in targets:
if t is not None:
print(" %s - %s" % (t, LOCALS[t].__doc__))
else:
print("")
if len(targets) < (len(LOCALS) - len(NON_TARGETS)):
missed = set(LOCALS.keys()).difference(NON_TARGETS, targets)
print("Unordered targets: " + ', '.join(sorted(missed)))
sys.exit()
LOCALS = dict(locals())
NON_TARGETS = [ 'main', 'autoclean', 'run', 'shell' ]
NON_TARGETS += list(x for x in LOCALS if x.startswith('_') or not callable(LOCALS[x] ))
main( default='show_targets'
, ignoreprefix="python" # workaround for gh190
)
| 27.158416
| 124
| 0.580751
|
bedf4d04528153ed0fa0b1134e729d819a8dac5c
| 66
|
py
|
Python
|
tensorflow_advanced_segmentation_models/__version__.py
|
JanMarcelKezmann/TensorFlow-Advanced-Segmentation-Models
|
f3927d1bf8e3d37ae55a032c2f1477049a255197
|
[
"MIT"
] | 72
|
2020-09-18T11:15:02.000Z
|
2022-03-31T04:14:23.000Z
|
tensorflow_advanced_segmentation_models/__version__.py
|
JanMarcelKezmann/TensorFlow-Advanced-Segmentation-Models
|
f3927d1bf8e3d37ae55a032c2f1477049a255197
|
[
"MIT"
] | 18
|
2020-09-13T08:55:23.000Z
|
2022-02-17T19:18:10.000Z
|
tensorflow_advanced_segmentation_models/__version__.py
|
JanMarcelKezmann/TensorFlow-Advanced-Segmentation-Models
|
f3927d1bf8e3d37ae55a032c2f1477049a255197
|
[
"MIT"
] | 29
|
2020-12-24T15:38:40.000Z
|
2022-03-24T01:36:05.000Z
|
VERSION = (0, 4, 8)
__version__ = ".".join(map(str, VERSION))
| 16.5
| 42
| 0.575758
|
c232f05aaa8d8c82a6d6c80dac66caecdd81e374
| 7,155
|
py
|
Python
|
libcloud/test/compute/test_rackspace.py
|
Jc2k/libcloud
|
508dfb4791e5f3f49214567725d56b367075effc
|
[
"Apache-2.0"
] | 1
|
2015-11-05T15:37:04.000Z
|
2015-11-05T15:37:04.000Z
|
libcloud/test/compute/test_rackspace.py
|
Jc2k/libcloud
|
508dfb4791e5f3f49214567725d56b367075effc
|
[
"Apache-2.0"
] | null | null | null |
libcloud/test/compute/test_rackspace.py
|
Jc2k/libcloud
|
508dfb4791e5f3f49214567725d56b367075effc
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import method_type
from libcloud.utils.py3 import httplib
from libcloud.compute.providers import DEPRECATED_RACKSPACE_PROVIDERS
from libcloud.compute.providers import get_driver
from libcloud.compute.drivers.rackspace import RackspaceFirstGenNodeDriver
from libcloud.compute.drivers.rackspace import RackspaceNodeDriver
from libcloud.test.compute.test_openstack import OpenStack_1_0_Tests
from libcloud.test.compute.test_openstack import OpenStack_1_1_Tests, \
OpenStack_1_1_MockHttp
from libcloud.pricing import clear_pricing_data
from libcloud.test.secrets import RACKSPACE_NOVA_PARAMS
from libcloud.test.secrets import RACKSPACE_PARAMS
class RackspaceusFirstGenUsTests(OpenStack_1_0_Tests):
should_list_locations = True
should_have_pricing = True
driver_klass = RackspaceFirstGenNodeDriver
driver_type = RackspaceFirstGenNodeDriver
driver_args = RACKSPACE_PARAMS
driver_kwargs = {'region': 'us'}
def test_error_is_thrown_on_accessing_old_constant(self):
for provider in DEPRECATED_RACKSPACE_PROVIDERS:
try:
get_driver(provider)
except Exception:
e = sys.exc_info()[1]
self.assertTrue(str(e).find('has been removed') != -1)
else:
self.fail('Exception was not thrown')
def test_list_sizes_pricing(self):
sizes = self.driver.list_sizes()
for size in sizes:
self.assertTrue(size.price > 0)
class RackspaceusFirstGenUkTests(OpenStack_1_0_Tests):
should_list_locations = True
should_have_pricing = True
driver_klass = RackspaceFirstGenNodeDriver
driver_type = RackspaceFirstGenNodeDriver
driver_args = RACKSPACE_PARAMS
driver_kwargs = {'region': 'uk'}
def test_list_sizes_pricing(self):
sizes = self.driver.list_sizes()
for size in sizes:
self.assertTrue(size.price > 0)
class RackspaceNovaMockHttp(OpenStack_1_1_MockHttp):
def __init__(self, *args, **kwargs):
super(RackspaceNovaMockHttp, self).__init__(*args, **kwargs)
methods1 = OpenStack_1_1_MockHttp.__dict__
names1 = [m for m in methods1 if m.find('_v1_1') == 0]
for name in names1:
method = methods1[name]
new_name = name.replace('_v1_1_slug_', '_v2_1337_')
setattr(self, new_name, method_type(method, self,
RackspaceNovaMockHttp))
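# Hedged note (not in the original tests): the loop above re-registers every
# OpenStack_1_1_MockHttp handler whose name starts with '_v1_1', swapping the
# '_v1_1_slug_' prefix for '_v2_1337_', so the same fixtures also answer the
# Rackspace '/v2/1337' endpoints asserted in the service-catalog tests below.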
class RackspaceNovaLonMockHttp(RackspaceNovaMockHttp):
def _v2_0_tokens(self, method, url, body, headers):
body = self.auth_fixtures.load('_v2_0__auth_lon.json')
return (httplib.OK, body, self.json_content_headers,
httplib.responses[httplib.OK])
class RackspaceNovaDfwTests(OpenStack_1_1_Tests):
driver_klass = RackspaceNodeDriver
driver_type = RackspaceNodeDriver
driver_args = RACKSPACE_NOVA_PARAMS
driver_kwargs = {'region': 'dfw'}
@classmethod
def create_driver(self):
return self.driver_type(*self.driver_args, **self.driver_kwargs)
def setUp(self):
self.driver_klass.connectionCls.conn_classes = (RackspaceNovaMockHttp,
RackspaceNovaMockHttp)
self.driver_klass.connectionCls.auth_url = \
'https://auth.api.example.com/v2.0/'
self.driver = self.create_driver()
# normally authentication happens lazily, but we force it here
self.driver.connection._populate_hosts_and_request_paths()
clear_pricing_data()
self.node = self.driver.list_nodes()[1]
def test_service_catalog(self):
self.assertEqual(
'https://dfw.servers.api.rackspacecloud.com/v2/1337',
self.driver.connection.get_endpoint())
class RackspaceNovaOrdTests(OpenStack_1_1_Tests):
driver_klass = RackspaceNodeDriver
driver_type = RackspaceNodeDriver
driver_args = RACKSPACE_NOVA_PARAMS
driver_kwargs = {'region': 'ord'}
@classmethod
def create_driver(self):
return self.driver_type(*self.driver_args, **self.driver_kwargs)
def setUp(self):
self.driver_klass.connectionCls.conn_classes = (RackspaceNovaMockHttp,
RackspaceNovaMockHttp)
self.driver_klass.connectionCls.auth_url = \
'https://auth.api.example.com/v2.0/'
self.driver = self.create_driver()
# normally authentication happens lazily, but we force it here
self.driver.connection._populate_hosts_and_request_paths()
clear_pricing_data()
self.node = self.driver.list_nodes()[1]
def test_list_sizes_pricing(self):
sizes = self.driver.list_sizes()
for size in sizes:
if size.ram > 256:
self.assertTrue(size.price > 0)
def test_service_catalog(self):
self.assertEqual('https://ord.servers.api.rackspacecloud.com/v2/1337',
self.driver.connection.get_endpoint())
class RackspaceNovaLonTests(OpenStack_1_1_Tests):
driver_klass = RackspaceNodeDriver
driver_type = RackspaceNodeDriver
driver_args = RACKSPACE_NOVA_PARAMS
driver_kwargs = {'region': 'lon'}
@classmethod
def create_driver(self):
return self.driver_type(*self.driver_args, **self.driver_kwargs)
def setUp(self):
self.driver_klass.connectionCls.conn_classes = \
(RackspaceNovaLonMockHttp, RackspaceNovaLonMockHttp)
self.driver_klass.connectionCls.auth_url = \
'https://lon.auth.api.example.com/v2.0/'
self.driver = self.create_driver()
# normally authentication happens lazily, but we force it here
self.driver.connection._populate_hosts_and_request_paths()
clear_pricing_data()
self.node = self.driver.list_nodes()[1]
def test_list_sizes_pricing(self):
sizes = self.driver.list_sizes()
for size in sizes:
if size.ram > 256:
self.assertTrue(size.price > 0)
def test_service_catalog(self):
self.assertEqual('https://lon.servers.api.rackspacecloud.com/v2/1337',
self.driver.connection.get_endpoint())
if __name__ == '__main__':
sys.exit(unittest.main())
| 36.319797
| 78
| 0.689308
|
ae60303002ccada953e812db6c44443264f7d51a
| 1,554
|
py
|
Python
|
analysis.py
|
dabows/Spotify_top200_anal
|
9819c090d8df5160022b7cbc192e119f464f2d73
|
[
"MIT"
] | null | null | null |
analysis.py
|
dabows/Spotify_top200_anal
|
9819c090d8df5160022b7cbc192e119f464f2d73
|
[
"MIT"
] | null | null | null |
analysis.py
|
dabows/Spotify_top200_anal
|
9819c090d8df5160022b7cbc192e119f464f2d73
|
[
"MIT"
] | null | null | null |
'''
Shows the top artists by number of appearances on the Top 200 chart,
as well as their total streams.
Also renders the animated bar chart race video.
'''
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as ani
import bar_chart_race as bar
def countArtists():
df = pd.read_csv('./products/all_df.csv')
arts = df['Artist'].tolist()
streams = df['Streams'].tolist()
art_count = {}
art_streams = {}
for index, art in enumerate(arts):
        if art not in art_count:
art_count[art] = 1
else:
art_count[art] += 1
        if art not in art_streams:
art_streams[art] = streams[index]
else:
art_streams[art] += streams[index]
df_art_cnt = pd.DataFrame(art_count.items(), columns=['Artist','Count'])
df_art_strm = pd.DataFrame(art_streams.items(), columns=['Artist','Streams'])
df_mgd = df_art_cnt.merge(df_art_strm, how='outer')
#df_mgd = df_mgd.sort_values(by='Streams', ascending=False)
df_mgd = df_mgd.sort_values(by='Count', ascending=False)
print(df_mgd.head(5))
#df_mgd.to_csv('./products/artist_count.csv')
def aniGraph():
df = pd.read_csv('./products/artist_streams_timeseries.csv', index_col=0)
fig = plt.figure()
#.iloc[::3, :] add to df to skip days
bar.bar_chart_race(df, 'Racebar.mp4', figsize=(9,5), n_bars=5,
fixed_max=True, period_length=150, steps_per_period=10, title='Spotify Top 5 Streams Per Day(Top 200 chart)')
if __name__ == '__main__':
countArtists()
#aniGraph()
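# Hedged note (not in the original script): bar_chart_race expects a "wide"
# frame -- one row per period (the index) and one column per artist -- which
# artist_streams_timeseries.csv is assumed to provide, e.g.:
#
#     Date,Artist A,Artist B
#     2020-01-01,1200000,950000
#     2020-01-02,1350000,980000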
| 25.9
| 114
| 0.644788
|
cac5785c0a127424a5854dd416e82af15b5509f5
| 3,192
|
py
|
Python
|
code/arch_12/dataset.py
|
dalmouiee/RDP-Net-Ret
|
0f0c7aecef807e00ad71f7ae47827163018a98e8
|
[
"MIT"
] | null | null | null |
code/arch_12/dataset.py
|
dalmouiee/RDP-Net-Ret
|
0f0c7aecef807e00ad71f7ae47827163018a98e8
|
[
"MIT"
] | null | null | null |
code/arch_12/dataset.py
|
dalmouiee/RDP-Net-Ret
|
0f0c7aecef807e00ad71f7ae47827163018a98e8
|
[
"MIT"
] | null | null | null |
import cv2
import os
import glob
from sklearn.utils import shuffle
import numpy as np
def load_train(train_path, image_size, classes):
images = []
labels = []
img_names = []
cls = []
print('Going to read training images')
for fields in classes:
index = classes.index(fields)
print('Now going to read {} files (Index: {})'.format(fields, index))
path = os.path.join(train_path, fields, '*g')
files = glob.glob(path)
for fl in files:
image = cv2.imread(fl)
image = cv2.resize(image, (image_size, image_size),0,0, cv2.INTER_LINEAR)
image = image.astype(np.float32)
image = np.multiply(image, 1.0 / 255.0)
images.append(image)
label = np.zeros(len(classes))
label[index] = 1.0
labels.append(label)
flbase = os.path.basename(fl)
img_names.append(flbase)
cls.append(fields)
images = np.array(images)
labels = np.array(labels)
img_names = np.array(img_names)
cls = np.array(cls)
return images, labels, img_names, cls
class DataSet(object):
def __init__(self, images, labels, img_names, cls):
self._num_examples = images.shape[0]
self._images = images
self._labels = labels
self._img_names = img_names
self._cls = cls
self._epochs_done = 0
self._index_in_epoch = 0
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def img_names(self):
return self._img_names
@property
def cls(self):
return self._cls
@property
def num_examples(self):
return self._num_examples
@property
def epochs_done(self):
return self._epochs_done
def next_batch(self, batch_size):
"""Return the next `batch_size` examples from this data set."""
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
self._epochs_done += 1
start = 0
self._index_in_epoch = batch_size
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end], self._img_names[start:end], self._cls[start:end]
def read_train_sets(train_path, image_size, classes, validation_size):
class DataSets(object):
pass
data_sets = DataSets()
images, labels, img_names, cls = load_train(train_path, image_size, classes)
images, labels, img_names, cls = shuffle(images, labels, img_names, cls)
if isinstance(validation_size, float):
validation_size = int(validation_size * images.shape[0])
validation_images = images[:validation_size]
validation_labels = labels[:validation_size]
validation_img_names = img_names[:validation_size]
validation_cls = cls[:validation_size]
train_images = images[validation_size:]
train_labels = labels[validation_size:]
train_img_names = img_names[validation_size:]
train_cls = cls[validation_size:]
data_sets.train = DataSet(train_images, train_labels, train_img_names, train_cls)
data_sets.valid = DataSet(validation_images, validation_labels, validation_img_names, validation_cls)
return data_sets
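# --- Hedged usage sketch (not in the original module) -------------------------
# Directory and class names are invented; they must exist on disk for this to
# actually run.
def _demo_read_train_sets():
    data = read_train_sets('training_data/', image_size=64,
                           classes=['cats', 'dogs'], validation_size=0.2)
    return data.train.next_batch(batch_size=32)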
| 28
| 109
| 0.682331
|
b60af66ee3c1d843ad05e3f8d23c11736db9db3d
| 11,881
|
py
|
Python
|
cinder/volume/drivers/datera/datera_common.py
|
cloudification-io/cinder
|
23d76e01f2b4f3771b57fb287084a4884238b827
|
[
"Apache-2.0"
] | 571
|
2015-01-01T17:47:26.000Z
|
2022-03-23T07:46:36.000Z
|
cinder/volume/drivers/datera/datera_common.py
|
dFarui/cinder
|
b2922384054ddbd46e071fd07372a75a21d7f85d
|
[
"Apache-2.0"
] | 37
|
2015-01-22T23:27:04.000Z
|
2021-02-05T16:38:48.000Z
|
cinder/volume/drivers/datera/datera_common.py
|
dFarui/cinder
|
b2922384054ddbd46e071fd07372a75a21d7f85d
|
[
"Apache-2.0"
] | 841
|
2015-01-04T17:17:11.000Z
|
2022-03-31T12:06:51.000Z
|
# Copyright 2020 Datera
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import random
import re
import string
import time
import types
import uuid
from glanceclient import exc as glance_exc
from oslo_log import log as logging
from oslo_utils import importutils
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder.image import glance
from cinder.volume import qos_specs
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
dfs_sdk = importutils.try_import('dfs_sdk')
OS_PREFIX = "OS"
UNMANAGE_PREFIX = "UNMANAGED"
# Taken from this SO post :
# http://stackoverflow.com/a/18516125
# Using old-style string formatting because of the nature of the regex
# conflicting with new-style curly braces
UUID4_STR_RE = ("%s.*([a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab]"
"[a-f0-9]{3}-?[a-f0-9]{12})")
UUID4_RE = re.compile(UUID4_STR_RE % OS_PREFIX)
SNAP_RE = re.compile(r"\d{10,}\.\d+")
# Recursive dict to assemble basic url structure for the most common
# API URL endpoints. Most others are constructed from these
DEFAULT_SI_SLEEP = 1
DEFAULT_SI_SLEEP_API_2 = 5
DEFAULT_SNAP_SLEEP = 1
API_VERSIONS = ["2.1", "2.2"]
API_TIMEOUT = 20
VALID_CHARS = set(string.ascii_letters + string.digits + "-_.")
class DateraAPIException(exception.VolumeBackendAPIException):
message = _("Bad response from Datera API")
def get_name(resource):
dn = resource.get('display_name')
cid = resource.get('id')
if dn:
dn = filter_chars(dn)
# Check to ensure the name is short enough to fit. Prioritize
# the prefix and Cinder ID, strip all invalid characters
nl = len(OS_PREFIX) + len(dn) + len(cid) + 2
if nl >= 64:
dn = dn[:-(nl - 63)]
return "-".join((OS_PREFIX, dn, cid))
return "-".join((OS_PREFIX, cid))
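# Hedged example (not in the original module); the resource dicts are invented
# to show the prefixing, character filtering and 64-character bound above.
def _demo_get_name():
    assert get_name({'id': 'abc', 'display_name': 'My Vol'}) == 'OS-MyVol-abc'
    assert get_name({'id': 'abc'}) == 'OS-abc'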
def get_unmanaged(name):
return "-".join((UNMANAGE_PREFIX, name))
def filter_chars(s):
if s:
return ''.join([c for c in s if c in VALID_CHARS])
return s
def lookup(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
obj = args[0]
name = "_" + func.__name__ + "_" + obj.apiv.replace(".", "_")
LOG.debug("Trying method: %s", name)
call_id = uuid.uuid4()
if obj.do_profile:
LOG.debug("Profiling method: %s, id %s", name, call_id)
t1 = time.time()
obj.thread_local.trace_id = call_id
result = getattr(obj, name)(*args[1:], **kwargs)
if obj.do_profile:
t2 = time.time()
timedelta = round(t2 - t1, 3)
LOG.debug("Profile for method %s, id %s: %ss",
name, call_id, timedelta)
return result
return wrapper
def _parse_vol_ref(ref):
if ref.count(":") not in (2, 3):
raise exception.ManageExistingInvalidReference(
_("existing_ref argument must be of this format: "
"tenant:app_inst_name:storage_inst_name:vol_name or "
"app_inst_name:storage_inst_name:vol_name"))
try:
(tenant, app_inst_name, storage_inst_name,
vol_name) = ref.split(":")
if tenant == "root":
tenant = None
except (TypeError, ValueError):
app_inst_name, storage_inst_name, vol_name = ref.split(
":")
tenant = None
return app_inst_name, storage_inst_name, vol_name, tenant
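# Hedged example (not in the original module); the reference strings are
# invented to show both accepted formats described in the error message above.
def _demo_parse_vol_ref():
    assert _parse_vol_ref("mytenant:app1:storage-1:volume-1") == (
        "app1", "storage-1", "volume-1", "mytenant")
    assert _parse_vol_ref("app1:storage-1:volume-1") == (
        "app1", "storage-1", "volume-1", None)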
def _check_snap_ref(ref):
if not SNAP_RE.match(ref):
raise exception.ManageExistingInvalidReference(
_("existing_ref argument must be of this format: "
"1234567890.12345678"))
return True
def _get_size(app_inst):
"""Helper method for getting the size of a backend object
If app_inst is provided, we'll just parse the dict to get
the size instead of making a separate http request
"""
if 'data' in app_inst:
app_inst = app_inst['data']
sis = app_inst['storage_instances']
found_si = sis[0]
found_vol = found_si['volumes'][0]
return found_vol['size']
def _get_volume_type_obj(driver, resource):
type_id = resource.get('volume_type_id', None)
# Handle case of volume with no type. We still want the
# specified defaults from above
if type_id:
ctxt = context.get_admin_context()
volume_type = volume_types.get_volume_type(ctxt, type_id)
else:
volume_type = None
return volume_type
def _get_policies_for_resource(driver, resource):
volume_type = driver._get_volume_type_obj(resource)
return driver._get_policies_for_volume_type(volume_type)
def _get_policies_for_volume_type(driver, volume_type):
"""Get extra_specs and qos_specs of a volume_type.
This fetches the scoped keys from the volume type. Anything set from
qos_specs will override key/values set from extra_specs.
"""
# Handle case of volume with no type. We still want the
# specified defaults from above
if volume_type:
specs = volume_type.get('extra_specs', {})
else:
specs = {}
# Set defaults:
policies = {k.lstrip('DF:'): str(v['default']) for (k, v)
in driver._init_vendor_properties()[0].items()}
if volume_type:
qos_specs_id = volume_type.get('qos_specs_id')
if qos_specs_id is not None:
ctxt = context.get_admin_context()
qos_kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
if qos_kvs:
specs.update(qos_kvs)
# Populate updated value
for key, value in specs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]
policies[key] = value
# Cast everything except booleans int that can be cast
for k, v in policies.items():
# Handle String Boolean case
if v == 'True' or v == 'False':
policies[k] = policies[k] == 'True'
continue
# Int cast
try:
policies[k] = int(v)
except ValueError:
pass
return policies
def _image_accessible(driver, context, volume, image_meta):
# Determine if image is accessible by current project
pid = volume.get('project_id', '')
public = False
visibility = image_meta.get('visibility', None)
LOG.debug("Image %(image)s visibility: %(vis)s",
{"image": image_meta['id'], "vis": visibility})
if visibility and visibility in ['public', 'community']:
public = True
elif visibility and visibility in ['shared', 'private']:
# Do membership check. Newton and before didn't have a 'shared'
# visibility option, so we have to do this check for 'private'
# as well
gclient = glance.get_default_image_service()
members = []
# list_members is only available in Rocky+
try:
members = gclient.list_members(context, image_meta['id'])
except AttributeError:
# This is the fallback method for the same query
try:
members = gclient._client.call(context,
'list',
controller='image_members',
image_id=image_meta['id'])
except glance_exc.HTTPForbidden as e:
LOG.warning(e)
except glance_exc.HTTPForbidden as e:
LOG.warning(e)
members = list(members)
LOG.debug("Shared image %(image)s members: %(members)s",
{"image": image_meta['id'], "members": members})
for member in members:
if (member['member_id'] == pid and
member['status'] == 'accepted'):
public = True
break
if image_meta.get('is_public', False):
public = True
else:
if image_meta.get('owner', '') == pid:
public = True
if not public:
LOG.warning("Requested image is not "
"accessible by current Project.")
return public
def _format_tenant(tenant):
if tenant == "all" or (tenant and ('/root' in tenant or 'root' in tenant)):
return '/root'
elif tenant and ('/root' not in tenant and 'root' not in tenant):
return "/" + "/".join(('root', tenant)).strip('/')
return tenant
def get_ip_pool(policies):
ip_pool = policies['ip_pool']
if ',' in ip_pool:
ip_pools = ip_pool.split(',')
ip_pool = random.choice(ip_pools)
return ip_pool
def create_tenant(driver, project_id):
if driver.tenant_id.lower() == 'map':
name = get_name({'id': project_id})
elif driver.tenant_id:
name = driver.tenant_id.replace('root', '').strip('/')
else:
name = 'root'
if name:
try:
driver.api.tenants.create(name=name)
except dfs_sdk.exceptions.ApiConflictError:
LOG.debug("Tenant %s already exists", name)
return _format_tenant(name)
def get_tenant(driver, project_id):
if driver.tenant_id.lower() == 'map':
return _format_tenant(get_name({'id': project_id}))
elif not driver.tenant_id:
return _format_tenant('root')
return _format_tenant(driver.tenant_id)
def cvol_to_ai(driver, resource, tenant=None):
if not tenant:
tenant = get_tenant(driver, resource['project_id'])
try:
# api.tenants.get needs a non '/'-prefixed tenant id
driver.api.tenants.get(tenant.strip('/'))
except dfs_sdk.exceptions.ApiNotFoundError:
create_tenant(driver, resource['project_id'])
cid = resource.get('id', None)
if not cid:
raise ValueError('Unsure what id key to use for object', resource)
ais = driver.api.app_instances.list(
filter='match(name,.*{}.*)'.format(cid),
tenant=tenant)
if not ais:
raise exception.VolumeNotFound(volume_id=cid)
return ais[0]
def cvol_to_dvol(driver, resource, tenant=None):
if not tenant:
tenant = get_tenant(driver, resource['project_id'])
ai = cvol_to_ai(driver, resource, tenant=tenant)
si = ai.storage_instances.list(tenant=tenant)[0]
vol = si.volumes.list(tenant=tenant)[0]
return vol
def _version_to_int(ver):
# Using a factor of 100 per digit so up to 100 versions are supported
# per major/minor/patch/subpatch digit in this calculation
# Example:
# In [2]: _version_to_int("3.3.0.0")
# Out[2]: 303000000
# In [3]: _version_to_int("2.2.7.1")
# Out[3]: 202070100
VERSION_DIGITS = 4
factor = pow(10, VERSION_DIGITS * 2)
div = pow(10, 2)
val = 0
for c in ver.split("."):
val += int(int(c) * factor)
factor /= div
return val
def dat_version_gte(version_a, version_b):
return _version_to_int(version_a) >= _version_to_int(version_b)
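# Hedged example (not in the original module), consistent with the worked
# values in the comments of _version_to_int above.
def _demo_dat_version_gte():
    assert dat_version_gte("3.3.0.0", "2.2.7.1")
    assert not dat_version_gte("2.2.7.1", "3.3.0.0")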
def register_driver(driver):
for func in [_get_volume_type_obj,
_get_policies_for_resource,
_get_policies_for_volume_type,
_image_accessible,
get_tenant,
create_tenant,
cvol_to_ai,
cvol_to_dvol]:
f = types.MethodType(func, driver)
setattr(driver, func.__name__, f)
| 32.820442
| 79
| 0.620655
|
85e42bf15723640f3e27c294dcae6592cada845d
| 3,206
|
py
|
Python
|
profiles_project/settings.py
|
shycoder06/profiles-rest-api
|
933332b970fb4c0ed3b2c936218abdb80bbf0271
|
[
"MIT"
] | null | null | null |
profiles_project/settings.py
|
shycoder06/profiles-rest-api
|
933332b970fb4c0ed3b2c936218abdb80bbf0271
|
[
"MIT"
] | null | null | null |
profiles_project/settings.py
|
shycoder06/profiles-rest-api
|
933332b970fb4c0ed3b2c936218abdb80bbf0271
|
[
"MIT"
] | null | null | null |
"""
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5f9zkq4+95wy##ht8gadrhhhyusit8w6btt6_g_6^tu@det7#g'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
| 26.716667
| 91
| 0.703369
|
e98c61c6a92db97f657af561559d3046041c8df8
| 958
|
py
|
Python
|
blog/admin.py
|
masod-abbasian/mysite
|
5c321d0079a26ccd6b1bf25ebc7ecc44da05bc85
|
[
"MIT"
] | 2
|
2021-11-20T04:22:11.000Z
|
2021-12-03T09:16:51.000Z
|
blog/admin.py
|
masod-abbasian/mysite
|
5c321d0079a26ccd6b1bf25ebc7ecc44da05bc85
|
[
"MIT"
] | null | null | null |
blog/admin.py
|
masod-abbasian/mysite
|
5c321d0079a26ccd6b1bf25ebc7ecc44da05bc85
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from blog.models import POST,Category,Comment
from django_summernote.admin import SummernoteModelAdmin
# Register your models here.
# @admin.register(POST)
class POSTAdmin(SummernoteModelAdmin):
date_hierarchy = 'created_date'
empty_value_display = '-empty'
list_display = ('title','author','counted_view','status','login_require','published_date','created_date')
list_filter = ('status','author')
# ordering = ['-created_date']
search_fields = ('title','content')
summernote_fields = ('content',)
class CommentAdmin(admin.ModelAdmin):
date_hierarchy = 'created_date'
empty_value_display = '-empty'
list_display = ('name','post','approved','created_date')
list_filter = ('post','approved')
# ordering = ['-created_date']
search_fields = ('name','post')
admin.site.register(Comment,CommentAdmin)
admin.site.register(Category)
admin.site.register(POST,POSTAdmin)
| 34.214286
| 109
| 0.721294
|
95af88118de8d1e3e33813b45cf6c99fc649f549
| 5,227
|
py
|
Python
|
train_imp.py
|
chanyh0/pytorch-CycleGAN-and-pix2pix
|
4d59a4695fdcf80d05b40c5eff0b35af4271e985
|
[
"BSD-3-Clause"
] | null | null | null |
train_imp.py
|
chanyh0/pytorch-CycleGAN-and-pix2pix
|
4d59a4695fdcf80d05b40c5eff0b35af4271e985
|
[
"BSD-3-Clause"
] | null | null | null |
train_imp.py
|
chanyh0/pytorch-CycleGAN-and-pix2pix
|
4d59a4695fdcf80d05b40c5eff0b35af4271e985
|
[
"BSD-3-Clause"
] | null | null | null |
"""General-purpose training script for image-to-image translation.
This script works for various models (with option '--model': e.g., pix2pix, cyclegan, colorization) and
different datasets (with option '--dataset_mode': e.g., aligned, unaligned, single, colorization).
You need to specify the dataset ('--dataroot'), experiment name ('--name'), and model ('--model').
It first creates model, dataset, and visualizer given the option.
It then does standard network training. During the training, it also visualize/save the images, print/save the loss plot, and save models.
The script supports continue/resume training. Use '--continue_train' to resume your previous training.
Example:
Train a CycleGAN model:
python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
Train a pix2pix model:
python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
See options/base_options.py and options/train_options.py for more training options.
See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
"""
import time
from options.train_options import TrainOptions
from data import create_dataset
from models import create_model
from util.visualizer import Visualizer
if __name__ == '__main__':
opt = TrainOptions().parse() # get training options
dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
dataset_size = len(dataset) # get the number of images in the dataset.
print('The number of training images = %d' % dataset_size)
model = create_model(opt) # create a model given opt.model and other options
model.setup(opt) # regular setup: load and print networks; create schedulers
visualizer = Visualizer(opt) # create a visualizer that display/save images and plots
total_iters = 0 # the total number of training iterations
for round_ in range(11):
model.save_init('init')
for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1): # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
epoch_start_time = time.time() # timer for entire epoch
iter_data_time = time.time() # timer for data loading per iteration
epoch_iter = 0 # the number of training iterations in current epoch, reset to 0 every epoch
visualizer.reset() # reset the visualizer: make sure it saves the results to HTML at least once every epoch
model.update_learning_rate() # update learning rates in the beginning of every epoch.
for i, data in enumerate(dataset): # inner loop within one epoch
iter_start_time = time.time() # timer for computation per iteration
if total_iters % opt.print_freq == 0:
t_data = iter_start_time - iter_data_time
total_iters += opt.batch_size
epoch_iter += opt.batch_size
model.set_input(data) # unpack data from dataset and apply preprocessing
model.optimize_parameters() # calculate loss functions, get gradients, update network weights
if total_iters % opt.display_freq == 0: # display images on visdom and save images to a HTML file
save_result = total_iters % opt.update_html_freq == 0
model.compute_visuals()
visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
if total_iters % opt.print_freq == 0: # print training losses and save logging information to the disk
losses = model.get_current_losses()
t_comp = (time.time() - iter_start_time) / opt.batch_size
visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
if opt.display_id > 0:
visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)
if total_iters % opt.save_latest_freq == 0: # cache our latest model every <save_latest_freq> iterations
print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'
model.save_networks(save_suffix)
iter_data_time = time.time()
if epoch % opt.save_epoch_freq == 0: # cache our model every <save_epoch_freq> epochs
print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
model.save_networks('latest')
model.save_networks(epoch)
print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.n_epochs + opt.n_epochs_decay, time.time() - epoch_start_time))
model.prune('init')
model.save_networks(epoch, extra="round{}".format(round_))
| 62.22619
| 190
| 0.666539
|
b44e43ac8bbbda2be07594e9a9b2ab3c082aaae1
| 777
|
py
|
Python
|
tests/mainnet-fork/test_mainnet_fork_price_feed.py
|
PatrickAlphaC/xDai-brownie
|
91e50772716fe349cc15645450af5c1ed9bf1d55
|
[
"MIT"
] | 2
|
2021-02-25T23:12:40.000Z
|
2021-03-17T19:55:07.000Z
|
tests/mainnet-fork/test_mainnet_fork_price_feed.py
|
PatrickAlphaC/xDai-brownie
|
91e50772716fe349cc15645450af5c1ed9bf1d55
|
[
"MIT"
] | 1
|
2021-01-30T04:26:47.000Z
|
2021-02-02T23:55:33.000Z
|
tests/mainnet-fork/test_mainnet_fork_price_feed.py
|
PatrickAlphaC/chainlink-mix
|
806d57638367254af992de87aef88a5d765ce3b3
|
[
"MIT"
] | 2
|
2021-01-19T08:53:14.000Z
|
2021-04-12T08:36:20.000Z
|
import pytest
from brownie import PriceFeed, accounts, network
def test_can_deploy_contract(mainnet_eth_usd_address):
# Arrange
if network.show_active() != 'mainnet-fork':
pytest.skip('Only works for mainnet-fork network')
# Act
price_feed = PriceFeed.deploy(
mainnet_eth_usd_address, {'from': accounts[0]})
# Assert
assert price_feed is not None
def test_can_get_latest_price(mainnet_eth_usd_address):
# Arrange
if network.show_active() != 'mainnet-fork':
pytest.skip('Only works for mainnet-fork network')
# Act
price_feed = PriceFeed.deploy(
mainnet_eth_usd_address, {'from': accounts[0]})
# Assert
value = price_feed.getLatestPrice({'from': accounts[0]})
assert isinstance(value, int)
| 29.884615
| 60
| 0.696268
|
9aa3f7c5dea02e6baddb4029d21fc156cbce3a84
| 317
|
py
|
Python
|
redata/checks/data_volume.py
|
mociarain/redata
|
fc4e9826ab47920677383c97e51b6a8bdbd58a4b
|
[
"MIT"
] | null | null | null |
redata/checks/data_volume.py
|
mociarain/redata
|
fc4e9826ab47920677383c97e51b6a8bdbd58a4b
|
[
"MIT"
] | null | null | null |
redata/checks/data_volume.py
|
mociarain/redata
|
fc4e9826ab47920677383c97e51b6a8bdbd58a4b
|
[
"MIT"
] | null | null | null |
from datetime import date, datetime, time
from sqlalchemy.sql import text
from redata.db_operations import metrics_db, metrics_session
from redata.metric import Metric
def check_data_volume(db, table, check, time_interval, conf):
result = db.check_data_volume(table, time_interval, conf)
return [result]
| 24.384615 | 61 | 0.791798 |
13ed7575314f02cee7ac68e5c1581480c17339e3
| 877
|
py
|
Python
|
old/tests/auto-testing/master.py
|
ali493/pyro
|
1245340077a733e2ab35765eae783b358d2f3af9
|
[
"MIT"
] | null | null | null |
old/tests/auto-testing/master.py
|
ali493/pyro
|
1245340077a733e2ab35765eae783b358d2f3af9
|
[
"MIT"
] | null | null | null |
old/tests/auto-testing/master.py
|
ali493/pyro
|
1245340077a733e2ab35765eae783b358d2f3af9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 6 15:27:04 2016
@author: alex
"""
import numpy as np
import os
import importlib
from AlexRobotics.dynamic import Manipulator
###########################
# Load 2 dof robot
###########################
x_start = [-1,0.3,0,0]
###########################
# test student controller
###########################
os.chdir('students/')
for file in os.listdir():
Robot = Manipulator.TwoLinkManipulator()
name, extension = os.path.splitext( file )
code2test = importlib.import_module(name)
# Asign sudent controller to simulator
Robot.ctl = code2test.ctl
# Simulation
Robot.plotAnimation( x_start , tf=5, n=10001, solver='euler')
Robot.fig.canvas.set_window_title(name)
print(name,' Integral Cost:', Robot.Sim.J, ' Note:', max([0,100-Robot.Sim.J*0.05]))
| 19.488889 | 87 | 0.566705 |
057bff3b140116de9e70f739b48e67b2e8435e40
| 1,263
|
py
|
Python
|
web/blog/serializers.py
|
bandirom/django-blog
|
a8232ee8e4b7380b0760296de865cca2c5feda87
|
[
"MIT"
] | 1
|
2021-08-11T10:51:28.000Z
|
2021-08-11T10:51:28.000Z
|
web/blog/serializers.py
|
bandirom/django-blog
|
a8232ee8e4b7380b0760296de865cca2c5feda87
|
[
"MIT"
] | null | null | null |
web/blog/serializers.py
|
bandirom/django-blog
|
a8232ee8e4b7380b0760296de865cca2c5feda87
|
[
"MIT"
] | 6
|
2021-04-07T17:03:52.000Z
|
2021-07-18T04:46:59.000Z
|
from django.contrib.auth import get_user_model
from rest_framework import serializers
from .models import Category, Article, Comment
User = get_user_model()
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'full_name', 'email')
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = ('id', 'user', 'author', 'content', 'updated')
class CategorySerializer(serializers.ModelSerializer):
slug = serializers.SlugField(read_only=True, allow_unicode=True)
class Meta:
model = Category
fields = ('id', 'name', 'slug')
class ArticleSerializer(serializers.ModelSerializer):
url = serializers.CharField(source='get_absolute_url')
author = UserSerializer()
category = CategorySerializer()
comments_count = serializers.IntegerField()
class Meta:
model = Article
fields = ('title', 'url', 'author', 'category', 'created', 'updated', 'comments_count')
class FullArticleSerializer(ArticleSerializer):
comments = CommentSerializer(source='comment_set', many=True)
class Meta(ArticleSerializer.Meta):
fields = ArticleSerializer.Meta.fields + ('content', 'comments',)
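ArticleSerializer declares comments_count as a plain IntegerField rather than a model field, so it expects the view's queryset to provide that value as an annotation. A minimal sketch of a queryset that would satisfy both the annotation and the nested comments is shown below; the surrounding view code is an assumption, only the annotation itself is implied by the serializer fields.
from django.db.models import Count
articles = (
    Article.objects
    .select_related('author', 'category')          # avoid extra queries for nested serializers
    .prefetch_related('comment_set')                # used by FullArticleSerializer.comments
    .annotate(comments_count=Count('comment'))      # feeds ArticleSerializer.comments_count
)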
| 26.3125 | 95 | 0.699129 |
99a6a26b4a15eafaa8e1fd8c54a1589b7c16c1e0
| 631
|
py
|
Python
|
packages/aws-cdk/lib/init-templates/v1/sample-app/python/tests/unit/test_%name.PythonModule%_stack.template.py
|
RichiCoder1/aws-cdk
|
626e6aa1a27feffe7ce60a46a6fdcf26f317eaef
|
[
"Apache-2.0"
] | 6,159
|
2019-07-11T16:53:02.000Z
|
2022-03-31T20:52:53.000Z
|
packages/aws-cdk/lib/init-templates/v1/sample-app/python/tests/unit/test_%name.PythonModule%_stack.template.py
|
RichiCoder1/aws-cdk
|
626e6aa1a27feffe7ce60a46a6fdcf26f317eaef
|
[
"Apache-2.0"
] | 16,881
|
2019-07-11T18:58:07.000Z
|
2022-03-31T23:59:47.000Z
|
packages/aws-cdk/lib/init-templates/v1/sample-app/python/tests/unit/test_%name.PythonModule%_stack.template.py
|
RichiCoder1/aws-cdk
|
626e6aa1a27feffe7ce60a46a6fdcf26f317eaef
|
[
"Apache-2.0"
] | 2,504
|
2019-07-11T17:52:52.000Z
|
2022-03-31T21:19:53.000Z
|
from aws_cdk import (
core,
assertions
)
from %name.PythonModule%.%name.PythonModule%_stack import %name.PascalCased%Stack
def test_sqs_queue_created():
app = core.App()
stack = %name.PascalCased%Stack(app, "%name.StackName%")
template = assertions.Template.from_stack(stack)
template.has_resource_properties("AWS::SQS::Queue", {
"VisibilityTimeout": 300
})
def test_sns_topic_created():
app = core.App()
stack = %name.PascalCased%Stack(app, "%name.StackName%")
template = assertions.Template.from_stack(stack)
template.resource_count_is("AWS::SNS::Topic", 1)
| 25.24 | 81 | 0.684628 |
fe797ce88da30e74ae46e836de1684a882cb5715
| 925
|
py
|
Python
|
crawling_scraping/tabeloghatebu/myproject/myproject/items.py
|
litteletips/crawling_scraping-scrapy_tool
|
6d70b4d2a91f2d2bebcc5266ed43ad9be4723bc0
|
[
"MIT"
] | null | null | null |
crawling_scraping/tabeloghatebu/myproject/myproject/items.py
|
litteletips/crawling_scraping-scrapy_tool
|
6d70b4d2a91f2d2bebcc5266ed43ad9be4723bc0
|
[
"MIT"
] | 16
|
2021-03-19T09:44:52.000Z
|
2022-03-12T00:22:14.000Z
|
crawling_scraping/tabeloghatebu/myproject/myproject/items.py
|
litteletips/crawling_scraping
|
6d70b4d2a91f2d2bebcc5266ed43ad9be4723bc0
|
[
"MIT"
] | null | null | null |
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
# Module that holds the data extracted by spiders.
# Restaurant(): Tabelog restaurant information
# Page(): Item class for storing web page information
import scrapy
class Restaurant(scrapy.Item):
"""
食べログのレストラン情報。
"""
name = scrapy.Field()
address = scrapy.Field()
latitude = scrapy.Field()
longitude = scrapy.Field()
station = scrapy.Field()
score = scrapy.Field()
class Page(scrapy.Item):
"""
Webページ。
"""
url = scrapy.Field()
title = scrapy.Field()
content = scrapy.Field()
def __repr__(self):
"""
ログへの出力時に長くなり過ぎないよう、contentを省略する。
"""
p = Page(self) # このPageを複製したPageを得る。
if len(p['content']) > 100:
p['content'] = p['content'][:100] + '...' # 100文字より長い場合は省略する。
return super(Page, p).__repr__() # 複製したPageの文字列表現を返す。
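A hedged sketch of how a spider callback might populate the Page item follows; the callback name and CSS selector are illustrative and not part of this module.
def parse_page(response):
    # Build a Page item from a scrapy Response; content keeps the full HTML text.
    yield Page(
        url=response.url,
        title=response.css('title::text').get(),
        content=response.text,
    )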
| 20.555556 | 74 | 0.610811 |
e97687de3427378489a91eaa65da3b8f5bee7b3b
| 15,952
|
py
|
Python
|
pandas/core/arrays/_mixins.py
|
mujtahidalam/pandas
|
526468c8fe6fc5157aaf2fce327c5ab2a3350f49
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2021-04-28T05:15:42.000Z
|
2021-04-28T05:15:42.000Z
|
pandas/core/arrays/_mixins.py
|
mujtahidalam/pandas
|
526468c8fe6fc5157aaf2fce327c5ab2a3350f49
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/core/arrays/_mixins.py
|
mujtahidalam/pandas
|
526468c8fe6fc5157aaf2fce327c5ab2a3350f49
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
from __future__ import annotations
from functools import wraps
from typing import (
Any,
Sequence,
TypeVar,
cast,
)
import numpy as np
from pandas._libs import lib
from pandas._typing import (
F,
PositionalIndexer2D,
Shape,
type_t,
)
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.util._validators import (
validate_bool_kwarg,
validate_fillna_kwargs,
)
from pandas.core.dtypes.common import is_dtype_equal
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.missing import array_equivalent
from pandas.core import missing
from pandas.core.algorithms import (
take,
unique,
value_counts,
)
from pandas.core.array_algos.transforms import shift
from pandas.core.arrays.base import ExtensionArray
from pandas.core.construction import extract_array
from pandas.core.indexers import check_array_indexer
from pandas.core.sorting import nargminmax
NDArrayBackedExtensionArrayT = TypeVar(
"NDArrayBackedExtensionArrayT", bound="NDArrayBackedExtensionArray"
)
def ravel_compat(meth: F) -> F:
"""
Decorator to ravel a 2D array before passing it to a cython operation,
then reshape the result to our own shape.
"""
@wraps(meth)
def method(self, *args, **kwargs):
if self.ndim == 1:
return meth(self, *args, **kwargs)
flags = self._ndarray.flags
flat = self.ravel("K")
result = meth(flat, *args, **kwargs)
order = "F" if flags.f_contiguous else "C"
return result.reshape(self.shape, order=order)
return cast(F, method)
class NDArrayBackedExtensionArray(ExtensionArray):
"""
ExtensionArray that is backed by a single NumPy ndarray.
"""
_ndarray: np.ndarray
def _from_backing_data(
self: NDArrayBackedExtensionArrayT, arr: np.ndarray
) -> NDArrayBackedExtensionArrayT:
"""
Construct a new ExtensionArray `new_array` with `arr` as its _ndarray.
This should round-trip:
self == self._from_backing_data(self._ndarray)
"""
raise AbstractMethodError(self)
def _box_func(self, x):
"""
Wrap numpy type in our dtype.type if necessary.
"""
return x
def _validate_scalar(self, value):
# used by NDArrayBackedExtensionIndex.insert
raise AbstractMethodError(self)
# ------------------------------------------------------------------------
def take(
self: NDArrayBackedExtensionArrayT,
indices: Sequence[int],
*,
allow_fill: bool = False,
fill_value: Any = None,
axis: int = 0,
) -> NDArrayBackedExtensionArrayT:
if allow_fill:
fill_value = self._validate_fill_value(fill_value)
new_data = take(
self._ndarray,
# error: Argument 2 to "take" has incompatible type "Sequence[int]";
# expected "ndarray"
indices, # type: ignore[arg-type]
allow_fill=allow_fill,
fill_value=fill_value,
axis=axis,
)
return self._from_backing_data(new_data)
def _validate_fill_value(self, fill_value):
"""
If a fill_value is passed to `take` convert it to a representation
suitable for self._ndarray, raising TypeError if this is not possible.
Parameters
----------
fill_value : object
Returns
-------
fill_value : native representation
Raises
------
TypeError
"""
raise AbstractMethodError(self)
# ------------------------------------------------------------------------
# TODO: make this a cache_readonly; for that to work we need to remove
# the _index_data kludge in libreduction
@property
def shape(self) -> Shape:
return self._ndarray.shape
def __len__(self) -> int:
return self.shape[0]
@cache_readonly
def ndim(self) -> int:
return len(self.shape)
@cache_readonly
def size(self) -> int:
return self._ndarray.size
@cache_readonly
def nbytes(self) -> int:
return self._ndarray.nbytes
def reshape(
self: NDArrayBackedExtensionArrayT, *args, **kwargs
) -> NDArrayBackedExtensionArrayT:
new_data = self._ndarray.reshape(*args, **kwargs)
return self._from_backing_data(new_data)
def ravel(
self: NDArrayBackedExtensionArrayT, *args, **kwargs
) -> NDArrayBackedExtensionArrayT:
new_data = self._ndarray.ravel(*args, **kwargs)
return self._from_backing_data(new_data)
@property
def T(self: NDArrayBackedExtensionArrayT) -> NDArrayBackedExtensionArrayT:
new_data = self._ndarray.T
return self._from_backing_data(new_data)
# ------------------------------------------------------------------------
def equals(self, other) -> bool:
if type(self) is not type(other):
return False
if not is_dtype_equal(self.dtype, other.dtype):
return False
return bool(array_equivalent(self._ndarray, other._ndarray))
def _values_for_argsort(self) -> np.ndarray:
return self._ndarray
# Signature of "argmin" incompatible with supertype "ExtensionArray"
def argmin(self, axis: int = 0, skipna: bool = True): # type:ignore[override]
# override base class by adding axis keyword
validate_bool_kwarg(skipna, "skipna")
if not skipna and self.isna().any():
raise NotImplementedError
return nargminmax(self, "argmin", axis=axis)
# Signature of "argmax" incompatible with supertype "ExtensionArray"
def argmax(self, axis: int = 0, skipna: bool = True): # type:ignore[override]
# override base class by adding axis keyword
validate_bool_kwarg(skipna, "skipna")
if not skipna and self.isna().any():
raise NotImplementedError
return nargminmax(self, "argmax", axis=axis)
def copy(self: NDArrayBackedExtensionArrayT) -> NDArrayBackedExtensionArrayT:
new_data = self._ndarray.copy()
return self._from_backing_data(new_data)
def repeat(
self: NDArrayBackedExtensionArrayT, repeats, axis=None
) -> NDArrayBackedExtensionArrayT:
"""
Repeat elements of an array.
See Also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat((), {"axis": axis})
new_data = self._ndarray.repeat(repeats, axis=axis)
return self._from_backing_data(new_data)
def unique(self: NDArrayBackedExtensionArrayT) -> NDArrayBackedExtensionArrayT:
new_data = unique(self._ndarray)
return self._from_backing_data(new_data)
@classmethod
@doc(ExtensionArray._concat_same_type)
def _concat_same_type(
cls: type[NDArrayBackedExtensionArrayT],
to_concat: Sequence[NDArrayBackedExtensionArrayT],
axis: int = 0,
) -> NDArrayBackedExtensionArrayT:
dtypes = {str(x.dtype) for x in to_concat}
if len(dtypes) != 1:
raise ValueError("to_concat must have the same dtype (tz)", dtypes)
new_values = [x._ndarray for x in to_concat]
new_values = np.concatenate(new_values, axis=axis)
# error: Argument 1 to "_from_backing_data" of "NDArrayBackedExtensionArray" has
# incompatible type "List[ndarray]"; expected "ndarray"
return to_concat[0]._from_backing_data(new_values) # type: ignore[arg-type]
@doc(ExtensionArray.searchsorted)
def searchsorted(self, value, side="left", sorter=None):
value = self._validate_searchsorted_value(value)
return self._ndarray.searchsorted(value, side=side, sorter=sorter)
def _validate_searchsorted_value(self, value):
return value
@doc(ExtensionArray.shift)
def shift(self, periods=1, fill_value=None, axis=0):
fill_value = self._validate_shift_value(fill_value)
new_values = shift(self._ndarray, periods, axis, fill_value)
return self._from_backing_data(new_values)
def _validate_shift_value(self, fill_value):
# TODO: after deprecation in datetimelikearraymixin is enforced,
# we can remove this and ust validate_fill_value directly
return self._validate_fill_value(fill_value)
def __setitem__(self, key, value):
key = check_array_indexer(self, key)
value = self._validate_setitem_value(value)
self._ndarray[key] = value
def _validate_setitem_value(self, value):
return value
def __getitem__(
self: NDArrayBackedExtensionArrayT,
key: PositionalIndexer2D,
) -> NDArrayBackedExtensionArrayT | Any:
if lib.is_integer(key):
# fast-path
result = self._ndarray[key]
if self.ndim == 1:
return self._box_func(result)
return self._from_backing_data(result)
# error: Value of type variable "AnyArrayLike" of "extract_array" cannot be
# "Union[int, slice, ndarray]"
# error: Incompatible types in assignment (expression has type "ExtensionArray",
# variable has type "Union[int, slice, ndarray]")
key = extract_array( # type: ignore[type-var,assignment]
key, extract_numpy=True
)
key = check_array_indexer(self, key)
result = self._ndarray[key]
if lib.is_scalar(result):
return self._box_func(result)
result = self._from_backing_data(result)
return result
@doc(ExtensionArray.fillna)
def fillna(
self: NDArrayBackedExtensionArrayT, value=None, method=None, limit=None
) -> NDArrayBackedExtensionArrayT:
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
mask = self.isna()
# error: Argument 2 to "check_value_size" has incompatible type
# "ExtensionArray"; expected "ndarray"
value = missing.check_value_size(
value, mask, len(self) # type: ignore[arg-type]
)
if mask.any():
if method is not None:
# TODO: check value is None
# (for now) when self.ndim == 2, we assume axis=0
func = missing.get_fill_func(method, ndim=self.ndim)
new_values, _ = func(self._ndarray.T.copy(), limit=limit, mask=mask.T)
new_values = new_values.T
# TODO: PandasArray didn't used to copy, need tests for this
new_values = self._from_backing_data(new_values)
else:
# fill with value
new_values = self.copy()
new_values[mask] = value
else:
# We validate the fill_value even if there is nothing to fill
if value is not None:
self._validate_setitem_value(value)
new_values = self.copy()
return new_values
# ------------------------------------------------------------------------
# Reductions
def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
meth = getattr(self, name, None)
if meth:
return meth(skipna=skipna, **kwargs)
else:
msg = f"'{type(self).__name__}' does not implement reduction '{name}'"
raise TypeError(msg)
def _wrap_reduction_result(self, axis: int | None, result):
if axis is None or self.ndim == 1:
return self._box_func(result)
return self._from_backing_data(result)
# ------------------------------------------------------------------------
def __repr__(self) -> str:
if self.ndim == 1:
return super().__repr__()
from pandas.io.formats.printing import format_object_summary
# the short repr has no trailing newline, while the truncated
# repr does. So we include a newline in our template, and strip
# any trailing newlines from format_object_summary
lines = [
format_object_summary(x, self._formatter(), indent_for_name=False).rstrip(
", \n"
)
for x in self
]
data = ",\n".join(lines)
class_name = f"<{type(self).__name__}>"
return f"{class_name}\n[\n{data}\n]\nShape: {self.shape}, dtype: {self.dtype}"
# ------------------------------------------------------------------------
# __array_function__ methods
def putmask(self: NDArrayBackedExtensionArrayT, mask: np.ndarray, value) -> None:
"""
Analogue to np.putmask(self, mask, value)
Parameters
----------
mask : np.ndarray[bool]
value : scalar or listlike
Raises
------
TypeError
If value cannot be cast to self.dtype.
"""
value = self._validate_setitem_value(value)
np.putmask(self._ndarray, mask, value)
def where(
self: NDArrayBackedExtensionArrayT, mask: np.ndarray, value
) -> NDArrayBackedExtensionArrayT:
"""
Analogue to np.where(mask, self, value)
Parameters
----------
mask : np.ndarray[bool]
value : scalar or listlike
Raises
------
TypeError
If value cannot be cast to self.dtype.
"""
value = self._validate_setitem_value(value)
res_values = np.where(mask, self._ndarray, value)
return self._from_backing_data(res_values)
def delete(
self: NDArrayBackedExtensionArrayT, loc, axis: int = 0
) -> NDArrayBackedExtensionArrayT:
res_values = np.delete(self._ndarray, loc, axis=axis)
return self._from_backing_data(res_values)
def swapaxes(
self: NDArrayBackedExtensionArrayT, axis1, axis2
) -> NDArrayBackedExtensionArrayT:
res_values = self._ndarray.swapaxes(axis1, axis2)
return self._from_backing_data(res_values)
# ------------------------------------------------------------------------
# Additional array methods
# These are not part of the EA API, but we implement them because
# pandas assumes they're there.
def value_counts(self, dropna: bool = True):
"""
Return a Series containing counts of unique values.
Parameters
----------
dropna : bool, default True
Don't include counts of NA values.
Returns
-------
Series
"""
if self.ndim != 1:
raise NotImplementedError
from pandas import (
Index,
Series,
)
if dropna:
# error: Unsupported operand type for ~ ("ExtensionArray")
values = self[~self.isna()]._ndarray # type: ignore[operator]
else:
values = self._ndarray
result = value_counts(values, sort=False, dropna=dropna)
index_arr = self._from_backing_data(np.asarray(result.index._data))
index = Index(index_arr, name=result.index.name)
return Series(result._values, index=index, name=result.name)
# ------------------------------------------------------------------------
# numpy-like methods
@classmethod
def _empty(
cls: type_t[NDArrayBackedExtensionArrayT], shape: Shape, dtype: ExtensionDtype
) -> NDArrayBackedExtensionArrayT:
"""
Analogous to np.empty(shape, dtype=dtype)
Parameters
----------
shape : tuple[int]
dtype : ExtensionDtype
"""
# The base implementation uses a naive approach to find the dtype
# for the backing ndarray
arr = cls._from_sequence([], dtype=dtype)
backing = np.empty(shape, dtype=arr._ndarray.dtype)
return arr._from_backing_data(backing)
| 32.488798 | 88 | 0.606131 |
db05c6f7cfb30942e974fa246203fd7193899160
| 16,097
|
py
|
Python
|
microprocessor/processor.py
|
AlexLitvino/i8080_simulator
|
fedd950c5e2843f53276f11be9878abbbb5d7c65
|
[
"Apache-2.0"
] | null | null | null |
microprocessor/processor.py
|
AlexLitvino/i8080_simulator
|
fedd950c5e2843f53276f11be9878abbbb5d7c65
|
[
"Apache-2.0"
] | null | null | null |
microprocessor/processor.py
|
AlexLitvino/i8080_simulator
|
fedd950c5e2843f53276f11be9878abbbb5d7c65
|
[
"Apache-2.0"
] | null | null | null |
from time import perf_counter  # time.clock was removed in Python 3.8
import sys
sys.path.append("./../")
sys.path.append("./../common")
#sys.path.append("./../microprocessor")
from microprocessor.memory import Memory
from microprocessor.register import Register
from microprocessor.instructions_decoder import cmd_decoder
from microprocessor.misc_registers import *
from microprocessor.port import Port
from hex_reader import populate_memory
from common.constants import _IN, _OUT
from common.utilities import get_bit, hex_formatter
from common.command_cycles import cycles
from common.command_cycles import REGULAR, REGISTER, MEMORY, NEXT_CMD, RET, CALL
import common.commands as cmd
class Processor:
_MAX_LOOP = 30
def __init__(self, file_name):
self.file_name = file_name
self.memory = Memory()
#self.register_file = RegisterFile()
# set PC to 0
# TODO: None parameters, should be avoided?
self.F = StatusRegister()
self.A = Accumulator("A", None, self.F) # accumulator
self.PC = ProgramCounter()
self.SP = StackPointer()
self.B = Register("B", self.A, self.F)
self.C = Register("C", self.A, self.F)
self.D = Register("D", self.A, self.F)
self.E = Register("E", self.A, self.F)
self.H = Register("H", self.A, self.F)
self.L = Register("L", self.A, self.F)
self.in_ports = {}
self.out_ports = {}
self.frequency = 1
self._halt_flag = False
def start(self):
pass
def load(self):
self.memory._memory = populate_memory(self.file_name)
self.memory.dump()
def dump(self):
print('*'*20)
print("A:" + hex_formatter(self.A.value) + '\t' + "F:" + "UNKNOWN") # TODO: should add bin formatter or special printing for F register
print("B:" + hex_formatter(self.B.value) + '\t' + "C:" + hex_formatter(self.C.value))
print("D:" + hex_formatter(self.D.value) + '\t' + "E:" + hex_formatter(self.E.value))
print("H:" + hex_formatter(self.H.value) + '\t' + "L:" + hex_formatter(self.L.value))
print("PC:" + hex_formatter(self.PC.value))
print("SP:" + "UNKNOWN")
print('*'*20)
def start_from_address(self, start_address):
self.PC.value = start_address
def _perform_clock_cycles(self, cycles):
pass
def get_port(self, port_number, direction):
port = None
ports_list = {_IN: self.in_ports, _OUT: self.out_ports}.get(direction, None)
if port_number in ports_list:
port = ports_list[port_number]
else:
port = Port(port_number, direction)
ports_list[port_number] = port
return port
    COMMAND_DICT = {}  # TODO: is this variable necessary?
def run(self):
iteration = 0
while True:
print("Iteration " + str(iteration) + " Start" + '*'*10)
#self.dump()
print("PC = " + str(self.PC.value))
print("MEMORY = " + str(self.memory[self.PC.value]))
cmd_tuple = cmd_decoder(self.memory[self.PC.value])
print("cmd_tuple::" + str(cmd_tuple))
cmd_name = cmd_tuple[0]
operand1 = cmd_tuple[1]
operand2 = cmd_tuple[2]
print("cmd_name::" + str(cmd_name))
print("operand1::" + str(operand1))
print("operand2::" + str(operand2))
self._get_command_handler(cmd_name)(operand1, operand2)
#self.dump()
print("Iteration " + str(iteration) + " End" + '*'*10)
print()
if self._halt_flag:
break
iteration += 1
if iteration >= Processor._MAX_LOOP:
break
def _get_command_handler(self, cmd_name):
print(cmd_name)
command = self.__getattribute__("_cmd_" + cmd_name + "_handler")
return command
def _clock(self, number_of_cycles, correction):
pass
####################################################################################################################
# Command handlers section
####################################################################################################################
# Move, load and store
def _cmd_mov_handler(self, operand1, operand2):
raise NotImplementedError("MOV not implemented yet.")
def _cmd_mvi_handler(self, operand1, operand2):
        command_start_time = perf_counter()
print("In MVI")
destination = operand1
print(destination)
r = self.__getattribute__(destination)
self.PC.inc()
r.value = self.memory[self.PC.value]
self.PC.inc()
        command_end_time = perf_counter()
self._clock(cycles[cmd.mvi][REGISTER], command_end_time - command_start_time)
def _cmd_lxi_handler(self, operand1, operand2):
# TODO: It doesn't support LXI SP
print("In LXI")
rp = operand1
self.PC.inc()
lb = self.memory[self.PC.value]
self.__getattribute__(rp[1]).value = lb
self.PC.inc()
hb = self.memory[self.PC.value]
self.__getattribute__(rp[0]).value = hb
self.PC.inc()
def _cmd_stax_handler(self, operand1, operand2):
raise NotImplementedError("STAX not implemented yet.")
def _cmd_ldax_handler(self, operand1, operand2):
print("In LDAX")
# Load A from the memory cell with address Loc(BC)
rp = operand1
rh = self.__getattribute__(rp[0])
print("rh::" + str(rh.name))
rl = self.__getattribute__(rp[1])
print("rl::" + str(rl.name))
memory_address = (rh.value << 8) + rl.value
print("memory_address " + str(memory_address))
self.A.value = self.memory[memory_address]
print("A::" + str(self.A.value))
self.PC.inc()
pass
def _cmd_sta_handler(self, operand1, operand2):
raise NotImplementedError("STA not implemented yet.")
def _cmd_lda_handler(self, operand1, operand2):
raise NotImplementedError("LDA not implemented yet.")
def _cmd_shld_handler(self, operand1, operand2):
raise NotImplementedError("SHLD not implemented yet.")
def _cmd_lhld_handler(self, operand1, operand2):
raise NotImplementedError("LHLD not implemented yet.")
def _cmd_xchg_handler(self, operand1, operand2):
raise NotImplementedError("XCHG not implemented yet.")
# Stack operations
def _cmd_push_handler(self, operand1, operand2):
raise NotImplementedError("PUSH not implemented yet.")
def _cmd_pop_handler(self, operand1, operand2):
raise NotImplementedError("POP not implemented yet.")
def _cmd_xthl_handler(self, operand1, operand2):
raise NotImplementedError("XTHL not implemented yet.")
def _cmd_sphl_handler(self, operand1, operand2):
raise NotImplementedError("SPHL not implemented yet.")
# Jump
def _cmd_jmp_handler(self, operand1, operand2):
raise NotImplementedError("JMP not implemented yet.")
def _cmd_jc_handler(self, operand1, operand2):
raise NotImplementedError("JC not implemented yet.")
def _cmd_jnc_handler(self, operand1, operand2):
raise NotImplementedError("JNC not implemented yet.")
def _cmd_jz_handler(self, operand1, operand2):
raise NotImplementedError("JZ not implemented yet.")
def _cmd_jnz_handler(self, operand1, operand2):
print("In JNZ")
# TODO:
self.PC.inc()
ml = self.memory[self.PC.value]
self.PC.inc()
mh = self.memory[self.PC.value]
#self.PC.inc()
address = (mh << 8) + ml
if self.F.is_flag_cleared('Z'):
self.PC.value = address
else:
self.PC.inc()
pass
def _cmd_jp_handler(self, operand1, operand2):
raise NotImplementedError("JP not implemented yet.")
def _cmd_jm_handler(self, operand1, operand2):
raise NotImplementedError("JM not implemented yet.")
def _cmd_jpe_handler(self, operand1, operand2):
raise NotImplementedError("JPE not implemented yet.")
def _cmd_jpo_handler(self, operand1, operand2):
raise NotImplementedError("JPO not implemented yet.")
def _cmd_pchl_handler(self, operand1, operand2):
raise NotImplementedError("PCHL not implemented yet.")
# Call
def _cmd_call_handler(self, operand1, operand2):
raise NotImplementedError("CALL not implemented yet.")
def _cmd_cc_handler(self, operand1, operand2):
raise NotImplementedError("CC not implemented yet.")
def _cmd_cnc_handler(self, operand1, operand2):
raise NotImplementedError("CNC not implemented yet.")
def _cmd_cz_handler(self, operand1, operand2):
raise NotImplementedError("CZ not implemented yet.")
def _cmd_cnz_handler(self, operand1, operand2):
raise NotImplementedError("CNZ not implemented yet.")
def _cmd_cp_handler(self, operand1, operand2):
raise NotImplementedError("CP not implemented yet.")
def _cmd_cm_handler(self, operand1, operand2):
raise NotImplementedError("CM not implemented yet.")
def _cmd_cpe_handler(self, operand1, operand2):
raise NotImplementedError("CPE not implemented yet.")
def _cmd_cpo_handler(self, operand1, operand2):
raise NotImplementedError("CPO not implemented yet.")
# Return
def _cmd_ret_handler(self, operand1, operand2):
raise NotImplementedError("RET not implemented yet.")
def _cmd_rc_handler(self, operand1, operand2):
raise NotImplementedError("RC not implemented yet.")
def _cmd_rnc_handler(self, operand1, operand2):
raise NotImplementedError("RNC not implemented yet.")
def _cmd_rz_handler(self, operand1, operand2):
raise NotImplementedError("RZ not implemented yet.")
def _cmd_rnz_handler(self, operand1, operand2):
raise NotImplementedError("RNZ not implemented yet.")
def _cmd_rp_handler(self, operand1, operand2):
raise NotImplementedError("RP not implemented yet.")
def _cmd_rm_handler(self, operand1, operand2):
raise NotImplementedError("RM not implemented yet.")
def _cmd_rpe_handler(self, operand1, operand2):
raise NotImplementedError("RPE not implemented yet.")
def _cmd_rpo_handler(self, operand1, operand2):
raise NotImplementedError("RPO not implemented yet.")
# Restart
def _cmd_rst_handler(self, operand1, operand2):
raise NotImplementedError("RST not implemented yet.")
# Increment and decrement
def _cmd_inr_handler(self, operand1, operand2):
raise NotImplementedError("INR not implemented yet.")
def _cmd_dcr_handler(self, operand1, operand2):
print("In DCR")
r = operand1
reg = self.__getattribute__(r)
reg.dcr()
self.PC.inc()
pass
def _cmd_inx_handler(self, operand1, operand2):
# TODO: It doesn't support INX SP
print("In INX")
rp = operand1
rh = self.__getattribute__(rp[0])
rl = self.__getattribute__(rp[1])
rp_value = (rh.value << 8) + rl.value
rp_value += 1
rh.value = (rp_value & 0xFF00) >> 8
rl.value = (rp_value & 0xFF)
self.PC.inc()
pass
def _cmd_dcx_handler(self, operand1, operand2):
# TODO: It doesn't support DCX SP
raise NotImplementedError("DCX not implemented yet.")
# Add
def _cmd_add_handler(self, operand1, operand2):
raise NotImplementedError("ADD not implemented yet.")
def _cmd_adc_handler(self, operand1, operand2):
raise NotImplementedError("ADC not implemented yet.")
def _cmd_adi_handler(self, operand1, operand2):
raise NotImplementedError("ADI not implemented yet.")
def _cmd_aci_handler(self, operand1, operand2):
raise NotImplementedError("ACI not implemented yet.")
def _cmd_dad_handler(self, operand1, operand2):
raise NotImplementedError("DAD not implemented yet.")
# Subtract
def _cmd_sub_handler(self, operand1, operand2):
raise NotImplementedError("SUB not implemented yet.")
def _cmd_sbb_handler(self, operand1, operand2):
raise NotImplementedError("SBB not implemented yet.")
def _cmd_sui_handler(self, operand1, operand2):
raise NotImplementedError("SUI not implemented yet.")
def _cmd_sbi_handler(self, operand1, operand2):
raise NotImplementedError("SBI not implemented yet.")
# Logical
def _cmd_ana_handler(self, operand1, operand2):
raise NotImplementedError("ANA not implemented yet.")
def _cmd_xra_handler(self, operand1, operand2):
raise NotImplementedError("XRA not implemented yet.")
def _cmd_ora_handler(self, operand1, operand2):
raise NotImplementedError("ORA not implemented yet.")
def _cmd_cmp_handler(self, operand1, operand2):
raise NotImplementedError("CMP not implemented yet.")
def _cmd_ani_handler(self, operand1, operand2):
raise NotImplementedError("ANI not implemented yet.")
def _cmd_xri_handler(self, operand1, operand2):
raise NotImplementedError("XRI not implemented yet.")
def _cmd_ori_handler(self, operand1, operand2):
raise NotImplementedError("ORI not implemented yet.")
def _cmd_cpi_handler(self, operand1, operand2):
raise NotImplementedError("CPI not implemented yet.")
# Rotate
def _cmd_rlc_handler(self, operand1, operand2):
raise NotImplementedError("RLC not implemented yet.")
def _cmd_rrc_handler(self, operand1, operand2):
raise NotImplementedError("RRC not implemented yet.")
def _cmd_ral_handler(self, operand1, operand2):
raise NotImplementedError("RAL not implemented yet.")
def _cmd_rar_handler(self, operand1, operand2):
raise NotImplementedError("RAR not implemented yet.")
# Specials
def _cmd_cma_handler(self, operand1, operand2):
raise NotImplementedError("CMA not implemented yet.")
def _cmd_stc_handler(self, operand1, operand2):
raise NotImplementedError("STC not implemented yet.")
def _cmd_cmc_handler(self, operand1, operand2):
raise NotImplementedError("CMC not implemented yet.")
def _cmd_daa_handler(self, operand1, operand2):
raise NotImplementedError("DAA not implemented yet.")
# Input/Output
# TODO: this could not work because command named in_cmd
def _cmd_in_handler(self, operand1, operand2):
raise NotImplementedError("IN not implemented yet.")
def _cmd_out_handler(self, operand1, operand2):
print("In OUT")
self.PC.inc()
port_number = self.memory[self.PC.value]
print("Output to port #" + str(port_number) + " value " + str(self.A.value))
with open("output.txt", 'a') as f:
f.write(str(chr(self.A.value)) + " ")
self.PC.inc()
pass
# Control
def _cmd_ei_handler(self, operand1, operand2):
raise NotImplementedError("EI not implemented yet.")
def _cmd_di_handler(self, operand1, operand2):
raise NotImplementedError("DI not implemented yet.")
def _cmd_nop_handler(self, operand1, operand2):
raise NotImplementedError("NOP not implemented yet.")
def _cmd_hlt_handler(self, operand1, operand2):
print("In HLT")
self.PC.inc()
self._halt_flag = True
pass
####################################################################################################################
# End of command handlers section
####################################################################################################################
if __name__ == '__main__':
file_name = "./../program_samples/hello.hex"
processor = Processor(file_name)
# port configuration
processor.get_port(255, "IN").set_callback(print)
processor.get_port(255, "IN").set_value(15)
processor.get_port(255, "OUT").get_value()
processor.load()
processor.run()
| 34.103814 | 144 | 0.635087 |
d891fd75aaffd574d7d475153fa9c10ac1d7f060
| 443
|
py
|
Python
|
halfpipe/utils/image.py
|
fossabot/Halfpipe-1
|
9e9fae20467d2c73b67fcb2cc73ed7144d79db3a
|
[
"FTL"
] | null | null | null |
halfpipe/utils/image.py
|
fossabot/Halfpipe-1
|
9e9fae20467d2c73b67fcb2cc73ed7144d79db3a
|
[
"FTL"
] | null | null | null |
halfpipe/utils/image.py
|
fossabot/Halfpipe-1
|
9e9fae20467d2c73b67fcb2cc73ed7144d79db3a
|
[
"FTL"
] | null | null | null |
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
def niftidim(input, idim):
if isinstance(input, str):
import nibabel as nib
input = nib.load(input)
if len(input.shape) > idim:
return input.shape[idim]
else:
return 1
def nvol(input):
from halfpipe.utils import niftidim
return niftidim(input, 3)
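A quick, hedged illustration of how these helpers behave; the file path is a placeholder and the image is assumed to be 4D.
n_volumes = nvol('func.nii.gz')        # size of the 4th dimension, or 1 for a 3D image
n_slices = niftidim('func.nii.gz', 2)  # size of the 3rd dimension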
| 21.095238 | 73 | 0.609481 |
efafc05de9cfcad4fe88e78aab33ec4924f6d97a
| 4,604
|
py
|
Python
|
sdk/python/pulumi_aws/ec2transitgateway/get_route_table.py
|
michael-golden/pulumi-aws
|
165e876e166ecab1870e857822247585d78aef64
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/ec2transitgateway/get_route_table.py
|
michael-golden/pulumi-aws
|
165e876e166ecab1870e857822247585d78aef64
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/ec2transitgateway/get_route_table.py
|
michael-golden/pulumi-aws
|
165e876e166ecab1870e857822247585d78aef64
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetRouteTableResult:
"""
A collection of values returned by getRouteTable.
"""
def __init__(__self__, default_association_route_table=None, default_propagation_route_table=None, filters=None, id=None, tags=None, transit_gateway_id=None):
if default_association_route_table and not isinstance(default_association_route_table, bool):
raise TypeError("Expected argument 'default_association_route_table' to be a bool")
__self__.default_association_route_table = default_association_route_table
"""
Boolean whether this is the default association route table for the EC2 Transit Gateway
"""
if default_propagation_route_table and not isinstance(default_propagation_route_table, bool):
raise TypeError("Expected argument 'default_propagation_route_table' to be a bool")
__self__.default_propagation_route_table = default_propagation_route_table
"""
Boolean whether this is the default propagation route table for the EC2 Transit Gateway
"""
if filters and not isinstance(filters, list):
raise TypeError("Expected argument 'filters' to be a list")
__self__.filters = filters
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
EC2 Transit Gateway Route Table identifier
"""
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
__self__.tags = tags
"""
Key-value tags for the EC2 Transit Gateway Route Table
"""
if transit_gateway_id and not isinstance(transit_gateway_id, str):
raise TypeError("Expected argument 'transit_gateway_id' to be a str")
__self__.transit_gateway_id = transit_gateway_id
"""
EC2 Transit Gateway identifier
"""
class AwaitableGetRouteTableResult(GetRouteTableResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRouteTableResult(
default_association_route_table=self.default_association_route_table,
default_propagation_route_table=self.default_propagation_route_table,
filters=self.filters,
id=self.id,
tags=self.tags,
transit_gateway_id=self.transit_gateway_id)
def get_route_table(filters=None,id=None,tags=None,opts=None):
"""
Get information on an EC2 Transit Gateway Route Table.
## Example Usage
### By Filter
```python
import pulumi
import pulumi_aws as aws
example = aws.ec2transitgateway.get_route_table(filters=[
{
"name": "default-association-route-table",
"values": ["true"],
},
{
"name": "transit-gateway-id",
"values": ["tgw-12345678"],
},
])
```
### By Identifier
```python
import pulumi
import pulumi_aws as aws
example = aws.ec2transitgateway.get_route_table(id="tgw-rtb-12345678")
```
:param list filters: One or more configuration blocks containing name-values filters. Detailed below.
:param str id: Identifier of the EC2 Transit Gateway Route Table.
:param dict tags: Key-value tags for the EC2 Transit Gateway Route Table
The **filters** object supports the following:
* `name` (`str`) - Name of the filter.
* `values` (`list`) - List of one or more values for the filter.
"""
__args__ = dict()
__args__['filters'] = filters
__args__['id'] = id
__args__['tags'] = tags
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:ec2transitgateway/getRouteTable:getRouteTable', __args__, opts=opts).value
return AwaitableGetRouteTableResult(
default_association_route_table=__ret__.get('defaultAssociationRouteTable'),
default_propagation_route_table=__ret__.get('defaultPropagationRouteTable'),
filters=__ret__.get('filters'),
id=__ret__.get('id'),
tags=__ret__.get('tags'),
transit_gateway_id=__ret__.get('transitGatewayId'))
| 37.737705 | 162 | 0.673328 |
82be303d4548a21671033d80e6087bca914d7f5d
| 3,973
|
py
|
Python
|
soxbindings/effects.py
|
pseeth/soxbindings
|
5c2a37969a796a2762c23f6c09c8893538ce396c
|
[
"MIT"
] | 42
|
2020-05-27T10:00:48.000Z
|
2022-03-29T05:29:23.000Z
|
soxbindings/effects.py
|
pseeth/soxbindings
|
5c2a37969a796a2762c23f6c09c8893538ce396c
|
[
"MIT"
] | 7
|
2020-07-15T19:27:00.000Z
|
2022-01-20T10:46:37.000Z
|
soxbindings/effects.py
|
pseeth/soxbindings
|
5c2a37969a796a2762c23f6c09c8893538ce396c
|
[
"MIT"
] | 3
|
2020-07-15T21:11:48.000Z
|
2021-10-05T19:46:40.000Z
|
import numpy as np
from contextlib import contextmanager
MAX_NUM_EFFECTS_ARGS = 20
SOX_UNSPEC = 0
SOX_INITIALIZED = False
def get_available_effects():
from . import _soxbindings
return _soxbindings.get_effect_names()
def initialize_sox():
from . import _soxbindings
return _soxbindings.sox_init()
def quit_sox():
from . import _soxbindings
return _soxbindings.sox_quit()
@contextmanager
def sox_context():
global SOX_INITIALIZED
try:
val = initialize_sox()
SOX_INITIALIZED = True
yield val
finally:
# Code to release resource, e.g.:
quit_sox()
SOX_INITIALIZED = False
def build_flow_effects(input_data, sample_rate_in, sox_effects_chain,
in_channels=None, in_precision=16, out_channels=None,
sample_rate_out=None, out_precision=None):
global SOX_INITIALIZED
if not SOX_INITIALIZED:
with sox_context():
data, sample_rate = _build_flow_effects(
input_data, sample_rate_in, sox_effects_chain,
in_channels=in_channels, in_precision=in_precision,
out_channels=out_channels, sample_rate_out=sample_rate_out,
out_precision=out_precision
)
else:
data, sample_rate = _build_flow_effects(
input_data, sample_rate_in, sox_effects_chain,
in_channels=in_channels, in_precision=in_precision,
out_channels=out_channels, sample_rate_out=sample_rate_out,
out_precision=out_precision
)
return data, sample_rate
def _build_flow_effects(input_data, sample_rate_in, sox_effects_chain,
in_channels=None, in_precision=16, out_channels=None,
sample_rate_out=None, out_precision=None):
from . import _soxbindings
input_signal_info = _soxbindings.sox_signalinfo_t()
input_signal_info.rate = float(sample_rate_in)
if in_channels is None:
in_channels = (
1 if len(input_data.shape) == 1 else input_data.shape[-1]
)
input_signal_info.channels = in_channels
input_signal_info.length = input_data.size
input_signal_info.precision = in_precision
if sample_rate_out is None:
sample_rate_out = sample_rate_in
if out_precision is None:
out_precision = in_precision
if out_channels is None:
out_channels = in_channels
target_signal_info = _soxbindings.sox_signalinfo_t()
target_signal_info.rate = float(sample_rate_out)
target_signal_info.channels = out_channels
target_signal_info.length = SOX_UNSPEC
target_signal_info.precision = out_precision
target_encoding = _soxbindings.sox_encodinginfo_t()
target_encoding.encoding = _soxbindings.SOX_ENCODING_SIGN2
target_encoding.bits_per_sample = out_precision
target_encoding.compression = 0.0
target_encoding.reverse_bytes = _soxbindings.sox_option_default
target_encoding.reverse_nibbles = _soxbindings.sox_option_default
target_encoding.reverse_bits = _soxbindings.sox_option_default
target_encoding.opposite_endian = _soxbindings.sox_false
input_data = input_data.reshape(-1)
input_data = input_data * (1 << 31)
input_data = input_data.astype(np.int32)
sample_rate, num_channels, data = _soxbindings.build_flow_effects(
input_data, input_signal_info,
target_signal_info, target_encoding,
sox_effects_chain, MAX_NUM_EFFECTS_ARGS
)
data = data.reshape(-1, out_channels)
data = data / (1 << 31)
return data, sample_rate
def SoxEffect():
r"""Create an object for passing sox effect information between python and c++
Returns:
SoxEffect: An object with the following attributes: ename (str) which is the
name of effect, and eopts (List[str]) which is a list of effect options.
"""
from . import _soxbindings
return _soxbindings.SoxEffect()
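A minimal usage sketch for the functions above, assuming this module is importable as soxbindings.effects and that sox_effects_chain is a list of SoxEffect objects, as the parameter name suggests; the speed effect and its option value are only examples.
import numpy as np
from soxbindings.effects import SoxEffect, build_flow_effects
audio = np.zeros(44100, dtype=np.float32)   # one second of silence, mono, 44.1 kHz
effect = SoxEffect()
effect.ename = 'speed'      # effect name, per the SoxEffect docstring
effect.eopts = ['1.5']      # effect options as a list of strings
out, out_rate = build_flow_effects(audio, 44100, [effect])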
| 35.473214 | 84 | 0.704505 |
5df48163b0aef21d584a60c1915539fc9f51db4f
| 5,001
|
py
|
Python
|
qa/rpc-tests/wallet-hd.py
|
devronkim/kumsl
|
a70f068020f977ac2c9b55e121593e5631a3abc4
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/wallet-hd.py
|
devronkim/kumsl
|
a70f068020f977ac2c9b55e121593e5631a3abc4
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/wallet-hd.py
|
devronkim/kumsl
|
a70f068020f977ac2c9b55e121593e5631a3abc4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Hierarchical Deterministic wallet function."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletHDTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 2
def setup_network(self):
self.nodes = start_nodes(2, self.options.tmpdir, [['-usehd=0'], ['-usehd=1', '-keypool=0']], redirect_stderr=True)
self.is_network_split = False
connect_nodes_bi(self.nodes, 0, 1)
self.is_network_split=False
self.sync_all()
    def run_test(self):
tmpdir = self.options.tmpdir
# Make sure can't switch off usehd after wallet creation
stop_node(self.nodes[1],1)
try:
start_node(1, self.options.tmpdir, ['-usehd=0'], redirect_stderr=True)
raise AssertionError("Must not allow to turn off HD on an already existing HD wallet")
except Exception as e:
assert("kumsld exited with status 1 during initialization" in str(e))
# assert_start_raises_init_error(1, self.options.tmpdir, ['-usehd=0'], 'already existing HD wallet')
# self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1])
self.nodes[1] = start_node(1, self.options.tmpdir, ['-usehd=1', '-keypool=0'], redirect_stderr=True)
connect_nodes_bi(self.nodes, 0, 1)
# Make sure we use hd, keep chainid
chainid = self.nodes[1].getwalletinfo()['hdchainid']
assert_equal(len(chainid), 64)
# create an internal key
change_addr = self.nodes[1].getrawchangeaddress()
        change_addrV = self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/44'/1'/0'/1/0") #first internal child key
# Import a non-HD private key in the HD wallet
non_hd_add = self.nodes[0].getnewaddress()
self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))
# This should be enough to keep the master key and the non-HD key
self.nodes[1].backupwallet(tmpdir + "/hd.bak")
#self.nodes[1].dumpwallet(tmpdir + "/hd.dump")
# Derive some HD addresses and remember the last
# Also send funds to each add
self.nodes[0].generate(101)
hd_add = None
num_hd_adds = 300
for i in range(num_hd_adds):
hd_add = self.nodes[1].getnewaddress()
hd_info = self.nodes[1].validateaddress(hd_add)
assert_equal(hd_info["hdkeypath"], "m/44'/1'/0'/0/"+str(i+1))
assert_equal(hd_info["hdchainid"], chainid)
self.nodes[0].sendtoaddress(hd_add, 1)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(non_hd_add, 1)
self.nodes[0].generate(1)
# create an internal key (again)
change_addr = self.nodes[1].getrawchangeaddress()
        change_addrV = self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/44'/1'/0'/1/1") #second internal child key
self.sync_all()
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
print("Restore backup ...")
stop_node(self.nodes[1],1)
os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat")
self.nodes[1] = start_node(1, self.options.tmpdir, ['-usehd=1', '-keypool=0'], redirect_stderr=True)
#connect_nodes_bi(self.nodes, 0, 1)
# Assert that derivation is deterministic
hd_add_2 = None
for _ in range(num_hd_adds):
hd_add_2 = self.nodes[1].getnewaddress()
hd_info_2 = self.nodes[1].validateaddress(hd_add_2)
assert_equal(hd_info_2["hdkeypath"], "m/44'/1'/0'/0/"+str(_+1))
assert_equal(hd_info_2["hdchainid"], chainid)
assert_equal(hd_add, hd_add_2)
# Needs rescan
stop_node(self.nodes[1],1)
self.nodes[1] = start_node(1, self.options.tmpdir, ['-usehd=1', '-keypool=0', '-rescan'], redirect_stderr=True)
#connect_nodes_bi(self.nodes, 0, 1)
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
# send a tx and make sure its using the internal chain for the changeoutput
txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        outs = self.nodes[1].decoderawtransaction(self.nodes[1].gettransaction(txid)['hex'])['vout']
keypath = ""
for out in outs:
if out['value'] != 1:
keypath = self.nodes[1].validateaddress(out['scriptPubKey']['addresses'][0])['hdkeypath']
assert_equal(keypath[0:13], "m/44'/1'/0'/1")
if __name__ == '__main__':
    WalletHDTest().main()
| 44.256637 | 122 | 0.640272 |