text stringlengths 4 1.02M | meta dict |
|---|---|
"""Test case for the create volume function."""
import mock
from cinder import exception
from cinder.tests.unit.volume.drivers import disco
class CreateVolumeTestCase(disco.TestDISCODriver):
    """Test cases for DISCO connector."""

    def setUp(self):
        """Prepare variables and mock functions."""
        super(CreateVolumeTestCase, self).setUp()
        # Route every volumeCreate call to our fake request handler.
        patcher = mock.patch.object(self.requester,
                                    'volumeCreate',
                                    self.perform_disco_request)
        patcher.start()
        self.response = self.FAKE_RESPONSE['standard']['success']

    def perform_disco_request(self, *cmd, **kwargs):
        """Mock function for the suds client."""
        return self.response

    def test_create_volume(self):
        """Normal case."""
        disco_id = '1234567'
        self.response['result'] = disco_id
        created = self.driver.create_volume(self.volume)
        self.assertEqual(disco_id, created['provider_location'])

    def test_create_volume_fail(self):
        """Request to DISCO failed."""
        self.response = self.FAKE_RESPONSE['standard']['fail']
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.test_create_volume)
| {
"content_hash": "f011fe723ab148bf0c4eb8e61f594711",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 65,
"avg_line_length": 32.69230769230769,
"alnum_prop": 0.6258823529411764,
"repo_name": "phenoxim/cinder",
"id": "d4006d28998f0737934da69dab6eb18eff4797e9",
"size": "1913",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/volume/drivers/disco/test_create_volume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "621"
},
{
"name": "Python",
"bytes": "20325688"
},
{
"name": "Shell",
"bytes": "16353"
}
],
"symlink_target": ""
} |
"""Utilities for loading data."""
import pickle
import typing
import numpy as np
import tensorflow as tf
def augment_data(dataset, noise_scale):
  """Augments the data.

  Every field is repeated three times along the leading axis; the second
  copy of the rewards is shifted up and the third shifted down by
  ``noise_scale`` reward standard deviations.

  Args:
    dataset: Dictionary with data.
    noise_scale: Scale of noise to apply.

  Returns:
    Augmented data (the input dictionary, modified in place).
  """
  reward_std = np.std(np.concatenate(dataset['rewards'], 0))
  offset = reward_std * noise_scale
  for key in list(dataset):
    dataset[key] = np.repeat(dataset[key], 3, 0)
  # Copies at indices 1 (mod 3) and 2 (mod 3) get opposite-signed shifts.
  dataset['rewards'][1::3] += offset
  dataset['rewards'][2::3] -= offset
  return dataset
def weighted_moments(x, weights):
  """Returns the weighted mean and (Bessel-corrected) std of x along axis 0."""
  total_weight = np.sum(weights)
  mean = np.sum(x * weights, 0) / total_weight
  # Divide by (sum of weights - 1), mirroring the unbiased sample estimator.
  variance = np.sum((x - mean)**2 * weights, 0) / (total_weight - 1)
  return mean, np.sqrt(variance)
class Dataset(object):
  """Dataset class for policy evaluation.

  Loads trajectories from a pickle file, optionally augments and
  normalizes them, and exposes flattened per-transition tensors plus
  uniform / geometric batch samplers.
  """

  def __init__(self,
               data_file_name: str,
               num_trajectories: int,
               normalize_states: bool = False,
               normalize_rewards: bool = False,
               eps: float = 1e-5,
               noise_scale: float = 0.0,
               bootstrap: bool = True):
    """Loads data from a file.

    Args:
      data_file_name: filename with data.
      num_trajectories: number of trajectories to select from the dataset.
      normalize_states: whether to normalize the states.
      normalize_rewards: whether to normalize the rewards.
      eps: Epsilon used for normalization.
      noise_scale: Data augmentation noise scale.
      bootstrap: Whether to generated bootstrapped weights.
    """
    # NOTE(review): assumes the pickle holds a dict with a 'trajectories'
    # sub-dict of per-trajectory lists ('states', 'actions', 'next_states',
    # 'rewards', 'masks') and a 'model_filename' entry -- confirm against
    # the data-generation script.
    with tf.io.gfile.GFile(data_file_name, 'rb') as f:
      dataset = pickle.load(f)
    # Keep only the first num_trajectories trajectories of every field.
    for k, v in dataset['trajectories'].items():
      dataset['trajectories'][k] = v[:num_trajectories]
    if noise_scale > 0.0:
      dataset['trajectories'] = augment_data(dataset['trajectories'],
                                             noise_scale)
    # Step index within each trajectory (0, 1, ..., T-1).
    dataset['trajectories']['steps'] = [
        np.arange(len(state_trajectory))
        for state_trajectory in dataset['trajectories']['states']
    ]
    # First state of every trajectory.
    dataset['initial_states'] = np.stack([
        state_trajectory[0]
        for state_trajectory in dataset['trajectories']['states']
    ])
    num_trajectories = len(dataset['trajectories']['states'])
    if bootstrap:
      # Multinomial resampling counts: a bootstrap over trajectories.
      dataset['initial_weights'] = np.random.multinomial(
          num_trajectories, [1.0 / num_trajectories] * num_trajectories,
          1).astype(np.float32)[0]
    else:
      dataset['initial_weights'] = np.ones(num_trajectories, dtype=np.float32)
    # Broadcast each trajectory's weight to every step of that trajectory.
    dataset['trajectories']['weights'] = []
    for i in range(len(dataset['trajectories']['masks'])):
      dataset['trajectories']['weights'].append(
          np.ones_like(dataset['trajectories']['masks'][i]) *
          dataset['initial_weights'][i])
    dataset['initial_weights'] = tf.convert_to_tensor(
        dataset['initial_weights'])
    dataset['initial_states'] = tf.convert_to_tensor(dataset['initial_states'])
    # Flatten per-trajectory lists into single concatenated tensors.
    for k, v in dataset['trajectories'].items():
      if 'initial' not in k:
        dataset[k] = tf.convert_to_tensor(
            np.concatenate(dataset['trajectories'][k], axis=0))
    self.states = dataset['states']
    self.actions = dataset['actions']
    self.next_states = dataset['next_states']
    self.masks = dataset['masks']
    self.weights = dataset['weights']
    self.rewards = dataset['rewards']
    self.steps = dataset['steps']
    self.initial_states = dataset['initial_states']
    self.initial_weights = dataset['initial_weights']
    self.eps = eps
    self.model_filename = dataset['model_filename']
    if normalize_states:
      self.state_mean = tf.reduce_mean(self.states, 0)
      self.state_std = tf.math.reduce_std(self.states, 0)
      self.initial_states = self.normalize_states(self.initial_states)
      self.states = self.normalize_states(self.states)
      self.next_states = self.normalize_states(self.next_states)
    else:
      # Identity normalization parameters.
      self.state_mean = 0.0
      self.state_std = 1.0
    if normalize_rewards:
      self.reward_mean = tf.reduce_mean(self.rewards)
      # If any mask is 0 the task has absorbing terminations; do not shift
      # rewards in that case, only rescale.
      if tf.reduce_min(self.masks) == 0.0:
        self.reward_mean = tf.zeros_like(self.reward_mean)
      self.reward_std = tf.math.reduce_std(self.rewards)
      self.rewards = self.normalize_rewards(self.rewards)
    else:
      self.reward_mean = 0.0
      self.reward_std = 1.0

  def normalize_states(self, states):
    """Normalizes states by the dataset mean/std (std clamped to >= eps)."""
    dtype = tf.convert_to_tensor(states).dtype
    return ((states - self.state_mean) /
            tf.maximum(tf.cast(self.eps, dtype), self.state_std))

  def unnormalize_states(self, states):
    """Inverse of normalize_states."""
    dtype = tf.convert_to_tensor(states).dtype
    return (states * tf.maximum(tf.cast(self.eps, dtype), self.state_std)
            + self.state_mean)

  def normalize_rewards(self, rewards):
    """Normalizes rewards by the dataset mean/std (std clamped to >= eps)."""
    return (rewards - self.reward_mean) / tf.maximum(self.reward_std, self.eps)

  def unnormalize_rewards(self, rewards):
    """Inverse of normalize_rewards."""
    return rewards * tf.maximum(self.reward_std, self.eps) + self.reward_mean

  def with_uniform_sampling(self, sample_batch_size):
    """Returns an infinite tf.data.Dataset of uniformly shuffled batches."""
    return tf.data.Dataset.from_tensor_slices(
        (self.states, self.actions, self.next_states, self.rewards, self.masks,
         self.weights, self.steps)).repeat().shuffle(
             self.states.shape[0], reshuffle_each_iteration=True).batch(
                 sample_batch_size, drop_remainder=True).prefetch(100)

  def with_geometric_sampling(self, sample_batch_size,
                              discount):
    """Creates tf dataset with geometric sampling.

    Transitions are drawn with probability proportional to
    discount**step, i.e. matching the discounted visitation distribution.

    Args:
      sample_batch_size: Batch size for sampling.
      discount: MDP discount.

    Returns:
      TensorFlow dataset.
    """
    sample_weights = discount**tf.cast(self.steps, tf.float32)
    weight_sum = tf.math.cumsum(sample_weights)

    def sample_batch(_):
      # Inverse-CDF sampling over the cumulative (unnormalized) weights.
      values = tf.random.uniform((sample_batch_size,), 0.0,
                                 weight_sum[-1])
      ind = tf.searchsorted(weight_sum, values)
      return (tf.gather(self.states, ind,
                        0), tf.gather(self.actions, ind, 0),
              tf.gather(self.next_states, ind,
                        0), tf.gather(self.rewards, ind, 0),
              tf.gather(self.masks, ind,
                        0), tf.gather(self.weights, ind, 0),
              tf.gather(self.steps, ind, 0))

    return tf.data.experimental.Counter().map(sample_batch).prefetch(100)
class D4rlDataset(Dataset):
  """Dataset class for policy evaluation.

  Builds the same fields as Dataset, but from a D4RL environment's flat
  arrays instead of a pickle file.
  """

  # pylint: disable=super-init-not-called
  def __init__(self,
               d4rl_env,
               normalize_states: bool = False,
               normalize_rewards: bool = False,
               eps: float = 1e-5,
               noise_scale: float = 0.0,
               bootstrap: bool = True):
    """Processes data from D4RL environment.

    Args:
      d4rl_env: gym.Env corresponding to D4RL environment.
      normalize_states: whether to normalize the states.
      normalize_rewards: whether to normalize the rewards.
      eps: Epsilon used for normalization.
      noise_scale: Data augmentation noise scale.
      bootstrap: Whether to generated bootstrapped weights.
    """
    dataset = dict(
        trajectories=dict(
            states=[],
            actions=[],
            next_states=[],
            rewards=[],
            masks=[]))
    d4rl_dataset = d4rl_env.get_dataset()
    dataset_length = len(d4rl_dataset['actions'])
    new_trajectory = True
    # Split the flat D4RL arrays into trajectories at terminal/timeout steps.
    for idx in range(dataset_length):
      if new_trajectory:
        trajectory = dict(
            states=[], actions=[], next_states=[], rewards=[], masks=[])
      trajectory['states'].append(d4rl_dataset['observations'][idx])
      trajectory['actions'].append(d4rl_dataset['actions'][idx])
      trajectory['rewards'].append(d4rl_dataset['rewards'][idx])
      # mask = 0 marks an absorbing (terminal) transition.
      trajectory['masks'].append(1.0 - d4rl_dataset['terminals'][idx])
      # The current observation is the next_state of the previous step.
      if not new_trajectory:
        trajectory['next_states'].append(d4rl_dataset['observations'][idx])
      end_trajectory = (d4rl_dataset['terminals'][idx] or
                        d4rl_dataset['timeouts'][idx])
      if end_trajectory:
        # No successor observation is recorded for the last step; reuse the
        # final observation as its own next_state.
        trajectory['next_states'].append(d4rl_dataset['observations'][idx])
        if d4rl_dataset['timeouts'][idx] and not d4rl_dataset['terminals'][idx]:
          # A timeout (non-terminal) ending has no valid next_state; drop
          # the last step of every field instead of fabricating one.
          for key in trajectory:
            del trajectory[key][-1]
        if trajectory['actions']:
          for k, v in trajectory.items():
            assert len(v) == len(trajectory['actions'])
            dataset['trajectories'][k].append(np.array(v, dtype=np.float32))
          print('Added trajectory %d with length %d.' % (
              len(dataset['trajectories']['actions']),
              len(trajectory['actions'])))
      new_trajectory = end_trajectory

    # Everything below mirrors Dataset.__init__ (super().__init__ is not
    # called because the data source differs).
    if noise_scale > 0.0:
      dataset['trajectories'] = augment_data(dataset['trajectories'],  # pytype: disable=wrong-arg-types  # dict-kwargs
                                             noise_scale)
    # Step index within each trajectory (0, 1, ..., T-1).
    dataset['trajectories']['steps'] = [
        np.arange(len(state_trajectory))
        for state_trajectory in dataset['trajectories']['states']
    ]
    # First state of every trajectory.
    dataset['initial_states'] = np.stack([
        state_trajectory[0]
        for state_trajectory in dataset['trajectories']['states']
    ])
    num_trajectories = len(dataset['trajectories']['states'])
    if bootstrap:
      # Multinomial resampling counts: a bootstrap over trajectories.
      dataset['initial_weights'] = np.random.multinomial(
          num_trajectories, [1.0 / num_trajectories] * num_trajectories,
          1).astype(np.float32)[0]
    else:
      dataset['initial_weights'] = np.ones(num_trajectories, dtype=np.float32)
    # Broadcast each trajectory's weight to every step of that trajectory.
    dataset['trajectories']['weights'] = []
    for i in range(len(dataset['trajectories']['masks'])):
      dataset['trajectories']['weights'].append(
          np.ones_like(dataset['trajectories']['masks'][i]) *
          dataset['initial_weights'][i])
    dataset['initial_weights'] = tf.convert_to_tensor(
        dataset['initial_weights'])
    dataset['initial_states'] = tf.convert_to_tensor(dataset['initial_states'])
    # Flatten per-trajectory lists into single concatenated tensors.
    for k, v in dataset['trajectories'].items():
      if 'initial' not in k:
        dataset[k] = tf.convert_to_tensor(
            np.concatenate(dataset['trajectories'][k], axis=0))
    self.states = dataset['states']
    self.actions = dataset['actions']
    self.next_states = dataset['next_states']
    self.masks = dataset['masks']
    self.weights = dataset['weights']
    self.rewards = dataset['rewards']
    self.steps = dataset['steps']
    self.initial_states = dataset['initial_states']
    self.initial_weights = dataset['initial_weights']
    self.eps = eps
    # No behavior-policy model file is associated with D4RL data.
    self.model_filename = None
    if normalize_states:
      self.state_mean = tf.reduce_mean(self.states, 0)
      self.state_std = tf.math.reduce_std(self.states, 0)
      self.initial_states = self.normalize_states(self.initial_states)
      self.states = self.normalize_states(self.states)
      self.next_states = self.normalize_states(self.next_states)
    else:
      # Identity normalization parameters.
      self.state_mean = 0.0
      self.state_std = 1.0
    if normalize_rewards:
      self.reward_mean = tf.reduce_mean(self.rewards)
      # If any mask is 0 the task has absorbing terminations; do not shift
      # rewards in that case, only rescale.
      if tf.reduce_min(self.masks) == 0.0:
        self.reward_mean = tf.zeros_like(self.reward_mean)
      self.reward_std = tf.math.reduce_std(self.rewards)
      self.rewards = self.normalize_rewards(self.rewards)
    else:
      self.reward_mean = 0.0
      self.reward_std = 1.0

  # pylint: enable=super-init-not-called
| {
"content_hash": "7db48626cb461c8fc3c533e27da6fcf3",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 119,
"avg_line_length": 35.426791277258566,
"alnum_prop": 0.6180091452690819,
"repo_name": "google-research/google-research",
"id": "2480e378dc655f4c13680da01a5bf78568839611",
"size": "11980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "policy_eval/dataset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
def unpacked_pdu_opts(unpacked_pdu):
    """Return the PDU's optional parameters as a {tag: value} dict."""
    optional = unpacked_pdu['body'].get('optional_parameters', [])
    return dict((opt['tag'], opt['value']) for opt in optional)
def detect_ussd(pdu_opts):
    """Whether the optional parameters mark the PDU as a USSD message."""
    # TODO: Push this back to python-smpp?
    has_service_op = 'ussd_service_op' in pdu_opts
    return has_service_op
def update_ussd_pdu(sm_pdu, continue_session, session_info=None):
    """Attach USSD optional parameters to an SMPP short-message PDU.

    Adds 1 to the session id's low bits when the session is NOT being
    continued, flagging termination to the peer. Returns the same PDU.
    """
    raw = '0000' if session_info is None else session_info
    end_flag = 0 if continue_session else 1
    its_session_info = "%04x" % (int(raw, 16) + end_flag)
    sm_pdu.add_optional_parameter('ussd_service_op', '02')
    sm_pdu.add_optional_parameter('its_session_info', its_session_info)
    return sm_pdu
| {
"content_hash": "83e1320642594ad2fc5826642dd14173",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 79,
"avg_line_length": 34.421052631578945,
"alnum_prop": 0.6529051987767585,
"repo_name": "vishwaprakashmishra/xmatrix",
"id": "e9203b91e908245a9358ef402457d363343b6289",
"size": "654",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "vumi/transports/smpp/smpp_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Erlang",
"bytes": "29735"
},
{
"name": "JavaScript",
"bytes": "5556"
},
{
"name": "Puppet",
"bytes": "2557"
},
{
"name": "Python",
"bytes": "2968329"
},
{
"name": "Shell",
"bytes": "3435"
}
],
"symlink_target": ""
} |
from argparse import ArgumentParser
from typing import Any, List
from django.conf import settings
from django.core.management.base import CommandError
from zerver.lib.management import ZulipBaseCommand
from zerver.lib.send_email import send_custom_email
from zerver.models import Realm, UserProfile
class Command(ZulipBaseCommand):
    help = """
Send a custom email with Zulip branding to the specified users.
Useful to send a notice to all users of a realm or server.
The From and Subject headers can be provided in the body of the Markdown
document used to generate the email, or on the command line."""

    def add_arguments(self, parser: ArgumentParser) -> None:
        """Registers recipient-selection flags, email-content options, and
        the shared user/realm selectors from ZulipBaseCommand."""
        # Recipient-selection flags; handle() treats these as mutually
        # exclusive alternatives to the -u/--users / -a/--all-users path.
        parser.add_argument(
            "--entire-server", action="store_true", help="Send to every user on the server."
        )
        parser.add_argument(
            "--all-sponsored-org-admins",
            action="store_true",
            help="Send to all organization administrators of sponsored organizations.",
        )
        parser.add_argument(
            "--marketing",
            action="store_true",
            help="Send to active users and realm owners with the enable_marketing_emails setting enabled.",
        )
        parser.add_argument(
            "--remote-servers",
            action="store_true",
            help="Send to registered contact email addresses for remote Zulip servers.",
        )
        # Email-content options.
        parser.add_argument(
            "--markdown-template-path",
            "--path",
            required=True,
            help="Path to a Markdown-format body for the email.",
        )
        parser.add_argument(
            "--subject",
            help="Subject for the email. It can be declared in Markdown file in headers",
        )
        parser.add_argument(
            "--from-name",
            help="From line for the email. It can be declared in Markdown file in headers",
        )
        parser.add_argument("--reply-to", help="Optional reply-to line for the email")
        parser.add_argument(
            "--admins-only", help="Send only to organization administrators", action="store_true"
        )
        parser.add_argument(
            "--dry-run",
            action="store_true",
            help="Prints emails of the recipients and text of the email.",
        )
        self.add_user_list_args(
            parser,
            help="Email addresses of user(s) to send emails to.",
            all_users_help="Send to every user on the realm.",
        )
        self.add_realm_args(parser)

    def handle(self, *args: Any, **options: str) -> None:
        """Resolves the recipient set from the selection options, filters by
        ToS agreement, and delegates sending to send_custom_email."""
        target_emails: List[str] = []
        users: List[UserProfile] = []
        if options["entire_server"]:
            # All active human users across every non-deactivated realm.
            users = UserProfile.objects.filter(
                is_active=True, is_bot=False, is_mirror_dummy=False, realm__deactivated=False
            )
        elif options["marketing"]:
            # Marketing email sent at most once to each email address for users
            # who are recently active (!long_term_idle) users of the product.
            users = UserProfile.objects.filter(
                is_active=True,
                is_bot=False,
                is_mirror_dummy=False,
                realm__deactivated=False,
                enable_marketing_emails=True,
                long_term_idle=False,
            ).distinct("delivery_email")
        elif options["remote_servers"]:
            # Imported here: the zilencer app is only installed on
            # deployments that run the push-notification bouncer.
            from zilencer.models import RemoteZulipServer

            # TODO: Make this filter for deactivated=False once we add
            # that to the data model.
            target_emails = list(
                set(RemoteZulipServer.objects.all().values_list("contact_email", flat=True))
            )
        elif options["all_sponsored_org_admins"]:
            # Sends at most one copy to each email address, even if it
            # is an administrator in several organizations.
            sponsored_realms = Realm.objects.filter(
                plan_type=Realm.PLAN_TYPE_STANDARD_FREE, deactivated=False
            )
            admin_roles = [UserProfile.ROLE_REALM_ADMINISTRATOR, UserProfile.ROLE_REALM_OWNER]
            users = UserProfile.objects.filter(
                is_active=True,
                is_bot=False,
                is_mirror_dummy=False,
                role__in=admin_roles,
                realm__deactivated=False,
                realm__in=sponsored_realms,
            ).distinct("delivery_email")
        else:
            # Default path: select users within a single realm via the
            # shared -u/--users / -a/--all-users selectors.
            realm = self.get_realm(options)
            try:
                users = self.get_users(options, realm, is_bot=False)
            except CommandError as error:
                # Rewrite the generic selector error to also mention the
                # --entire-server alternative this command supports.
                if str(error) == "You have to pass either -u/--users or -a/--all-users.":
                    raise CommandError(
                        "You have to pass -u/--users or -a/--all-users or --entire-server."
                    )
                raise error
        # Only email users who've agreed to the terms of service.
        if settings.TERMS_OF_SERVICE_VERSION is not None:
            # We need to do a new query because the `get_users` path
            # passes us a list rather than a QuerySet.
            users = (
                UserProfile.objects.select_related()
                .filter(id__in=[u.id for u in users])
                .exclude(tos_version=None)
            )
        send_custom_email(users, target_emails=target_emails, options=options)
        if options["dry_run"]:
            print("Would send the above email to:")
            for user in users:
                print(f" {user.delivery_email} ({user.realm.string_id})")
            for email in target_emails:
                print(f" {email}")
| {
"content_hash": "c080717399daeff4ad83d228fd2de29f",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 107,
"avg_line_length": 40.80714285714286,
"alnum_prop": 0.5739541396814283,
"repo_name": "eeshangarg/zulip",
"id": "af5131dfd7d13e3c1eff0583e3b3b47a1ff7c328",
"size": "5713",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "zerver/management/commands/send_custom_email.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "484233"
},
{
"name": "Dockerfile",
"bytes": "5056"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "713408"
},
{
"name": "Handlebars",
"bytes": "343958"
},
{
"name": "JavaScript",
"bytes": "3738321"
},
{
"name": "Perl",
"bytes": "9884"
},
{
"name": "Puppet",
"bytes": "106355"
},
{
"name": "Python",
"bytes": "9442083"
},
{
"name": "Ruby",
"bytes": "3250"
},
{
"name": "Shell",
"bytes": "135667"
},
{
"name": "TypeScript",
"bytes": "275302"
}
],
"symlink_target": ""
} |
'''
Copyright (C) Yadu Nand B <yadudoc1729@gmail.com> - All Rights Reserved
Unauthorized copying of this file, via any medium is strictly prohibited
Proprietary and confidential
Written by Yadu Nand B <yadudoc1729@gmail.com>, September 2015
'''
import os
import logging
import bottle
import requests
import time
from boto.dynamodb2.fields import HashKey
from boto.dynamodb2.table import Table
import boto.dynamodb2 as ddb
import boto.ec2
import boto.sqs
import boto.sns
import boto.ses
import boto.ec2.autoscale
from bottle import app, template
from boto.s3.connection import S3Connection
from datetime import datetime
from datetime import date
from dateutil.relativedelta import relativedelta
log_levels = { "DEBUG" : logging.DEBUG,
"INFO" : logging.INFO,
"WARNING" : logging.WARNING,
"ERROR" : logging.ERROR,
"CRITICAL": logging.CRITICAL
}
# Returns true if the credentials were updated
def update_creds_from_metadata_server(app):
    """Refreshes AWS credentials and instance metadata in app.config.

    Queries the EC2 instance metadata service for the IAM role's
    temporary STS credentials plus the instance id, instance type,
    availability zone, and identity document. The refresh is skipped
    while the cached keys remain valid for more than one hour.

    Returns:
        True if the credentials were updated, False otherwise.
    """
    #Todo error check for timeout errors from http access
    #TOdo error catch for json decode failure
    if "keys.expiry" in app.config and app.config["keys.expiry"] > (datetime.now() + relativedelta(hours=1)):
        # NOTE(review): the log text reads "expiry < now", but the guard
        # above checked expiry > now + 1h -- the message is misleading.
        logging.debug("Update creds from metadata cancelled {0} < {1}".format(
            app.config["keys.expiry"],
            datetime.now()))
        return False
    # First fetch the role name, then the credentials issued for it.
    URL = app.config["metadata.credurl"]
    role = requests.get(URL).content
    URL = URL + role
    data = requests.get(URL).json()
    app.config["keys.expiry"] = datetime.strptime(str(data['Expiration']), '%Y-%m-%dT%H:%M:%SZ')
    app.config["keys.key_id"] = str(data['AccessKeyId'])
    app.config["keys.key_secret"] = str(data['SecretAccessKey'])
    app.config["keys.key_token"] = str(data['Token'])
    # Basic instance facts from the metadata server.
    URL = app.config["metadata.metaserver"]
    data = requests.get(URL+"instance-id").text
    app.config["instance_id"] = str(data)
    URL = app.config["metadata.metaserver"]
    data = requests.get(URL+"instance-type").text
    app.config["instance_type"] = str(data)
    URL = app.config["metadata.metaserver"]
    data = requests.get(URL+"placement/availability-zone/").text
    app.config["region"] = str(data)
    # Full signed instance-identity document.
    URL = app.config["metadata.metaidentity"]
    data = requests.get(URL).json()
    app.config["identity"] = data
    # Re-initialise the AWS service connections with the new credentials.
    if "doReload" in app.config and app.config["doReload"] == True:
        init(app)
    return True
##################################################################
# Annoy human with email
##################################################################
def send_success_mail(data, app):
    """Email the job owner that their job completed.

    Returns True when the mail was handed to SES, False if sending raised.
    """
    ses = app.config['ses.conn']
    sender = app.config['ses.email_sender']
    recipient = data.get('user_email')
    # Render the notification body from the completion template.
    body = template('./templates/completion_email.tpl',
                    username=data.get('username'),
                    job_id=data.get('job_id'),
                    url=app.config['server.url'])
    try:
        ses.send_email(sender,
                       "[Turing] Your Job has completed",
                       body,
                       [recipient])
    except Exception as e:
        return False
    return True
##################################################################
# Verify and add user to email list
##################################################################
def verify_email(app, email):
    """Requests SES verification for an email address.

    SES mails a confirmation link to the address; it only becomes usable
    as a recipient once its owner confirms.

    Returns:
        The SES response from verify_email_address.
    """
    sesconn = app.config['ses.conn']
    st = sesconn.verify_email_address(email)
    # Use logging rather than a bare debug print so the output respects the
    # configured log level and handlers.
    logging.debug("verify_email_address response: {0}".format(st))
    return st
##################################################################
# Send condolences for job failure
##################################################################
def send_failure_mail(data, app):
    """Email the job owner that their job failed.

    Mirrors send_success_mail; returns True when the mail was handed to
    SES, False if sending raised.
    """
    sesconn = app.config['ses.conn']
    job_id = data.get('job_id')
    rec_email = data.get('user_email')
    rec_name = data.get('username')
    src_email = app.config['ses.email_sender']
    url = app.config['server.url']
    body = template('./templates/failure_email.tpl',
                    username=rec_name,
                    job_id=job_id,
                    url=url)
    try:
        st = sesconn.send_email(src_email,
                                "[Turing] Your Job has failed",
                                body,
                                [rec_email])
    # Fixed: was the Python-2-only `except Exception, e` form, inconsistent
    # with send_success_mail's `except ... as ...` in the same module.
    except Exception as e:
        return False
    return True
def init(app):
    """Creates AWS service connections and stores them in app.config.

    Using the temporary credentials already present in app.config, opens
    connections to EC2, SQS, SNS, SES, autoscale, S3 and a DynamoDB table,
    and records the instance's EC2 tags. Also sets doReload so a later
    credential refresh re-runs init().

    Returns:
        The same app, with *.conn entries populated in app.config.
    """
    ec2 = boto.ec2.connect_to_region(app.config["identity"]['region'],
                                     aws_access_key_id=app.config['keys.key_id'],
                                     aws_secret_access_key=app.config['keys.key_secret'],
                                     security_token=app.config['keys.key_token'])
    #print "instance id : ", app.config["instance_id"]
    # Get meta tags
    reservation = ec2.get_all_instances(instance_ids=app.config["instance_id"])
    meta_tags = {}
    if reservation:
        for tag in reservation[0].instances[0].tags:
            meta_tags[str(tag)] = str(reservation[0].instances[0].tags[tag])
            #print str(tag), str(reservation[0].instances[0].tags[tag])
    #meta_tags = {}
    #for tag in ec2.get_all_tags():
    #    meta_tags[str(tag.name)] = str(tag.value)
    # Log the metadata tags
    app.config["instance.tags"] = meta_tags
    for k in meta_tags:
        logging.debug("[TAGS] {0} : {1}".format(k, meta_tags[k]))
    sqs = boto.sqs.connect_to_region(app.config["identity"]['region'],
                                     aws_access_key_id=app.config['keys.key_id'],
                                     aws_secret_access_key=app.config['keys.key_secret'],
                                     security_token=app.config['keys.key_token'])
    sns = boto.sns.connect_to_region(app.config["identity"]['region'],
                                     aws_access_key_id=app.config['keys.key_id'],
                                     aws_secret_access_key=app.config['keys.key_secret'],
                                     security_token=app.config['keys.key_token'])
    ses = boto.ses.connect_to_region(app.config["identity"]['region'],
                                     aws_access_key_id=app.config['keys.key_id'],
                                     aws_secret_access_key=app.config['keys.key_secret'],
                                     security_token=app.config['keys.key_token'])
    scale = boto.ec2.autoscale.AutoScaleConnection(aws_access_key_id=app.config['keys.key_id'],
                                                   aws_secret_access_key=app.config['keys.key_secret'],
                                                   security_token=app.config['keys.key_token'])
    s3 = S3Connection(aws_access_key_id=app.config['keys.key_id'],
                      aws_secret_access_key=app.config['keys.key_secret'],
                      security_token=app.config['keys.key_token'])
    # Table name comes from the instance's EC2 tags, not the config file.
    # NOTE(review): uses HashKey "job_id" here but connect_to_dynamodb()
    # uses "CustomerUUID" for the same tag-named table -- confirm which
    # schema is correct.
    dyno = Table(app.config["instance.tags"]["DynamoDBTableName"],  #app.config['dynamodb.table_name'],
                 schema=[HashKey("job_id")],
                 connection=ddb.connect_to_region(app.config['dynamodb.region'],
                                                  aws_access_key_id=app.config['keys.key_id'],
                                                  aws_secret_access_key=app.config['keys.key_secret'],
                                                  security_token=app.config['keys.key_token']))
    app.config["ec2.conn"] = ec2
    app.config["sns.conn"] = sns
    app.config["sqs.conn"] = sqs
    app.config["ses.conn"] = ses
    app.config["s3.conn"] = s3
    app.config["scale.conn"] = scale
    app.config["dyno.conn"] = dyno
    app.config["doReload"] = True
    return app
def connect_to_dynamodb(app):
    """Ensure app.config holds a live DynamoDB Table connection.

    Refreshes credentials from the metadata server first; the cached
    table is reused unless the credentials actually changed.
    """
    creds_changed = update_creds_from_metadata_server(app)
    # If an entry exists for the table and the credentials have
    # not been updated then skip the connection
    if "dynamodb.table" in app.config and not creds_changed:
        return app
    conn = ddb.connect_to_region(app.config['dynamodb.region'],
                                 aws_access_key_id=app.config['keys.key_id'],
                                 aws_secret_access_key=app.config['keys.key_secret'],
                                 security_token=app.config['keys.key_token'])
    # Table name comes from the instance's EC2 tags.
    table = Table(app.config["instance.tags"]["DynamoDBTableName"],
                  schema=[HashKey("CustomerUUID")],
                  connection=conn)
    app.config["dynamodb.table"] = table
    return app
def load_configs(filename):
    """Loads a bottle config file and resolves the AWS key entries.

    Every 'keys.*' config entry names a CSV credentials file (header on
    line 1, then 'user,key_id,secret' on line 2). The entry is replaced
    by the file's full text, and keys.key_id / keys.key_secret are set
    from line 2. If a metadata credentials URL is configured, credentials
    are additionally refreshed from the EC2 metadata server.

    Returns:
        The configured bottle app; exits the process on errors.
    """
    app = bottle.default_app()
    try:
        app.config.load_config(filename)
    except Exception as e:
        logging.error("Exception {0} in load_config".format(e))
        exit(-1)
    logging.debug("Config : \n {0}".format(app.config))
    # Iterate a snapshot: the loop body inserts keys.key_id/key_secret/
    # key_token, and mutating a dict while iterating it raises RuntimeError.
    for keys in list(app.config):
        if keys.startswith('keys.'):
            print(keys)
            keyfile = app.config[keys].replace('\"', '')
            logging.debug("Keyfile : {0}".format(keyfile.replace('\"', '')))
            if not os.path.isfile(keyfile):
                print("Key file {0} missing!".format(keyfile))
                logging.error("Key file {0} missing!".format(keyfile))
                exit(-1)
            with open(keyfile, 'r') as kf:
                ks = kf.readlines()
            sp = ks[1].split(',')
            # BUG FIX: this used to call kf.read() after readlines(), which
            # always returned '' (file pointer at EOF); store the text that
            # was actually read.
            app.config[keys] = ''.join(ks)
            app.config["keys.key_id"] = sp[1]
            app.config["keys.key_secret"] = sp[2]
            app.config["keys.key_token"] = ''
            #print "keys : ", app.config[keys]
    if 'metadata.credurl' in app.config:
        update_creds_from_metadata_server(app)
    init(app)
    return app
| {
"content_hash": "91cab4a2208bbb3a1b5de1430345fa1a",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 118,
"avg_line_length": 38.6796875,
"alnum_prop": 0.5362553019592001,
"repo_name": "yadudoc/cloud_kotta",
"id": "82ad78e346ad254a19954676f7b68d6a41c868cd",
"size": "9924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3558"
},
{
"name": "HTML",
"bytes": "1355"
},
{
"name": "JavaScript",
"bytes": "8960"
},
{
"name": "Python",
"bytes": "230490"
},
{
"name": "Shell",
"bytes": "3495"
},
{
"name": "Smarty",
"bytes": "44608"
}
],
"symlink_target": ""
} |
import os
import time
from telemetry import wait_action
from telemetry import tab_test_case
class WaitActionTest(tab_test_case.TabTestCase):
  def testWaitAction(self):
    """Checks WaitAction blocks for roughly its configured duration."""
    data_dir = os.path.join(os.path.dirname(__file__),
                            '..', 'unittest_data')
    self._browser.SetHTTPServerDirectory(data_dir)
    self._tab.Navigate(
        self._browser.http_server.UrlOf('blank.html'))
    self._tab.WaitForDocumentReadyStateToBeComplete()
    self.assertEquals(
        self._tab.EvaluateJavaScript('document.location.pathname;'),
        '/blank.html')
    action = wait_action.WaitAction({'duration': 1})
    before = time.time()
    action.RunAction({}, self._tab)
    elapsed = time.time() - before
    self.assertAlmostEqual(elapsed, 1, places=2)
| {
"content_hash": "2060106a4f9d8d944d41a884413192bb",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 68,
"avg_line_length": 33.91304347826087,
"alnum_prop": 0.6653846153846154,
"repo_name": "nacl-webkit/chrome_deps",
"id": "ce00d7976b0cb0ee811f0fe89e15b862aa5c4363",
"size": "946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/wait_action_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1173441"
},
{
"name": "Awk",
"bytes": "9519"
},
{
"name": "C",
"bytes": "74568368"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "156174457"
},
{
"name": "DOT",
"bytes": "1559"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Java",
"bytes": "3088381"
},
{
"name": "JavaScript",
"bytes": "18179048"
},
{
"name": "Logos",
"bytes": "4517"
},
{
"name": "M",
"bytes": "2190"
},
{
"name": "Matlab",
"bytes": "3044"
},
{
"name": "Objective-C",
"bytes": "6965520"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "932725"
},
{
"name": "Python",
"bytes": "8458718"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3621"
},
{
"name": "Shell",
"bytes": "1526176"
},
{
"name": "Tcl",
"bytes": "277077"
},
{
"name": "XSLT",
"bytes": "13493"
}
],
"symlink_target": ""
} |
import random
import eventlet
import mock
from neutron_lib import constants as n_const
from neutron_lib.utils import net
from oslo_config import cfg
from oslo_utils import uuidutils
from neutron.agent.common import ovs_lib
from neutron.agent.common import polling
from neutron.agent.l2 import l2_agent_extensions_manager as ext_manager
from neutron.agent.linux import interface
from neutron.common import utils
from neutron.conf.agent import common as agent_config
from neutron.conf import common as common_config
from neutron.conf.plugins.ml2.drivers import agent
from neutron.conf.plugins.ml2.drivers import ovs_conf
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
import br_int
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
import br_phys
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \
import br_tun
from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent \
as ovs_agent
from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.linux import base
class OVSAgentTestFramework(base.BaseOVSLinuxTestCase):
    """Scaffolding for functional tests of the OVS neutron agent.

    Creates randomly named integration/tunnel/physical bridges, mocks the
    plugin-side RPC APIs, and provides helpers to plug test ports and drive
    the agent's rpc_loop in a background thread.
    """
    def setUp(self):
        """Mock the plugin RPC classes and pick random bridge names."""
        super(OVSAgentTestFramework, self).setUp()
        agent_rpc = ('neutron.plugins.ml2.drivers.openvswitch.agent.'
                     'ovs_neutron_agent.OVSPluginApi')
        mock.patch(agent_rpc).start()
        mock.patch('neutron.agent.rpc.PluginReportStateAPI').start()
        # Random names keep concurrently running tests from colliding on
        # the same OVS bridges.
        self.br_int = utils.get_rand_name(n_const.DEVICE_NAME_MAX_LEN,
                                          prefix='br-int')
        self.br_tun = utils.get_rand_name(n_const.DEVICE_NAME_MAX_LEN,
                                          prefix='br-tun')
        self.br_phys = utils.get_rand_name(n_const.DEVICE_NAME_MAX_LEN,
                                           prefix='br-phys')
        # Patch-port names embed the random suffix of the bridge name so the
        # full name still fits in DEVICE_NAME_MAX_LEN.
        patch_name_len = n_const.DEVICE_NAME_MAX_LEN - len("-patch-tun")
        self.patch_tun = "%s-patch-tun" % self.br_int[patch_name_len:]
        self.patch_int = "%s-patch-int" % self.br_tun[patch_name_len:]
        self.ovs = ovs_lib.BaseOVS()
        self.config = self._configure_agent()
        self.driver = interface.OVSInterfaceDriver(self.config)
        self.namespace = self.useFixture(net_helpers.NamespaceFixture()).name
    def _get_config_opts(self):
        """Return a fresh ConfigOpts with all agent option groups registered."""
        config = cfg.ConfigOpts()
        config.register_opts(common_config.core_opts)
        agent.register_agent_opts(config)
        ovs_conf.register_ovs_agent_opts(config)
        agent_config.register_interface_opts(config)
        agent_config.register_interface_driver_opts_helper(config)
        agent_config.register_agent_state_opts_helper(config)
        ext_manager.register_opts(config)
        return config
    def _configure_agent(self):
        """Return agent config pointed at this test's random bridge names."""
        config = self._get_config_opts()
        config.set_override(
            'interface_driver',
            'neutron.agent.linux.interface.OVSInterfaceDriver')
        config.set_override('integration_bridge', self.br_int, "OVS")
        config.set_override('ovs_integration_bridge', self.br_int)
        config.set_override('tunnel_bridge', self.br_tun, "OVS")
        config.set_override('int_peer_patch_port', self.patch_tun, "OVS")
        config.set_override('tun_peer_patch_port', self.patch_int, "OVS")
        config.set_override('host', 'ovs-agent')
        return config
    def _bridge_classes(self):
        """Map bridge roles to the ovs-ofctl bridge classes under test."""
        return {
            'br_int': br_int.OVSIntegrationBridge,
            'br_phys': br_phys.OVSPhysicalBridge,
            'br_tun': br_tun.OVSTunnelBridge
        }
    def create_agent(self, create_tunnels=True, ancillary_bridge=None,
                     local_ip='192.168.10.1'):
        """Build an OVSNeutronAgent instance wired to this test's bridges.

        :param create_tunnels: when True, enable VXLAN tunneling (and clean
            up the tunnel bridge afterwards).
        :param ancillary_bridge: optional extra bridge registered as an
            ancillary bridge on the agent.
        :param local_ip: tunnel endpoint IP written into the OVS config.
        """
        if create_tunnels:
            tunnel_types = [n_const.TYPE_VXLAN]
        else:
            tunnel_types = None
        bridge_mappings = ['physnet:%s' % self.br_phys]
        self.config.set_override('tunnel_types', tunnel_types, "AGENT")
        self.config.set_override('polling_interval', 1, "AGENT")
        self.config.set_override('local_ip', local_ip, "OVS")
        self.config.set_override('bridge_mappings', bridge_mappings, "OVS")
        # Physical bridges should be created prior to running
        self._bridge_classes()['br_phys'](self.br_phys).create()
        ext_mgr = ext_manager.L2AgentExtensionsManager(self.config)
        agent = ovs_agent.OVSNeutronAgent(self._bridge_classes(),
                                          ext_mgr, self.config)
        self.addCleanup(self.ovs.delete_bridge, self.br_int)
        if tunnel_types:
            self.addCleanup(self.ovs.delete_bridge, self.br_tun)
        self.addCleanup(self.ovs.delete_bridge, self.br_phys)
        agent.sg_agent = mock.Mock()
        agent.ancillary_brs = []
        if ancillary_bridge:
            agent.ancillary_brs.append(ancillary_bridge)
        return agent
    def _mock_get_events(self, agent, polling_manager, ports):
        """Wrap polling_manager.get_events to only report this test's ports."""
        get_events = polling_manager.get_events
        p_ids = [p['id'] for p in ports]
        def filter_events():
            events = get_events()
            filtered_ports = []
            for dev in events['added']:
                iface_id = agent.int_br.portid_from_external_ids(
                    dev.get('external_ids', []))
                if iface_id in p_ids:
                    # if the event is not about a port that was created by
                    # this test, we filter the event out. Since these tests are
                    # not run in isolation processing all the events might make
                    # some test fail (e.g. the agent might keep resyncing
                    # because it keeps finding not ready ports that are created
                    # by other tests)
                    filtered_ports.append(dev)
            return {'added': filtered_ports, 'removed': events['removed']}
        polling_manager.get_events = mock.Mock(side_effect=filter_events)
    def stop_agent(self, agent, rpc_loop_thread):
        """Ask the agent loop to exit and wait for the thread to finish."""
        agent.run_daemon_loop = False
        rpc_loop_thread.wait()
    def start_agent(self, agent, ports=None, unplug_ports=None):
        """Start the agent rpc_loop in a green thread and return its poller."""
        if unplug_ports is None:
            unplug_ports = []
        if ports is None:
            ports = []
        self.setup_agent_rpc_mocks(agent, unplug_ports)
        polling_manager = polling.InterfacePollingMinimizer()
        self._mock_get_events(agent, polling_manager, ports)
        self.addCleanup(polling_manager.stop)
        polling_manager.start()
        utils.wait_until_true(
            polling_manager._monitor.is_active)
        # Force the loop to believe OVS is healthy so no restart logic runs.
        agent.check_ovs_status = mock.Mock(
            return_value=constants.OVS_NORMAL)
        self.agent_thread = eventlet.spawn(agent.rpc_loop,
                                           polling_manager)
        self.addCleanup(self.stop_agent, agent, self.agent_thread)
        return polling_manager
    def _create_test_port_dict(self):
        """Return a fake port dict with random id, MAC, IP and device name."""
        return {'id': uuidutils.generate_uuid(),
                'mac_address': net.get_random_mac(
                    'fa:16:3e:00:00:00'.split(':')),
                'fixed_ips': [{
                    'ip_address': '10.%d.%d.%d' % (
                        random.randint(3, 254),
                        random.randint(3, 254),
                        random.randint(3, 254))}],
                'vif_name': utils.get_rand_name(
                    self.driver.DEV_NAME_LEN, self.driver.DEV_NAME_PREFIX)}
    def _create_test_network_dict(self):
        """Return a fake network dict with random id and tenant id."""
        return {'id': uuidutils.generate_uuid(),
                'tenant_id': uuidutils.generate_uuid()}
    def _plug_ports(self, network, ports, agent,
                    bridge=None, namespace=None):
        """Plug each port into `bridge` (default int_br) and assign its IP."""
        if namespace is None:
            namespace = self.namespace
        for port in ports:
            bridge = bridge or agent.int_br
            self.driver.plug(
                network.get('id'), port.get('id'), port.get('vif_name'),
                port.get('mac_address'),
                bridge.br_name, namespace=namespace)
            ip_cidrs = ["%s/8" % (port.get('fixed_ips')[0][
                'ip_address'])]
            self.driver.init_l3(port.get('vif_name'), ip_cidrs,
                                namespace=namespace)
    def _unplug_ports(self, ports, agent):
        """Remove the given ports from the agent's integration bridge."""
        for port in ports:
            self.driver.unplug(
                port.get('vif_name'), agent.int_br.br_name, self.namespace)
    def _get_device_details(self, port, network):
        """Build the device-details dict the plugin RPC would return."""
        dev = {'device': port['id'],
               'port_id': port['id'],
               'network_id': network['id'],
               'network_type': network.get('network_type', 'vlan'),
               'physical_network': network.get('physical_network', 'physnet'),
               'segmentation_id': network.get('segmentation_id', 1),
               'fixed_ips': port['fixed_ips'],
               'device_owner': n_const.DEVICE_OWNER_COMPUTE_PREFIX,
               'admin_state_up': True}
        return dev
    def assert_bridge(self, br, exists=True):
        """Assert that bridge `br` does (or does not) exist in OVS."""
        self.assertEqual(exists, self.ovs.bridge_exists(br))
    def assert_patch_ports(self, agent):
        """Wait until the int/tun patch ports point at each other."""
        def get_peer(port):
            return agent.int_br.db_get_val(
                'Interface', port, 'options', check_error=True)
        utils.wait_until_true(
            lambda: get_peer(self.patch_int) == {'peer': self.patch_tun})
        utils.wait_until_true(
            lambda: get_peer(self.patch_tun) == {'peer': self.patch_int})
    def assert_bridge_ports(self):
        """Assert that both patch ports exist in OVS."""
        for port in [self.patch_tun, self.patch_int]:
            self.assertTrue(self.ovs.port_exists(port))
    def assert_vlan_tags(self, ports, agent):
        """Assert every port has been assigned a local VLAN tag."""
        for port in ports:
            res = agent.int_br.db_get_val('Port', port.get('vif_name'), 'tag')
            self.assertTrue(res)
    def _expected_plugin_rpc_call(self, call, expected_devices, is_up=True):
        """Helper to check expected rpc call are received
        :param call: The call to check
        :param expected_devices: The device for which call is expected
        :param is_up: True if expected_devices are devices that are set up,
               False if expected_devices are devices that are set down
        """
        if is_up:
            # update_device_list(context, devices_up, devices_down, ...):
            # args[0][1] is devices_up, args[0][2] is devices_down.
            rpc_devices = [
                dev for args in call.call_args_list for dev in args[0][1]]
        else:
            rpc_devices = [
                dev for args in call.call_args_list for dev in args[0][2]]
        for dev in rpc_devices:
            if dev in expected_devices:
                expected_devices.remove(dev)
        # reset mock otherwise if the mock is called again the same call param
        # will be processed again
        call.reset_mock()
        return not expected_devices
    def create_test_ports(self, amount=3, **kwargs):
        """Return `amount` fake port dicts.

        NOTE(review): _create_test_port_dict() takes no keyword arguments,
        so passing a non-empty **kwargs here raises TypeError — confirm
        whether kwargs support was intended.
        """
        ports = []
        for x in range(amount):
            ports.append(self._create_test_port_dict(**kwargs))
        return ports
    def _mock_update_device(self, context, devices_up, devices_down, agent_id,
                            host=None):
        """Default update_device_list mock: every known port succeeds."""
        dev_up = []
        dev_down = []
        for port in self.ports:
            if devices_up and port['id'] in devices_up:
                dev_up.append(port['id'])
            if devices_down and port['id'] in devices_down:
                dev_down.append({'device': port['id'], 'exists': True})
        return {'devices_up': dev_up,
                'failed_devices_up': [],
                'devices_down': dev_down,
                'failed_devices_down': []}
    def setup_agent_rpc_mocks(self, agent, unplug_ports):
        """Wire plugin RPC mocks; unplug `unplug_ports` when first queried."""
        def mock_device_details(context, devices, agent_id, host=None):
            details = []
            for port in self.ports:
                if port['id'] in devices:
                    dev = self._get_device_details(
                        port, self.network)
                    details.append(dev)
            ports_to_unplug = [x for x in unplug_ports if x['id'] in devices]
            if ports_to_unplug:
                self._unplug_ports(ports_to_unplug, self.agent)
            return {'devices': details, 'failed_devices': []}
        (agent.plugin_rpc.get_devices_details_list_and_failed_devices.
            side_effect) = mock_device_details
        agent.plugin_rpc.update_device_list.side_effect = (
            self._mock_update_device)
    def _prepare_resync_trigger(self, agent):
        """Make the first update_device_list call raise to force a resync."""
        def mock_device_raise_exception(context, devices_up, devices_down,
                                        agent_id, host=None):
            # Restore the normal mock so only the first call fails.
            agent.plugin_rpc.update_device_list.side_effect = (
                self._mock_update_device)
            raise Exception('Exception to trigger resync')
        self.agent.plugin_rpc.update_device_list.side_effect = (
            mock_device_raise_exception)
    def _prepare_failed_dev_up_trigger(self, agent):
        """Make the first port fail to be set up exactly once."""
        def mock_failed_devices_up(context, devices_up, devices_down,
                                   agent_id, host=None):
            failed_devices = []
            devices = list(devices_up)
            # first port fails
            if self.ports[0]['id'] in devices_up:
                # reassign side_effect so that next RPC call will succeed
                agent.plugin_rpc.update_device_list.side_effect = (
                    self._mock_update_device)
                devices.remove(self.ports[0]['id'])
                failed_devices.append(self.ports[0]['id'])
            return {'devices_up': devices,
                    'failed_devices_up': failed_devices,
                    'devices_down': [],
                    'failed_devices_down': []}
        self.agent.plugin_rpc.update_device_list.side_effect = (
            mock_failed_devices_up)
    def _prepare_failed_dev_down_trigger(self, agent):
        """Make the first port fail to be set down exactly once."""
        def mock_failed_devices_down(context, devices_up, devices_down,
                                     agent_id, host=None):
            # first port fails
            failed_port_id = self.ports[0]['id']
            failed_devices_down = []
            dev_down = [
                {'device': p['id'], 'exists': True}
                for p in self.ports if p['id'] in devices_down and (
                    p['id'] != failed_port_id)]
            # check if it's the call to set devices down and if the device
            # that is supposed to fail is in the call then modify the
            # side_effect so that next RPC call will succeed.
            if devices_down and failed_port_id in devices_down:
                agent.plugin_rpc.update_device_list.side_effect = (
                    self._mock_update_device)
                failed_devices_down.append(failed_port_id)
            return {'devices_up': devices_up,
                    'failed_devices_up': [],
                    'devices_down': dev_down,
                    'failed_devices_down': failed_devices_down}
        self.agent.plugin_rpc.update_device_list.side_effect = (
            mock_failed_devices_down)
    def wait_until_ports_state(self, ports, up, timeout=60):
        """Block until the agent has reported `ports` up (or down) via RPC."""
        port_ids = [p['id'] for p in ports]
        utils.wait_until_true(
            lambda: self._expected_plugin_rpc_call(
                self.agent.plugin_rpc.update_device_list, port_ids, up),
            timeout=timeout)
    def setup_agent_and_ports(self, port_dicts, create_tunnels=True,
                              ancillary_bridge=None,
                              trigger_resync=False,
                              failed_dev_up=False,
                              failed_dev_down=False,
                              network=None):
        """Create the agent, start its loop, and plug `port_dicts` in.

        At most one of trigger_resync / failed_dev_up / failed_dev_down
        failure modes is armed (checked in that order).
        """
        self.ports = port_dicts
        self.agent = self.create_agent(create_tunnels=create_tunnels,
                                       ancillary_bridge=ancillary_bridge)
        self.polling_manager = self.start_agent(self.agent, ports=self.ports)
        self.network = network or self._create_test_network_dict()
        if trigger_resync:
            self._prepare_resync_trigger(self.agent)
        elif failed_dev_up:
            self._prepare_failed_dev_up_trigger(self.agent)
        elif failed_dev_down:
            self._prepare_failed_dev_down_trigger(self.agent)
        self._plug_ports(self.network, self.ports, self.agent,
                         bridge=ancillary_bridge)
    def plug_ports_to_phys_br(self, network, ports, namespace=None):
        """Plug ports into the physical bridge and tag them per network type."""
        physical_network = network.get('physical_network', 'physnet')
        phys_segmentation_id = network.get('segmentation_id', None)
        network_type = network.get('network_type', 'flat')
        phys_br = self.agent.phys_brs[physical_network]
        self._plug_ports(network, ports, self.agent, bridge=phys_br,
                         namespace=namespace)
        if network_type == 'flat':
            # NOTE(slaweq): for OVS implementations remove the DEAD VLAN tag
            # on ports that belongs to flat network. DEAD VLAN tag is added
            # to each newly created port. This is related to lp#1767422
            for port in ports:
                phys_br.clear_db_attribute("Port", port['vif_name'], "tag")
        elif phys_segmentation_id and network_type == 'vlan':
            for port in ports:
                phys_br.set_db_attribute(
                    "Port", port['vif_name'], "tag", phys_segmentation_id)
| {
"content_hash": "e8a90e63efcf2f99d791fe40381e4b89",
"timestamp": "",
"source": "github",
"line_count": 391,
"max_line_length": 79,
"avg_line_length": 44.34271099744245,
"alnum_prop": 0.578844157342254,
"repo_name": "noironetworks/neutron",
"id": "827a516a8168261d6c8808dbea2bba3fa71810fc",
"size": "18017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/tests/functional/agent/l2/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "11420614"
},
{
"name": "Shell",
"bytes": "38791"
}
],
"symlink_target": ""
} |
"""Base Model definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import functools
import os
import re
import six
from six.moves import zip
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
import tensorflow.compat.v2 as tf2
from dataloader import mode_keys
from modeling import learning_rates
from utils import benchmark_utils
def build_assignment_map(checkpoint_path,
                         prefix=None,
                         skip_variables_regex=None):
  """Generate assignment map for loading checkpoints.

  Args:
    checkpoint_path: path to the checkpoint whose variables are loaded.
    prefix: optional variable-scope prefix; graph variables under it are
      mapped to checkpoint names with the prefix stripped.
    skip_variables_regex: optional regex; matching (prefix-stripped)
      variable names are excluded from the map.

  Returns:
    A dict mapping checkpoint variable names to graph variables.
  """
  graph_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=prefix)
  ckpt_shapes = dict(tf.train.list_variables(checkpoint_path))
  prefix = prefix or ''
  assignment_map = {}
  skipped_names = set()
  for variable in graph_vars:
    # Drop the trailing ":<index>" suffix from the graph variable name.
    full_name = variable.name
    if ':' in full_name:
      full_name = full_name.rsplit(':', 1)[0]
    target_name = full_name[len(prefix):]
    if skip_variables_regex and re.match(skip_variables_regex, target_name):
      continue
    # Skip variables in checkpoints with incompatible shapes, otherwise
    # errors will happen when loading checkpoints.
    ckpt_shape = ckpt_shapes.get(target_name)
    if ckpt_shape is not None and variable.get_shape().is_compatible_with(
        ckpt_shape):
      assignment_map[target_name] = variable
    else:
      skipped_names.add(target_name)
  tf.logging.info('The following variables are not initialized: %s',
                  skipped_names)
  return assignment_map
def filter_variables(variables, variable_regex, is_whitelist):
  """Filter a list of variables based on the regex.

  Args:
    variables: a list of tf.Variable to be filtered.
    variable_regex: a regex specifying the filtering rule.
    is_whitelist: a bool. If True, indicate `variable_regex` specifies the
      variables to keep. If False, indicate `variable_regex` specifies the
      variables to discard.

  Returns:
    filtered_variables: a list of tf.Variable after filtering.
  """
  if variable_regex is None:
    # No pattern: nothing is filtered out in either mode.
    return list(variables)
  pattern = re.compile(variable_regex)
  if is_whitelist:
    return [v for v in variables if pattern.match(v.name)]
  return [v for v in variables if not pattern.match(v.name)]
def filter_trainable_variables(variables, frozen_variable_prefix):
  """Return the variables to train, dropping frozen ones.

  Variables whose names match `frozen_variable_prefix` are treated as a
  blacklist and removed.
  """
  return filter_variables(
      variables, variable_regex=frozen_variable_prefix, is_whitelist=False)
def filter_regularization_variables(variables, regularization_variable_regex):
  """Return the variables that should receive L2 regularization.

  Variables whose names match `regularization_variable_regex` are treated
  as a whitelist and kept.
  """
  return filter_variables(
      variables, variable_regex=regularization_variable_regex,
      is_whitelist=True)
class OptimizerFactory(object):
  """Class to generate optimizer function."""

  def __init__(self, params):
    """Creates an optimizer constructor based on the specified params.

    Args:
      params: a config object with a `type` attribute (one of 'momentum',
        'adam', 'adadelta', 'adagrad', 'rmsprop') and, for 'momentum' and
        'rmsprop', a `momentum` attribute.

    Raises:
      ValueError: if `params.type` is not a supported optimizer type.
    """
    if params.type == 'momentum':
      self._optimizer = functools.partial(
          tf.train.MomentumOptimizer, momentum=params.momentum)
    elif params.type == 'adam':
      self._optimizer = tf.train.AdamOptimizer
    elif params.type == 'adadelta':
      self._optimizer = tf.train.AdadeltaOptimizer
    elif params.type == 'adagrad':
      self._optimizer = tf.train.AdagradOptimizer
    elif params.type == 'rmsprop':
      self._optimizer = functools.partial(
          tf.train.RMSPropOptimizer,
          momentum=params.momentum, decay=0.9, epsilon=0.001)
    else:
      # Bug fix: the original interpolated `self._optimizer`, which is never
      # assigned on this path, so the intended ValueError was masked by an
      # AttributeError. Report the offending type instead.
      raise ValueError('Unsupported optimizer type %s.' % params.type)

  def __call__(self, learning_rate):
    """Returns an optimizer instance built with the given learning rate."""
    return self._optimizer(learning_rate)
class BaseModel(six.with_metaclass(abc.ABCMeta, object)):
  """Base class for model function."""
  def __init__(self, params):
    """Reads the training/eval configuration off `params`.

    Args:
      params: a hierarchical config object; this reads the `train`,
        `architecture`, `eval` and top-level fields referenced below.
    """
    self._transpose_input = params.train.transpose_input
    self._space_to_depth_block_size = (
        params.architecture.space_to_depth_block_size)
    self._use_bfloat16 = params.architecture.use_bfloat16
    self._l2_weight_decay = float(params.train.l2_weight_decay)
    # Optimization.
    self._optimizer_fn = OptimizerFactory(params.train.optimizer)
    self._learning_rate_fn = learning_rates.learning_rate_generator(
        params.train.learning_rate, params.train.total_steps)
    self._gradient_clip_norm = params.train.gradient_clip_norm
    self._frozen_var_prefix = params.train.frozen_variable_prefix
    self._regularization_var_regex = params.train.regularization_variable_regex
    # Checkpoint restoration.
    self._checkpoint = params.train.checkpoint.path
    self._checkpoint_prefix = params.train.checkpoint.prefix
    self._skip_variables_regex = params.train.checkpoint.skip_variables_regex
    # Summary.
    self._enable_summary = params.enable_summary
    # Scalar/image tensors registered via add_*_summary, flushed by
    # summarize() through the TPU host call.
    self._summaries = {}
    self._image_summaries = {}
    self._model_dir = params.model_dir
    self._iterations_per_loop = params.train.iterations_per_loop
    # Platform device.
    self._use_tpu = params.use_tpu
    self._skip_eval_loss = params.eval.skip_eval_loss
  @abc.abstractmethod
  def _build_outputs(self, images, labels, mode):
    """Implements `build_outputs`. See `build_outputs` for more details."""
    pass
  def build_outputs(self, images, labels, mode):
    """Builds the model forward pass and generates outputs.
    It wraps the implementation in `_build_outputs` with some code to handle
    bfloat16 scope.
    Args:
      images: a Tensor of shape [batch_size, height, width, channel],
        representing the input image.
      labels: a dict of Tensors that includes labels used for training/eval.
      mode: one of mode_keys.TRAIN, mode_keys.EVAL, mode_keys.PREDICT.
    Returns:
      a dict of output tensors.
    """
    if self._use_bfloat16:
      with tf.tpu.bfloat16_scope():
        def cast_outputs_to_float(d):
          # Recursively casts every tensor in the (possibly nested) output
          # dict back to float32 for loss/metric computation.
          for k, v in sorted(six.iteritems(d)):
            if isinstance(v, dict):
              cast_outputs_to_float(v)
            else:
              d[k] = tf.cast(v, tf.float32)
        # Casts class and box outputs to tf.float32.
        outputs = self._build_outputs(images, labels, mode)
        cast_outputs_to_float(outputs)
    else:
      outputs = self._build_outputs(images, labels, mode)
    return outputs
  @abc.abstractmethod
  def build_losses(self, outputs, labels):
    """Builds the model loss.
    Args:
      outputs: a dict of output tensors produced by `build_outputs`.
      labels: a dict of label tensors.
    Returns:
      model_loss: a scalar Tensor of model loss.
    """
    pass
  @abc.abstractmethod
  def build_metrics(self, outputs, labels):
    """Builds the metrics used for evaluation.
    Args:
      outputs: a dict of output tensors produced by `build_outputs`.
      labels: a dict of label tensors.
    Returns:
      a 2-element tuple of (metric_fn, metric_fn_inputs).
    """
    pass
  @abc.abstractmethod
  def build_predictions(self, outputs, labels):
    """Builds the metrics used for evaluation.
    It takes the output tensors from `build_outputs` and applies further
    necessary post-processing to generate the prediction tensors.
    Args:
      outputs: a dict of output tensors produced by `build_outputs`.
      labels: a dict of label tensors.
    Returns:
      a dict of Tensor containing all the prediction tensors.
    """
    pass
  def train(self, images, labels):
    """Returns a TPUEstimatorSpec for training.
    Args:
      images: a Tensor of shape [batch_size, height, width, channel]
        representing the input image tensor.
      labels: a dict of label tensors.
    Returns:
      a TPUEstimatorSpec object used for training.
    """
    # If the input image is transposed, we need to revert it back to the
    # original shape before it's used in the computation.
    if self._transpose_input:
      if self._space_to_depth_block_size > 1:
        # HWNC -> NHWC
        images = tf.transpose(images, [2, 0, 1, 3])
      else:
        # HWCN -> NHWC
        images = tf.transpose(images, [3, 0, 1, 2])
    outputs = self.build_outputs(images, labels, mode=mode_keys.TRAIN)
    # Log model statistics.
    batch_size = images.get_shape().as_list()[0]
    if batch_size:
      _, _ = benchmark_utils.compute_model_statistics(
          batch_size=batch_size,
          json_file_path=os.path.join(
              self._model_dir, 'train_model_stats.json'))
    model_loss = self.build_losses(outputs, labels)
    global_step = tf.train.get_global_step()
    learning_rate = self._learning_rate_fn(global_step)
    self.add_scalar_summary('learning_rate', learning_rate)
    # Sets up the optimizer.
    optimizer = self._optimizer_fn(learning_rate)
    if self._use_tpu:
      optimizer = tf.tpu.CrossShardOptimizer(optimizer)
    # Batch norm requires update_ops to be added as a train_op dependency.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    # Gets all trainable variables and apply the variable filter.
    train_var_list = filter_trainable_variables(
        tf.trainable_variables(), self._frozen_var_prefix)
    # Gets the regularization variables and apply the regularization loss.
    regularization_var_list = filter_regularization_variables(
        train_var_list, self._regularization_var_regex)
    l2_regularization_loss = self._l2_weight_decay * tf.add_n([
        tf.nn.l2_loss(v) for v in regularization_var_list])
    self.add_scalar_summary('l2_regularization_loss', l2_regularization_loss)
    total_loss = model_loss + l2_regularization_loss
    grads_and_vars = optimizer.compute_gradients(total_loss, train_var_list)
    if self._gradient_clip_norm > 0.0:
      grads = [gv[0] for gv in grads_and_vars]
      tvars = [gv[1] for gv in grads_and_vars]
      clipped_grads, _ = tf.clip_by_global_norm(grads, self._gradient_clip_norm)
      grads_and_vars = list(zip(clipped_grads, tvars))
    with tf.control_dependencies(update_ops):
      train_op = optimizer.apply_gradients(grads_and_vars, global_step)
    scaffold_fn = self.restore_from_checkpoint()
    if self._enable_summary:
      host_call_fn = self.summarize()
    else:
      host_call_fn = None
    tpu_estimator_spec = tf_estimator.tpu.TPUEstimatorSpec(
        mode=tf_estimator.ModeKeys.TRAIN,
        loss=total_loss,
        train_op=train_op,
        host_call=host_call_fn,
        scaffold_fn=scaffold_fn)
    if self._use_tpu:
      return tpu_estimator_spec
    else:
      return tpu_estimator_spec.as_estimator_spec()
  def evaluate(self, images, labels):
    """Returns a TPUEstimatorSpec for evaluation.
    Args:
      images: a Tensor of shape [batch_size, height, width, channel]
        representing the input image tensor.
      labels: a dict of label tensors.
    Returns:
      a TPUEstimatorSpec object used for evaluation.
    """
    outputs = self.build_outputs(images, labels, mode=mode_keys.EVAL)
    # Log model statistics.
    batch_size = images.get_shape().as_list()[0]
    if batch_size:
      _, _ = benchmark_utils.compute_model_statistics(
          batch_size=batch_size,
          json_file_path=os.path.join(
              self._model_dir, 'eval_model_stats.json'))
    if self._skip_eval_loss:
      # Loss computation skipped by config; report a constant zero loss.
      model_loss = tf.constant(0, dtype=tf.float32)
    else:
      model_loss = self.build_losses(outputs, labels)
    eval_metrics = self.build_metrics(outputs, labels)
    tpu_estimator_spec = tf_estimator.tpu.TPUEstimatorSpec(
        mode=tf_estimator.ModeKeys.EVAL,
        loss=model_loss,
        eval_metrics=eval_metrics)
    if self._use_tpu:
      return tpu_estimator_spec
    else:
      return tpu_estimator_spec.as_estimator_spec()
  def predict(self, features):
    """Returns a TPUEstimatorSpec for prediction.
    Args:
      features: a dict of Tensors including the input images and other label
        tensors used for prediction.
    Returns:
      a TPUEstimatorSpec object used for prediction.
    """
    images = features['images']
    labels = features['labels']
    outputs = self.build_outputs(images, labels, mode=mode_keys.PREDICT)
    # Log model statistics.
    batch_size = images.get_shape().as_list()[0]
    if batch_size:
      _, _ = benchmark_utils.compute_model_statistics(
          batch_size=batch_size,
          json_file_path=os.path.join(
              self._model_dir, 'predict_model_stats.json'))
    predictions = self.build_predictions(outputs, labels)
    tpu_estimator_spec = tf_estimator.tpu.TPUEstimatorSpec(
        mode=tf_estimator.ModeKeys.PREDICT,
        predictions=predictions)
    if self._use_tpu:
      return tpu_estimator_spec
    else:
      return tpu_estimator_spec.as_estimator_spec()
  def restore_from_checkpoint(self):
    """Returns scaffold function to restore parameters from checkpoint."""
    def scaffold_fn():
      """Loads pretrained model through scaffold function."""
      assignment_map = build_assignment_map(
          checkpoint_path=self._checkpoint,
          prefix=self._checkpoint_prefix,
          skip_variables_regex=self._skip_variables_regex)
      tf.logging.info('Loading checkpoint from %s using assignment_map: %s',
                      self._checkpoint, assignment_map)
      tf.train.init_from_checkpoint(self._checkpoint, assignment_map)
      return tf.train.Scaffold()
    # No checkpoint configured -> no scaffold, default initialization.
    return scaffold_fn if self._checkpoint else None
  def summarize(self):
    """Returns summary ops for logging."""
    def host_call_fn(*flat_args):
      """Training host call. Creates scalar summaries for training metrics.
      Args:
        *flat_args: `list` of flat host call input tensors.
      Returns:
        List of summary ops to run on the CPU host.
      """
      global_step, summaries, image_summaries = tf.nest.pack_sequence_as(
          host_call_inputs, flat_args)
      global_step = tf.reduce_mean(global_step)
      with (tf2.summary.create_file_writer(
          self._model_dir,
          max_queue=self._iterations_per_loop).as_default()):
        with tf2.summary.record_if(True):
          for key, value in summaries.items():
            tf2.summary.scalar(key, tf.reduce_mean(value), step=global_step)
          for key, value in image_summaries.items():
            tf2.summary.image(key, value, step=global_step)
        return tf.summary.all_v2_summary_ops()
    # Host-call tensors must have a leading batch-like dim; [None] adds it.
    global_step = tf.reshape(tf.train.get_global_step()[None], [1])
    host_call_inputs = [global_step, self._summaries, self._image_summaries]
    return (host_call_fn, tf.nest.flatten(host_call_inputs))
  def add_scalar_summary(self, name, tensor):
    # Registers a scalar for the next summarize() host call; reshaped to
    # rank 1 as required by the host-call plumbing.
    self._summaries[name] = tf.reshape(tensor, [1])
  def add_image_summary(self, name, tensor):
    # Registers an image batch for the next summarize() host call.
    self._image_summaries[name] = tensor
| {
"content_hash": "c2bdbc6e1517a40d9f54447dc4b89238",
"timestamp": "",
"source": "github",
"line_count": 441,
"max_line_length": 80,
"avg_line_length": 34.00453514739229,
"alnum_prop": 0.6701787143238197,
"repo_name": "tensorflow/tpu",
"id": "0e90996daf19173f73c3469da6cd849e06adc918",
"size": "15685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/official/detection/modeling/base_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "754301"
},
{
"name": "Dockerfile",
"bytes": "2734"
},
{
"name": "Go",
"bytes": "226317"
},
{
"name": "Jupyter Notebook",
"bytes": "56231509"
},
{
"name": "Makefile",
"bytes": "2369"
},
{
"name": "Python",
"bytes": "3444271"
},
{
"name": "Shell",
"bytes": "21032"
},
{
"name": "Starlark",
"bytes": "164"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse
from django.db import models
from us_ignite.common.fields import URL_HELP_TEXT
from us_ignite.testbeds import managers
from geoposition.fields import GeopositionField
from django_extensions.db.fields import (
AutoSlugField, CreationDateTimeField, ModificationDateTimeField)
from taggit.managers import TaggableManager
class NetworkSpeed(models.Model):
    """Lookup value for a network speed tier, referenced by ``Testbed``."""
    name = models.CharField(max_length=255)
    # URL-safe identifier generated from ``name``.
    slug = AutoSlugField(populate_from='name', unique=True)
    def __unicode__(self):
        return self.name
class Testbed(models.Model):
    """A network testbed with publication workflow (draft/published)."""
    # Willingness-to-experiment levels:
    LOW = 1
    MEDIUM = 2
    HIGH = 3
    EXPERIMENTATION_CHOICES = (
        (LOW, u'Low'),
        (MEDIUM, u'Medium'),
        (HIGH, u'High'),
    )
    # Publication workflow states:
    PUBLISHED = 1
    DRAFT = 2
    STATUS_CHOICES = (
        (PUBLISHED, u'Published'),
        (DRAFT, u'Draft'),
    )
    name = models.CharField(
        max_length=255, verbose_name=u'Name of the Testbed')
    slug = AutoSlugField(populate_from='name', unique=True)
    summary = models.TextField(blank=True)
    description = models.TextField()
    # Optional contact user; kept (set to NULL) if the user is deleted.
    contact = models.ForeignKey(
        'auth.User', blank=True, null=True, on_delete=models.SET_NULL)
    organization = models.ForeignKey(
        'organizations.Organization', blank=True, null=True,
        on_delete=models.SET_NULL)
    website = models.URLField(
        max_length=500, blank=True, help_text=URL_HELP_TEXT)
    image = models.ImageField(blank=True, upload_to='testbed', max_length=500)
    network_speed = models.ForeignKey(
        'testbeds.NetworkSpeed', blank=True, null=True,
        on_delete=models.SET_NULL)
    connections = models.TextField(
        blank=True, verbose_name=u'Connections to other networks')
    experimentation = models.IntegerField(
        choices=EXPERIMENTATION_CHOICES, default=MEDIUM,
        verbose_name=u'Willingness to experiment')
    passes_homes = models.PositiveIntegerField(
        default=0, verbose_name=u'Estimated passes # homes')
    passes_business = models.PositiveIntegerField(
        default=0, verbose_name=u'Estimated passes # business')
    passes_anchor = models.PositiveIntegerField(
        default=0, verbose_name=u'Estimated passes # community anchor')
    is_advanced = models.BooleanField(
        default=False, help_text=u'Does it have advanced characteristics?')
    hubs = models.ManyToManyField(
        'hubs.Hub', blank=True, verbose_name=u'Communities')
    applications = models.ManyToManyField(
        'apps.Application', blank=True, verbose_name=u'Applications being '
        'piloted')
    features = models.ManyToManyField(
        'apps.Feature', blank=True, help_text=u'Existing NextGen features in '
        'this community.')
    position = GeopositionField(blank=True)
    tags = TaggableManager(blank=True)
    # New testbeds start as drafts and must be explicitly published.
    status = models.IntegerField(choices=STATUS_CHOICES, default=DRAFT)
    created = CreationDateTimeField()
    modified = ModificationDateTimeField()
    # Managers:
    objects = models.Manager()
    active = managers.TestbedActiveManager()
    def __unicode__(self):
        return self.name
    def get_absolute_url(self):
        # Public detail page, keyed by slug.
        return reverse('testbed_detail', args=[self.slug])
    def get_edit_url(self):
        # Editing happens only in the Django admin (see is_editable_by).
        return reverse('admin:testbeds_testbed_change', args=[self.pk])
    def is_contact(self, user):
        # True when ``user`` is the designated contact for this testbed.
        return self.contact == user
    def is_published(self):
        return self.status == self.PUBLISHED
    def is_draft(self):
        return self.status == self.DRAFT
    def is_visible_by(self, user):
        # Drafts are visible only to their contact user.
        return self.is_published() or self.is_contact(user)
    def is_editable_by(self, user):
        """Only editable in the admin section."""
        return user and user.is_superuser
| {
"content_hash": "6b3a51b911f241a5765252856fa0d5a2",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 78,
"avg_line_length": 35.301886792452834,
"alnum_prop": 0.6795831106360235,
"repo_name": "us-ignite/us_ignite",
"id": "f090c500c90ad93b0ce4a77592f14c0740eb0ae1",
"size": "3742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "us_ignite/testbeds/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "590320"
},
{
"name": "HTML",
"bytes": "920235"
},
{
"name": "JavaScript",
"bytes": "109759"
},
{
"name": "Nginx",
"bytes": "3047"
},
{
"name": "Pascal",
"bytes": "48"
},
{
"name": "Puppet",
"bytes": "53455"
},
{
"name": "Python",
"bytes": "1321882"
},
{
"name": "Ruby",
"bytes": "370509"
},
{
"name": "Shell",
"bytes": "63"
}
],
"symlink_target": ""
} |
import functools
import itertools
import contextlib
import weakref
import logging
# Module-level logger for SimState diagnostics.
l = logging.getLogger("simuvex.s_state")
import claripy
import ana
from archinfo import arch_from_id
def arch_overrideable(f):
    """Decorator allowing the state's arch object to override a method.

    When ``self.arch`` defines an attribute with the same name as the
    wrapped function, that attribute is called instead (receiving the
    state as its first argument); otherwise the original function runs.
    """
    _MISSING = object()
    @functools.wraps(f)
    def wrapped_f(self, *args, **kwargs):
        arch_hook = getattr(self.arch, f.__name__, _MISSING)
        if arch_hook is _MISSING:
            return f(self, *args, **kwargs)
        return arch_hook(self, *args, **kwargs)
    return wrapped_f
from .plugins import default_plugins
# Global counter used to generate unique indices for the symbolic
# variables created during state merging.
merge_counter = itertools.count()
class SimState(ana.Storable): # pylint: disable=R0904
"""
The SimState represents the state of a program, including its memory, registers, and so forth.
"""
def __init__(self, arch="AMD64", plugins=None, memory_backer=None, permissions_backer=None, mode=None, options=None,
add_options=None, remove_options=None, special_memory_filler=None, os_name=None):
# the architecture is used for function simulations (autorets) and the bitness
if isinstance(arch, str):
self.arch = arch_from_id(arch)
else:
self.arch = arch
# the options
if options is None:
if mode is None:
l.warning("SimState defaulting to symbolic mode.")
mode = "symbolic"
options = o.modes[mode]
options = set(options)
if add_options is not None:
options |= add_options
if remove_options is not None:
options -= remove_options
self.options = options
self.mode = mode
# plugins
self.plugins = { }
if plugins is not None:
for n,p in plugins.iteritems():
self.register_plugin(n, p)
if not self.has_plugin('memory'):
# we don't set the memory endness because, unlike registers, it's hard to understand
# which endness the data should be read
if o.ABSTRACT_MEMORY in self.options:
# We use SimAbstractMemory in static mode
# Convert memory_backer into 'global' region
if memory_backer is not None:
memory_backer = {'global': memory_backer}
# TODO: support permissions backer in SimAbstractMemory
self.register_plugin('memory', SimAbstractMemory(memory_backer=memory_backer, memory_id="mem"))
elif o.FAST_MEMORY in self.options:
self.register_plugin('memory', SimFastMemory(memory_backer=memory_backer, memory_id="mem"))
else:
self.register_plugin('memory', SimSymbolicMemory(memory_backer=memory_backer, permissions_backer=permissions_backer, memory_id="mem"))
if not self.has_plugin('registers'):
if o.FAST_REGISTERS in self.options:
self.register_plugin('registers', SimFastMemory(memory_id="reg", endness=self.arch.register_endness))
else:
self.register_plugin('registers', SimSymbolicMemory(memory_id="reg", endness=self.arch.register_endness))
# OS name
self.os_name = os_name
# This is used in static mode as we don't have any constraints there
self._satisfiable = True
# states are big, so let's give them UUIDs for ANA right away to avoid
# extra pickling
self.make_uuid()
self.uninitialized_access_handler = None
self._special_memory_filler = special_memory_filler
# this is a global condition, applied to all added constraints, memory reads, etc
self._global_condition = None
self.ip_constraints = []
def _ana_getstate(self):
s = dict(ana.Storable._ana_getstate(self))
s['plugins'] = { k:v for k,v in s['plugins'].iteritems() if k not in ('inspector', 'regs', 'mem') }
return s
def _ana_setstate(self, s):
ana.Storable._ana_setstate(self, s)
for p in self.plugins.values():
p.set_state(self._get_weakref() if not isinstance(p, SimAbstractMemory) else self)
    def _get_weakref(self):
        # Weak proxy handed to most plugins so they do not keep the state alive.
        return weakref.proxy(self)
    def _get_strongref(self):
        # Strong reference, for the rare consumers that must hold the real object.
        return self
#
# Some temporary backwards compatibility
#
    def BV(self, name, size=None, explicit_name=None):
        """Deprecated alias for se.BVS(); size defaults to the arch's bit width."""
        l.critical("DEPRECATION WARNING: SimState.BV() has been deprecated and will soon be removed. Please use state.se.BVS() or claripy.BVS().")
        print "DEPRECATION WARNING: SimState.BV() has been deprecated and will soon be removed. Please use state.se.BVS() or claripy.BVS()."
        return self.se.BVS(name, self.arch.bits if size is None else size, explicit_name=explicit_name)
    def BVV(self, value, size=None):
        """Deprecated alias for se.BVV(); size defaults to arch.bits for non-string values."""
        l.critical("DEPRECATION WARNING: SimState.BVV() has been deprecated and will soon be removed. Please use state.se.BVV().")
        print "DEPRECATION WARNING: SimState.BVV() has been deprecated and will soon be removed. Please use state.se.BVV()."
        return self.se.BVV(value, size=self.arch.bits if size is None and not isinstance(value, str) else size)
#
# Easier access to some properties
#
    @property
    def ip(self):
        """
        Get the instruction pointer expression.
        :return: an expression
        """
        return self.regs.ip
    @ip.setter
    def ip(self, val):
        # Delegates to the register view; val may be concrete or symbolic.
        self.regs.ip = val
#
# Plugin accessors
#
    def __getattr__(self, v):
        # Fall back to plugin lookup so `state.<plugin_name>` resolves; unknown
        # names must surface as AttributeError per the attribute protocol.
        try:
            return self.get_plugin(v)
        except KeyError:
            raise AttributeError(v)
    # Convenience accessors: each property fetches (and, on first use, lazily
    # instantiates via get_plugin) the named plugin.
    @property
    def memory(self):
        return self.get_plugin('memory')
    @property
    def registers(self):
        return self.get_plugin('registers')
    @property
    def se(self):
        # The solver engine; most constraint operations go through this.
        return self.get_plugin('solver_engine')
    @property
    def inspect(self):
        return self.get_plugin('inspector')
    @property
    def log(self):
        return self.get_plugin('log')
    @property
    def scratch(self):
        return self.get_plugin('scratch')
    @property
    def posix(self):
        return self.get_plugin('posix')
    @property
    def libc(self):
        return self.get_plugin('libc')
    @property
    def cgc(self):
        return self.get_plugin('cgc')
    @property
    def regs(self):
        # Named-register view (e.g. state.regs.ip), distinct from 'registers'.
        return self.get_plugin('regs')
    @property
    def mem(self):
        return self.get_plugin('mem')
    @property
    def gdb(self):
        return self.get_plugin('gdb')
    @property
    def procedure_data(self):
        return self.get_plugin('procedure_data')
    @property
    def uc_manager(self):
        return self.get_plugin('uc_manager')
    @property
    def unicorn(self):
        return self.get_plugin('unicorn')
def _inspect(self, *args, **kwargs):
if self.has_plugin('inspector'):
self.inspect.action(*args, **kwargs)
def _inspect_getattr(self, attr, default_value):
if self.has_plugin('inspector'):
if hasattr(self.inspect, attr):
return getattr(self.inspect, attr)
return default_value
#
# Plugins
#
def has_plugin(self, name):
return name in self.plugins
def get_plugin(self, name):
if name not in self.plugins:
p = default_plugins[name]()
self.register_plugin(name, p)
return p
return self.plugins[name]
def register_plugin(self, name, plugin):
#l.debug("Adding plugin %s of type %s", name, plugin.__class__.__name__)
plugin.set_state(self._get_weakref() if not isinstance(plugin, SimAbstractMemory) else self)
self.plugins[name] = plugin
plugin.init_state()
return plugin
def release_plugin(self, name):
if name in self.plugins:
del self.plugins[name]
#
# Constraint pass-throughs
#
def simplify(self, *args): return self.se.simplify(*args)
    def add_constraints(self, *args, **kwargs):
        """Add constraints to the state.

        Behavior depends on the options set:
        - TRACK_CONSTRAINTS: constraints (optionally simplified) are handed to
          the solver, with inspector breakpoints fired before/after.
        - ABSTRACT_SOLVER (static/VSA mode): constraints are evaluated for
          truth/falsehood and, if neither, applied by intersecting the
          strided intervals they constrain.
        - Non-symbolic fallback: only tracks the _satisfiable flag.
        Constraints must be passed as separate arguments, not a list/tuple.
        """
        if len(args) > 0 and isinstance(args[0], (list, tuple)):
            raise Exception("Tuple or list passed to add_constraints!")
        if o.TRACK_CONSTRAINTS in self.options and len(args) > 0:
            if o.SIMPLIFY_CONSTRAINTS in self.options:
                constraints = [ self.simplify(a) for a in args ]
            else:
                constraints = args
            # Breakpoints may rewrite the constraint list via the inspector.
            self._inspect('constraints', BP_BEFORE, added_constraints=constraints)
            constraints = self._inspect_getattr("added_constraints", constraints)
            added = self.se.add(*constraints)
            self._inspect('constraints', BP_AFTER)
            # add actions for the added constraints
            if o.TRACK_CONSTRAINT_ACTIONS in self.options:
                for c in added:
                    sac = SimActionConstraint(self, c)
                    self.log.add_action(sac)
        else:
            # preserve the old action logic for when we don't track constraints (why?)
            if (
                'action' in kwargs and kwargs['action'] and
                o.TRACK_CONSTRAINT_ACTIONS in self.options and len(args) > 0
            ):
                for arg in args:
                    if self.se.symbolic(arg):
                        sac = SimActionConstraint(self, arg)
                        self.log.add_action(sac)
            if o.ABSTRACT_SOLVER in self.options and len(args) > 0:
                for arg in args:
                    # A definitely-false constraint makes the state unsatisfiable.
                    if self.se.is_false(arg):
                        self._satisfiable = False
                        return
                    if self.se.is_true(arg):
                        continue
                    # `is_true` and `is_false` does not use VSABackend currently (see commits 97a75366 and 2dfba73e in
                    # claripy). There is a chance that VSA backend can in fact handle it.
                    # Therefore we try to resolve it with VSABackend again
                    if claripy.backends.vsa.is_false(arg):
                        self._satisfiable = False
                        return
                    if claripy.backends.vsa.is_true(arg):
                        continue
                    # It's neither True or False. Let's try to apply the condition
                    # We take the argument, extract a list of constrained SIs out of it (if we could, of course), and
                    # then replace each original SI the intersection of original SI and the constrained one.
                    _, converted = self.se.constraint_to_si(arg)
                    for original_expr, constrained_si in converted:
                        if not original_expr.variables:
                            l.error('Incorrect original_expression to replace in add_constraints(). ' +
                                    'This is due to defects in VSA logics inside claripy. Please report ' +
                                    'to Fish and he will fix it if he\'s free.')
                            continue
                        new_expr = constrained_si
                        # Apply the narrowed interval everywhere the expression occurs.
                        self.registers.replace_all(original_expr, new_expr)
                        for _, region in self.memory.regions.items():
                            region.memory.replace_all(original_expr, new_expr)
                    l.debug("SimState.add_constraints: Applied to final state.")
            elif o.SYMBOLIC not in self.options and len(args) > 0:
                for arg in args:
                    if self.se.is_false(arg):
                        self._satisfiable = False
                        return
def satisfiable(self, **kwargs):
if o.ABSTRACT_SOLVER in self.options or o.SYMBOLIC not in self.options:
extra_constraints = kwargs.pop('extra_constraints', ())
for e in extra_constraints:
if self.se.is_false(e):
return False
return self._satisfiable
else:
return self.se.satisfiable(**kwargs)
def downsize(self):
if 'solver_engine' in self.plugins:
self.se.downsize()
#
# State branching operations
#
# Returns a dict that is a copy of all the state's plugins
def _copy_plugins(self):
return { n: p.copy() for n,p in self.plugins.iteritems() }
def copy(self):
"""
Returns a copy of the state.
"""
if self._global_condition is not None:
raise SimStateError("global condition was not cleared before state.copy().")
c_arch = self.arch.copy()
c_plugins = self._copy_plugins()
state = SimState(arch=c_arch, plugins=c_plugins, options=self.options, mode=self.mode, os_name=self.os_name)
state.uninitialized_access_handler = self.uninitialized_access_handler
state._special_memory_filler = self._special_memory_filler
state.ip_constraints = self.ip_constraints
return state
def merge(self, *others, **kwargs):
"""
Merges this state with the other states. Returns the merging result, merged state, and the merge flag.
:param states: the states to merge
:param merge_conditions: a tuple of the conditions under which each state holds
:return: (merged state, merge flag, a bool indicating if any merging occured)
"""
merge_conditions = kwargs.pop('merge_conditions', None)
if len(kwargs) != 0:
raise ValueError("invalid arguments: %s" % kwargs.keys())
if merge_conditions is None:
# TODO: maybe make the length of this smaller? Maybe: math.ceil(math.log(len(others)+1, 2))
merge_flag = self.se.BVS("state_merge_%d" % merge_counter.next(), 16)
merge_values = range(len(others)+1)
merge_conditions = [ merge_flag == b for b in merge_values ]
else:
merge_conditions = [
(self.se.true if len(mc) == 0 else self.se.And(*mc)) for mc in merge_conditions
]
if len(set(o.arch.name for o in others)) != 1:
import ipdb; ipdb.set_trace()
raise SimMergeError("Unable to merge due to different architectures.")
all_plugins = set(self.plugins.keys()) | set.union(*(set(o.plugins.keys()) for o in others))
merged = self.copy()
merging_occurred = False
# plugins
for p in all_plugins:
our_plugin = merged.plugins[p] if p in merged.plugins else None
their_plugins = [ (pl.plugins[p] if p in pl.plugins else None) for pl in others ]
plugin_classes = (
set([our_plugin.__class__]) | set(pl.__class__ for pl in their_plugins)
) - set([None.__class__])
if len(plugin_classes) != 1:
raise SimMergeError(
"There are differing plugin classes (%s) for plugin %s" % (plugin_classes, p)
)
plugin_class = plugin_classes.pop()
our_filled_plugin = our_plugin if our_plugin is not None else merged.register_plugin(
p, plugin_class()
)
their_filled_plugins = [
(tp if tp is not None else t.register_plugin(p, plugin_class()))
for t,tp in zip(others, their_plugins)
]
plugin_state_merged = our_filled_plugin.merge(their_filled_plugins, merge_conditions)
if plugin_state_merged:
l.debug('Merging occured in %s', p)
merging_occurred = True
merged.add_constraints(merged.se.Or(*merge_conditions))
return merged, merge_conditions, merging_occurred
def widen(self, *others):
"""
Perform a widening between self and other states
:param others:
:return:
"""
merge_flag = self.se.BVS("state_merge_%d" % merge_counter.next(), 16)
merge_values = range(len(others) + 1)
if len(set(frozenset(o.plugins.keys()) for o in others)) != 1:
raise SimMergeError("Unable to merge due to different sets of plugins.")
if len(set(o.arch.name for o in others)) != 1:
raise SimMergeError("Unable to merge due to different architectures.")
widened = self.copy()
widening_occurred = False
# plugins
for p in self.plugins:
plugin_state_widened = widened.plugins[p].widen([_.plugins[p] for _ in others], merge_flag, merge_values)
if plugin_state_widened:
l.debug('Widening occured in %s', p)
widening_occurred = True
return widened, widening_occurred
#############################################
### Accessors for tmps, registers, memory ###
#############################################
def reg_concrete(self, *args, **kwargs):
"""
Returns the contents of a register but, if that register is symbolic,
raises a SimValueError.
"""
e = self.registers.load(*args, **kwargs)
if self.se.symbolic(e):
raise SimValueError("target of reg_concrete is symbolic!")
return self.se.any_int(e)
def mem_concrete(self, *args, **kwargs):
"""
Returns the contents of a memory but, if the contents are symbolic,
raises a SimValueError.
"""
e = self.memory.load(*args, **kwargs)
if self.se.symbolic(e):
raise SimValueError("target of mem_concrete is symbolic!")
return self.se.any_int(e)
###############################
### Stack operation helpers ###
###############################
@arch_overrideable
def stack_push(self, thing):
"""
Push 'thing' to the stack, writing the thing to memory and adjusting the stack pointer.
"""
# increment sp
sp = self.regs.sp + self.arch.stack_change
self.regs.sp = sp
return self.memory.store(sp, thing, endness=self.arch.memory_endness)
@arch_overrideable
def stack_pop(self):
"""
Pops from the stack and returns the popped thing. The length will be the architecture word size.
"""
sp = self.regs.sp
self.regs.sp = sp - self.arch.stack_change
return self.memory.load(sp, self.arch.bits / 8, endness=self.arch.memory_endness)
@arch_overrideable
def stack_read(self, offset, length, bp=False):
"""
Reads length bytes, at an offset into the stack.
:param offset: The offset from the stack pointer.
:param length: The number of bytes to read.
:param bp: If True, offset from the BP instead of the SP. Default: False.
"""
sp = self.regs.bp if bp else self.regs.sp
return self.memory.load(sp+offset, length, endness=self.arch.memory_endness)
###############################
### Other helpful functions ###
###############################
def make_concrete_int(self, expr):
if isinstance(expr, (int, long)):
return expr
if not self.se.symbolic(expr):
return self.se.any_int(expr)
v = self.se.any_int(expr)
self.add_constraints(expr == v)
return v
    # This handles the preparation of concrete function launches from abstract functions.
    @arch_overrideable
    def prepare_callsite(self, retval, args, convention='wtf'):
        """Stub: callsite preparation is not implemented yet (see TODO)."""
        #TODO
        pass
def _stack_values_to_string(self, stack_values):
"""
Convert each stack value to a string
:param stack_values: A list of values
:return: The converted string
"""
strings = [ ]
for stack_value in stack_values:
if self.se.symbolic(stack_value):
concretized_value = "SYMBOLIC - %s" % repr(stack_value)
else:
if len(self.se.any_n_int(stack_value, 2)) == 2:
concretized_value = repr(stack_value)
else:
concretized_value = repr(stack_value)
strings.append(concretized_value)
return " .. ".join(strings)
    def dbg_print_stack(self, depth=None, sp=None):
        """
        Only used for debugging purposes.
        Return the current stack info in formatted string. If depth is None, the
        current stack frame (from sp to bp) will be printed out.
        """
        # Python 2 integer division: one stack slot per architecture word.
        var_size = self.arch.bits / 8
        sp_sim = self.regs.sp
        bp_sim = self.regs.bp
        # Without a concrete SP (or explicit override) there is nothing to walk.
        if self.se.symbolic(sp_sim) and sp is None:
            result = "SP is SYMBOLIC"
        elif self.se.symbolic(bp_sim) and depth is None:
            result = "BP is SYMBOLIC"
        else:
            sp_value = sp if sp is not None else self.se.any_int(sp_sim)
            if self.se.symbolic(bp_sim):
                result = "SP = 0x%08x, BP is symbolic\n" % (sp_value)
                bp_value = None
            else:
                bp_value = self.se.any_int(bp_sim)
                result = "SP = 0x%08x, BP = 0x%08x\n" % (sp_value, bp_value)
            if depth is None:
                # bp_value cannot be None here
                depth = (bp_value - sp_value) / var_size + 1 # Print one more value
            pointer_value = sp_value
            for i in xrange(depth):
                # For AbstractMemory, we wanna utilize more information from VSA
                stack_values = [ ]
                if o.ABSTRACT_MEMORY in self.options:
                    # Split the word along VSA segment boundaries before reading.
                    sp = self.regs.sp
                    segment_sizes = self.memory.get_segments(sp + i * var_size, var_size)
                    pos = i * var_size
                    for segment_size in segment_sizes:
                        stack_values.append(self.stack_read(pos, segment_size, bp=False))
                        pos += segment_size
                else:
                    stack_values.append(self.stack_read(i * var_size, var_size, bp=False))
                # Convert it into a big string!
                val = self._stack_values_to_string(stack_values)
                # Mark the SP and BP slots in the listing.
                if pointer_value == sp_value:
                    line = "(sp)% 16x | %s" % (pointer_value, val)
                elif pointer_value == bp_value:
                    line = "(bp)% 16x | %s" % (pointer_value, val)
                else:
                    line = "% 20x | %s" % (pointer_value, val)
                pointer_value += var_size
                result += line + "\n"
        return result
#
# Other helper methods
#
    def set_mode(self, mode):
        """Switch to the named mode, resetting options to that mode's defaults."""
        self.mode = mode
        self.options = set(o.modes[mode])
@property
def thumb(self):
if not self.arch.name.startswith('ARM'):
return False
if self.regs.ip.symbolic:
# return True when IP can *only* be odd
new_state = self.copy()
new_state.add_constraints(new_state.regs.ip % 2 == 1, new_state.regs.ip % 2 != 0)
return new_state.satisfiable()
else:
concrete_ip = self.se.any_int(self.regs.ip)
return concrete_ip % 2 == 1
#
# Some pretty fancy global condition stuff!
#
    @property
    def with_condition(self):
        """Return a context-manager factory that temporarily ANDs a condition
        onto the state's global condition for the duration of a `with` block."""
        @contextlib.contextmanager
        def ctx(c):
            # Save/restore so nested uses compose and exceptions unwind cleanly.
            old_condition = self._global_condition
            try:
                new_condition = c if old_condition is None else self.se.And(old_condition, c)
                self._global_condition = new_condition
                yield
            finally:
                self._global_condition = old_condition
        return ctx
def _adjust_condition(self, c):
if self._global_condition is None:
return c
elif c is None:
return self._global_condition
else:
return self.se.And(self._global_condition, c)
def _adjust_condition_list(self, conditions):
if self._global_condition is None:
return conditions
elif len(conditions) == 0:
return conditions.__class__((self._global_condition,))
else:
return conditions.__class__((self._adjust_condition(self.se.And(*conditions)),))
from .plugins.symbolic_memory import SimSymbolicMemory
from .plugins.fast_memory import SimFastMemory
from .plugins.abstract_memory import SimAbstractMemory
from .s_errors import SimMergeError, SimValueError, SimStateError
from .plugins.inspect import BP_AFTER, BP_BEFORE
from .s_action import SimActionConstraint
from . import s_options as o
| {
"content_hash": "50ea30e6e0fe1aee20e75f7543c3189f",
"timestamp": "",
"source": "github",
"line_count": 684,
"max_line_length": 150,
"avg_line_length": 35.61695906432749,
"alnum_prop": 0.5722847056891881,
"repo_name": "chubbymaggie/simuvex",
"id": "9cf5454085cf15bd51193e6f5b44f1e9d0b75952",
"size": "24385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simuvex/s_state.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6276"
},
{
"name": "C++",
"bytes": "34210"
},
{
"name": "Makefile",
"bytes": "599"
},
{
"name": "Python",
"bytes": "854125"
}
],
"symlink_target": ""
} |
from zope import component
from sparc import configuration
from sparc.configuration import container
import mellon
from sparc.logging import logging
logger = logging.getLogger(__name__)
@component.adapter(mellon.IMellonApplication, configuration.ISparcApplicationConfiguredEvent)
def initialize_spiders(app, event):
    """Register configured reaper storage facility"""
    config = container.IPyContainerConfigValue(app.get_config())
    if config.query('ScrapySimpleTextWebsiteCrawler'):
        # Importing the module registers the spiders as a side effect.
        import mellon.factories.web_crawler.web_crawler.spiders.config_spiders
| {
"content_hash": "396e243674227ad79ad1dcb076518494",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 93,
"avg_line_length": 42.357142857142854,
"alnum_prop": 0.7571669477234402,
"repo_name": "CrowdStrike/mellon",
"id": "9dde0bfa742b26b98e7c1272d2a74be050deee5a",
"size": "593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mellon/factories/web_crawler/subscribers/app_registration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "367"
},
{
"name": "Python",
"bytes": "81615"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration altering PageView.timestamp's default.

    The default is the fixed wall-clock time captured when makemigrations ran,
    not an auto-now value.
    """
    dependencies = [
        ('analytics', '0014_auto_20150206_2232'),
    ]
    operations = [
        migrations.AlterField(
            model_name='pageview',
            name='timestamp',
            field=models.DateTimeField(default=datetime.datetime(2015, 2, 7, 1, 40, 27, 332639, tzinfo=utc)),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "eb9a8461c645ec7925439a520045afb9",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 109,
"avg_line_length": 25.38095238095238,
"alnum_prop": 0.6322701688555347,
"repo_name": "codingforentrepreneurs/srvup-rest-framework",
"id": "2ed72e1b66733732e867d6e6d3beffed8fe434d4",
"size": "557",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/analytics/migrations/0015_auto_20150207_0140.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "43570"
},
{
"name": "HTML",
"bytes": "39829"
},
{
"name": "JavaScript",
"bytes": "101374"
},
{
"name": "Python",
"bytes": "125924"
}
],
"symlink_target": ""
} |
import numpy as np
import pandas as pd
import matplotlib as mat
import matplotlib.pyplot as plt
import caffe
import cv2
import json
import math
# MODEL = 'ILSVRC' # ImageNet, don't use ImageNet, it wasn't trained on all categories
MODEL = 'coco' # MS-Coco
# NOTE(review): this immediately overrides the 'coco' assignment above, so the
# effective model is always 'ours' unless this line is edited by hand.
MODEL = 'ours'
IMAGE_SIZE = 300 # 300x300 trained on coco or ILSVRC
# I wonder if we can take the coco model and further train it on
# http://image-net.org/synset?wnid=n02773838
# IMAGE_SIZE = 512 # for 512x512 trained on coco
# for detection - percentage that the model is sure it's what you're looking for
THRESHOLD = 0.20
# There are 21 categories.... pick one color for each
# just a tool for label finding
# for checking if a list contains elements of another
def any_in(a, b):
    """True if the two iterables share at least one element."""
    return not set(a).isdisjoint(b)
# for picking colors of the boxes
COLORS = plt.cm.hsv(np.linspace(0, 1, 255)).tolist()
# Run inference on GPU 0; swap in set_mode_cpu() below for CPU-only machines.
caffe.set_device(0)
caffe.set_mode_gpu()
# caffe.set_mode_cpu()
# Notebook-export artifact: enables inline matplotlib rendering.
get_ipython().magic(u'matplotlib inline')
# In[2]:
from google.protobuf import text_format
from caffe.proto import caffe_pb2
# Load the label map matching the selected MODEL.
# BUG FIX: the second branch was a bare `if`, so for MODEL == 'ours' the
# `else` clause ran afterwards and clobbered labelmap_file with the ILSVRC
# path. Chaining with elif makes the three cases mutually exclusive.
if MODEL == 'ours':
    labelmap_file = 'data/coco/labelmap_coco2.prototxt'
elif MODEL == 'coco':
    labelmap_file = 'data/coco/labelmap_coco.prototxt'
else:
    labelmap_file = 'data/ILSVRC2016/labelmap_ilsvrc_det.prototxt'
labelmap = caffe_pb2.LabelMap()
# Context manager closes the file (and avoids shadowing the `file` builtin).
with open(labelmap_file, 'r') as labelmap_fp:
    text_format.Merge(str(labelmap_fp.read()), labelmap)
def get_labelname(labelmap, labels):
    """Translate numeric label ids into display names.

    :param labelmap: a LabelMap proto (items carrying .label and .display_name)
    :param labels: one label id, or a list of label ids
    :return: list of display names, in the order of `labels`
    Raises AssertionError for an unknown label id (as the original did).
    """
    if type(labels) is not list:
        labels = [labels]
    # Build the lookup once instead of rescanning the map per label (the
    # original was O(labels * map items) and used py2-only xrange). Keep the
    # first occurrence to mirror the original first-match scan.
    name_by_id = {}
    for item in labelmap.item:
        if item.label not in name_by_id:
            name_by_id[item.label] = item.display_name
    labelnames = []
    for label in labels:
        assert label in name_by_id
        labelnames.append(name_by_id[label])
    return labelnames
# In[3]:
def loadmodel():
    """Build the SSD Caffe net for the configured MODEL/IMAGE_SIZE pair."""
    if MODEL == 'coco' and IMAGE_SIZE == 300:
        model = 'deploy300.prototxt'
        weights = 'VGG_coco_SSD_300x300_iter_400000.caffemodel'
    elif MODEL == 'coco' and IMAGE_SIZE == 512:
        model = 'deploy512.prototxt'
        weights = 'VGG_coco_SSD_512x512_iter_360000.caffemodel'
    else:
        # Our fine-tuned model (always deployed at 300x300).
        model = 'deploy2017.prototxt'
        weights = 'VGG_coco_SSD_300x300_iter_60000.caffemodel'
    # how you load a model with weights in Caffe
    return caffe.Net(model, weights, caffe.TEST)
# In[4]:
def preprocess(frame):
    """Resize a frame to the network input size and reorder HxWxC -> CxHxW."""
    resized = cv2.resize(frame, (IMAGE_SIZE, IMAGE_SIZE),
                         interpolation=cv2.INTER_LANCZOS4)
    # Color images arrive HxWx3; Caffe wants channels first.
    if len(resized.shape) == 3:
        resized = resized.transpose((2, 0, 1))
    return resized
# In[5]:
def detect(image, net):
    """Run one SSD forward pass; return box coords, confidences and labels."""
    # Input blob is (batch=1, channels=3, IMAGE_SIZE, IMAGE_SIZE).
    net.blobs['data'].reshape(1, 3, IMAGE_SIZE, IMAGE_SIZE)
    net.blobs['data'].data[0, ...] = image
    # See ssd_detect.ipynb from Wei Liu, author of SSD
    # https://github.com/weiliu89/caffe/blob/ssd/examples/ssd/ssd_detect.py
    detections = net.forward()['detection_out']
    rows = detections[0, 0]
    det_label = rows[:, 1]
    det_conf = rows[:, 2]  # confidence
    det_xmin = rows[:, 3]  # normalized bounding-box corners
    det_ymin = rows[:, 4]
    det_xmax = rows[:, 5]
    det_ymax = rows[:, 6]
    # No confidence threshold here: keep every detection, callers filter.
    top_indices = [i for i, _ in enumerate(det_conf)]
    top_label_indices = det_label[top_indices].tolist()
    top_labels = get_labelname(labelmap, top_label_indices)
    return (det_xmin, det_ymin, det_xmax, det_ymax, det_conf, top_labels, top_label_indices)
# In[6]:
def calcDist(coords1, coords2):
    """Euclidean distance between two coordinate vectors."""
    delta = coords1 - coords2
    return np.sqrt(np.sum(delta * delta))
# In[7]:
def loadvideo(filename, net):
cap = cv2.VideoCapture(filename)
# Actually doesn't store real frames, but the frame shape of midpoint vectors
saved_frames = []
FUZZY_MATCH = 5
FRAMES_TO_HOLD = 15
OWNER_DISTANCE = 50
while cap.isOpened():
ret, frame = cap.read()
if np.any(frame != 0):
bag_updated = []
person_updated = []
frame_processed = preprocess(frame)
processed_det = detect(frame_processed, net)
top_xmin, top_ymin, top_xmax, top_ymax, top_conf, top_labels, top_label_indices = processed_det
# Midpoint_boxes is a tensor, which has the area of the frame from the video
# But the value at each pixels position is only valid when it represents the midpoint of a detected box
# The values will be width, height, label, and "owner_y, owner_x" which is set to the coordinates
# of the person who is first within the threshold of what we consider owner if label is a bag
midpoint_boxes = np.empty((frame.shape[0], frame.shape[1], 5))
midpoint_boxes.fill(np.nan)
for i in range(top_conf.shape[0]):
xmin = int(round(top_xmin[i] * frame.shape[1]))
ymin = int(round(top_ymin[i] * frame.shape[0]))
xmax = int(round(top_xmax[i] * frame.shape[1]))
ymax = int(round(top_ymax[i] * frame.shape[0]))
score = top_conf[i]
label = int(top_label_indices[i])
label_name = top_labels[i]
display_txt = '%s: %.2f' % (label_name, score)
width = xmax - xmin + 1
height = ymax - ymin + 1
midx = xmin + (width) / 2
midy = ymin + (height) / 2
if label in [1, 27, 31, 33] and score > 0.1:
obj_array = np.asarray(
[width, height, label, np.nan, np.nan])
midpoint_boxes[midy, midx] = obj_array
found = -1
if len(saved_frames) >= 1:
for j in range(len(saved_frames) - 1, -1, -1):
fuzzy_min = midy - FUZZY_MATCH if midy >= FUZZY_MATCH else 0
fuzzx_min = midx - FUZZY_MATCH if midx >= FUZZY_MATCH else 0
fuzzy_max = midy + FUZZY_MATCH if midy + \
FUZZY_MATCH < frame.shape[1] else frame.shape[1] - 1
fuzzx_max = midx + FUZZY_MATCH if midx + \
FUZZY_MATCH < frame.shape[0] else frame.shape[0] - 1
sub_sample = saved_frames[j][fuzzy_min:fuzzy_max,
fuzzx_min:fuzzx_max]
for row in range(sub_sample.shape[0]):
box = sub_sample[row]
if np.isfinite(box.flatten()).any():
for col in range(box.shape[0]):
# Previous some-odd frame at position [j][row, col]
pixel_midpoint = box[col]
# If person or object is ocluded match width _OR_ height being similar
if obj_array[0] - FUZZY_MATCH < pixel_midpoint[0] < obj_array[0] + FUZZY_MATCH or obj_array[1] - FUZZY_MATCH < pixel_midpoint[1] < obj_array[1] + FUZZY_MATCH and obj_array[2] == pixel_midpoint[2]:
midpoint_boxes[midy,
midx][3:5] = pixel_midpoint[3:5]
if label == 1:
item_type = 1
person_updated.append(
(row, col, midy, midx, pixel_midpoint[3], pixel_midpoint[4]))
# prev location, new loc
else:
bag_updated.append(
(row, col, midy, midx, pixel_midpoint[3], pixel_midpoint[4]))
# For person/bag row/col means the place the object previously was
found = (j, row, col, midy,
midx, label, pixel_midpoint[3], pixel_midpoint[3])
break
if found != -1:
break
if found != -1:
break
# Currently have in found the layer where the bag or person was last seen
# found a person, check if person has moved and see if bag has also been identified
# found a person, see if the bag was already found in this frame
if found != -1 and found[5] == 1:
for bag in bag_updated:
if bag[0] == found[6] and bag[1] == found[7]:
# Bag's old owner position was this old owner's position
midpoint_boxes[bag[2],
bag[3], 3:5] = found[3:5] # Now new owner's position is held by bag
elif found != -1: # must be a bag that we found in this frame, see if the owner was updated
for person in person_updated:
if person[0] == found[6] and person[1] == found[7]:
# Bag's old owner position was this old owner's position
midpoint_boxes[person[2],
person[3], 3:5] = found[3:5] # Now new owner's position is held by bag
if found != -1 and found[0] < FRAMES_TO_HOLD - 2:
# Must have skipped a frame so add in relevant middle position
missing_frames = FRAMES_TO_HOLD - found[0]
diff_rows = found[3] - found[1]
diff_cols = found[4] - found[2]
# May be -b, or 0 , or +a
incr_rows_per_frame = diff_rows // missing_frames
incr_cols_per_frame = diff_cols // missing_frames
makeup_mul = 0
for makeup_i in range(found[0], FRAMES_TO_HOLD):
makeup_mul += 1
saved_frames[makeup_i][found[1] + incr_rows_per_frame * makeup_mul, found[2] +
incr_cols_per_frame * makeup_mul] = saved_frames[found[0]][found[1], found[2]]
if found == -1:
# First time seeing the object, add
if label == 1: # First time seeing person
person_updated.append(
(np.nan, np.nan, midy, midx, np.nan, np.nan))
else:
bag_updated.append(
(np.nan, np.nan, midy, midx, np.nan, np.nan))
if len(saved_frames) == 0:
# Do initial attribution of owners
for i in range(frame.shape[0]):
for j in range(frame.shape[1]):
if midpoint_boxes[i, j, 0] != np.nan and midpoint_boxes[i, j, 2] in [27, 31, 33]:
min_i = i - \
OWNER_DISTANCE if (
i - OWNER_DISTANCE) > 0 else 0
max_i = i + \
OWNER_DISTANCE if (
i + OWNER_DISTANCE) < frame.shape[0] else frame.shape[0]
min_j = j - \
OWNER_DISTANCE if (
j - OWNER_DISTANCE) > 0 else 0
max_j = j + \
OWNER_DISTANCE if (
j + OWNER_DISTANCE) < frame.shape[1] else frame.shape[1] - 1
found_owner = false
potential_owners = []
bag_coord = np.asarray([i, j])
for y in range(min_i, max_i):
for x in range(min_j, max_j):
if midpoint_boxes[y, x, 0] != np.nan and midpoint_boxes[y, x, 2] == 1:
# y,x may be owner
potential_owners.append(
(y, x, calcDist(np.asarray([y, x]), bag_coord)))
potential_owners = sorted(
potential_owners, cmp=lambda a, b: a[2] - b[2])
print(potential_owners)
for bag in bag_updated:
if bag[0] == np.nan:
# new bag not seen before
bag_coord = np.asarray([bag[2], bag[3]])
min_i = bag[2] - \
OWNER_DISTANCE if (
bag[2] - OWNER_DISTANCE) > 0 else 0
max_i = bag[2] + \
OWNER_DISTANCE if (
bag[2] + OWNER_DISTANCE) < frame.shape[0] else frame.shape[0]
min_j = bag[3] - \
OWNER_DISTANCE if (
bag[3] - OWNER_DISTANCE) > 0 else 0
max_j = bag[3] + \
OWNER_DISTANCE if (
bag[3] + OWNER_DISTANCE) < frame.shape[1] else frame.shape[1] - 1
potential_owners = []
for i in range(min_i, max_i):
for j in range(min_j, max_j):
# look for person
if midpoint_boxes[i, j, 2] == 1:
# not a nan item, and is a person
potential_owners.append(i, j, calcDist(
np.asarray([i, j]), bag_coord))
potential_owners = sorted(
potential_owners, cmp=lambda a, b: a[2] - b[2])
print(potential_owners)
saved_frames.append(midpoint_boxes)
if len(saved_frames) > FRAMES_TO_HOLD:
saved_frames = saved_frames[1:]
if cv2.waitKey(1) & 0xFF == ord('q'):
print 'how did we break?'
break
# In[8]:
# Script entry: build the network, then run tracking on the sample clip.
net = loadmodel()
loadvideo('AVSS_AB_Easy_Clipped.mov', net)
print('Finished!')
# In[80]:
# no longer outputs the images here, but they are all in the directory
| {
"content_hash": "89425b72d132808e3c69fde5ed738940",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 236,
"avg_line_length": 46.95384615384615,
"alnum_prop": 0.4741153342070773,
"repo_name": "IsThatYourBag/IsThatYourBag",
"id": "df2b7321ea51c74c2748425e70a08325abbaf49a",
"size": "15289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "IsThatYourBag-caffe-numpyway.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1116952"
},
{
"name": "Python",
"bytes": "278218"
},
{
"name": "Shell",
"bytes": "12773"
}
],
"symlink_target": ""
} |
"""Indicators for network status."""
from makani.avionics.common import cvt
from makani.avionics.common import pack_avionics_messages
from makani.gs.monitor2.apps.layout import indicator
from makani.gs.monitor2.apps.layout import stoplights
from makani.gs.monitor2.apps.plugins import common
from makani.lib.python import struct_tree
# TODO: Find a global definition source for it.
# XLR warning level: base of -112 dBm plus a 20 dB margin — presumably the
# radio's sensitivity floor; TODO confirm against the radio datasheet.
_XLR_RSSI_WARNING_THRESHOLD = -112 + 20
# The Microhard pDDL radios support the joystick traffic without
# packet drops down to an RSSI of -92 dBm. The additional free space
# loss from perch to maximal glide range is 11 dB.
_PDDL_RSSI_WARNING_THRESHOLD = -92 + 11
def _IsSwitchCommsLinkUp(switch_stats, port):
return (switch_stats and (switch_stats.link_status_bits & (1 << port)) and
switch_stats.stats[port].rx_multicast_packet_rate > 0)
class BaseCommsStatusIndicator(indicator.BaseAttributeIndicator):
  """Base class for comms status.

  Renders the A/B link states of two core switches as a small table and
  picks a stoplight from how many of the monitored links are up.
  """

  def __init__(self, name, node_a, port_a, node_b, port_b, message_type,
               show_label, ignore_error=False):
    super(BaseCommsStatusIndicator, self).__init__([
        (message_type, node_a, 'switch_stats'),
        (message_type, node_b, 'switch_stats'),
    ], name)
    self._ignore_error = ignore_error
    self._port_a = port_a
    self._port_b = port_b
    self._show_label = show_label

  def _ShowLinkStatus(self, is_up):
    if is_up:
      return 'Up'
    return 'Down'

  def _DictToString(self, results, item_length=10):
    # One header row (optional) plus one value row, columns sorted by key.
    keys = sorted(results.keys())
    lines = []
    if self._show_label:
      lines.append(' '.join(k.rjust(item_length) for k in keys))
    lines.append(' '.join(results[k].rjust(item_length) for k in keys))
    return '\n'.join(lines)

  def _Filter(self, status_a, status_b):
    results = {}
    link_count = 0
    up_count = 0
    for label, status, port in (('Link A', status_a, self._port_a),
                                ('Link B', status_b, self._port_b)):
      if port is None:
        # This side is not monitored.
        results[label] = '--'
        continue
      link_count += 1
      link_up = _IsSwitchCommsLinkUp(status, port)
      if link_up:
        up_count += 1
      results[label] = self._ShowLinkStatus(link_up)
    if self._ignore_error or link_count == 0:
      stoplight = stoplights.STOPLIGHT_ANY
    elif up_count == link_count:
      stoplight = stoplights.STOPLIGHT_NORMAL
    elif up_count == 0:
      stoplight = stoplights.STOPLIGHT_ERROR
    else:
      stoplight = stoplights.STOPLIGHT_WARNING
    return self._DictToString(results), stoplight
class CommsStatusPoFIndicator(BaseCommsStatusIndicator):
  """Comms status over the PoF links (port 20 on CsGsA and CsGsB)."""

  def __init__(self, name, show_label=True):
    super(CommsStatusPoFIndicator, self).__init__(
        name, node_a='CsGsA', port_a=20, node_b='CsGsB', port_b=20,
        message_type='CoreSwitchSlowStatus', show_label=show_label,
        ignore_error=True)
class CommsStatusEoPIndicator(BaseCommsStatusIndicator):
  """Comms status over the EoP links (no switch port is monitored)."""

  def __init__(self, name, show_label=True):
    super(CommsStatusEoPIndicator, self).__init__(
        name, node_a='CsGsA', port_a=None, node_b='CsGsB', port_b=None,
        message_type='CoreSwitchSlowStatus', show_label=show_label,
        ignore_error=True)
class CommsStatusWifiIndicator(BaseCommsStatusIndicator):
  """Comms status over wifi (port 22 on CsGsA, port 18 on CsGsB)."""

  def __init__(self, name, show_label=True):
    super(CommsStatusWifiIndicator, self).__init__(
        name, node_a='CsGsA', port_a=22, node_b='CsGsB', port_b=18,
        message_type='CoreSwitchSlowStatus', show_label=show_label)
class JoystickRadioStatusIndicator(indicator.BaseAttributeIndicator):
  """Joystick radio status.

  Reports RSSI for both directions of the joystick radio link: downlink
  from JoystickMonitorStatus and uplink from TetherDown via CsB.
  """

  def __init__(self, name):
    super(JoystickRadioStatusIndicator, self).__init__([
        ('JoystickMonitorStatus', 'JoystickA', 'microhard_status'),
        ('TetherDown', 'CsB', 'comms_status'),
    ], name)

  def _HandleStatus(self, connected, rssi):
    """Formats one direction's RSSI and maps it to a stoplight."""
    if connected:
      # Warn when the RSSI drops below the pDDL margin-adjusted floor.
      if rssi < _PDDL_RSSI_WARNING_THRESHOLD:
        stoplight = stoplights.STOPLIGHT_WARNING
      else:
        stoplight = stoplights.STOPLIGHT_NORMAL
      return '% 4d' % rssi, stoplight
    else:
      return ' n/a', stoplights.STOPLIGHT_WARNING

  def _Filter(self, down_status, up_status):
    """Combines down/up link RSSI into one text cell and stoplight."""
    if struct_tree.IsValidElement(down_status):
      text, down_stoplight = self._HandleStatus(
          down_status.connected, down_status.rssi)
    else:
      text, down_stoplight = '--', stoplights.STOPLIGHT_WARNING
    result = '%s dBm down, ' % text
    if struct_tree.IsValidElement(up_status):
      # The uplink counts as connected when the joystick bit is set in
      # links_up.
      is_connected = (up_status.links_up &
                      pack_avionics_messages.kTetherCommsLinkJoystick)
      text, up_stoplight = self._HandleStatus(
          is_connected, up_status.received_signal_strength)
    else:
      text, up_stoplight = '--', stoplights.STOPLIGHT_WARNING
    result += '%s dBm up' % text
    # The aggregate stoplight is the worse of the two directions.
    stoplight = stoplights.MostSevereStoplight(down_stoplight, up_stoplight)
    return result, stoplight
class TetherLongRangeRadioStatusIndicator(indicator.SingleAttributeIndicator):
  """Long-range radio signal strength, from TetherDown via CsGsA."""

  def __init__(self, name):
    super(TetherLongRangeRadioStatusIndicator, self).__init__(
        ('TetherDown', 'CsGsA'), name)

  def _Filter(self, tether_down):
    if not struct_tree.IsValidElement(tether_down):
      return 'Link down', stoplights.STOPLIGHT_ERROR
    # Downlink RSSI is measured locally; uplink RSSI is reported back by the
    # remote end inside comms_status.
    down_signal_strength = tether_down.received_signal_strength
    up_signal_strength = tether_down.comms_status.received_signal_strength
    text = '% 4d dBm down, % 4d dBm up' % (
        down_signal_strength, up_signal_strength)
    # Warn if either direction falls below the XLR RSSI threshold (defined
    # earlier in this module).
    if (down_signal_strength < _XLR_RSSI_WARNING_THRESHOLD or
        up_signal_strength < _XLR_RSSI_WARNING_THRESHOLD):
      stoplight = stoplights.STOPLIGHT_WARNING
    else:
      stoplight = stoplights.STOPLIGHT_NORMAL
    return text, stoplight
class BaseTetherCommsStatusIndicator(indicator.BaseAttributeIndicator):
  """Base class for tether comms status.

  Shows Up/Down per link from merged TetherDown comms status, where each
  link may be reported by several TetherDown sources in priority order.
  """

  def __init__(self, name, link_type, sources_per_link, link_names, show_label,
               ignore_error=False):
    """Initializes the indicator.

    Args:
      name: Display name of the indicator.
      link_type: TetherCommsLink* bit to test against `links_up`.
      sources_per_link: A list of source lists; each inner list holds the
          TetherDownSources that can report on one link, in priority order.
      link_names: Display names, one per entry in sources_per_link.
      show_label: True to render a header row with the link names.
      ignore_error: If True, always report STOPLIGHT_ANY.
    """
    super(BaseTetherCommsStatusIndicator, self).__init__(
        self._PackArguments(sources_per_link), name)
    assert len(sources_per_link) == len(link_names)
    self._sources_per_link = sources_per_link
    self._link_names = link_names
    self._link_type = link_type
    self._show_label = show_label
    self._ignore_error = ignore_error

  def _PackArguments(self, sources_per_link):
    """Construct the list of arguments telling statuses of various links.

    Args:
      sources_per_link: A list of source list. Each source list contains
          TetherDownSources for a particular link.

    Returns:
      The packed argument list is in the form of
      [<status_link_0>, <valid_link_0>, <status_link_1>, <valid_link_1>, ...]
    """
    attributes = []
    for sources in sources_per_link:
      for source in sources:
        # Each source contributes a (status, valid) attribute pair.
        attributes.append(
            ('filtered', 'merge_tether_down', 'comms_status[%d]' % source))
        attributes.append(
            ('filtered', 'merge_tether_down',
             'comms_status_valid[%d]' % source))
    return attributes

  def _UnpackArguments(self, *attributes):
    """Unpack attributes to comms status, valid bits, and indices per link."""
    # Attributes alternate status/valid (see _PackArguments).
    comms_status = attributes[0:len(attributes):2]
    valid = attributes[1:len(attributes):2]
    # A dictionary of source indices in `comms_status` and `valid` arrays
    # per link. E.g., comms_status[source_indices_per_link[0][1]] is the
    # comms_status for source 1 of link 0.
    source_indices_per_link = {}
    source_idx = 0
    for link_idx, sources in enumerate(self._sources_per_link):
      source_indices_per_link[link_idx] = range(
          source_idx, source_idx + len(sources))
      source_idx += len(sources)
    return comms_status, valid, source_indices_per_link

  def _ShowLinkStatus(self, is_up):
    # Human-readable link state.
    return 'Up' if is_up else 'Down'

  def _DictToString(self, results, item_length=10):
    """Renders `results` as right-justified columns, sorted by key."""
    text = []
    keys = sorted(results.keys())
    if self._show_label:
      text.append(' '.join(k.rjust(item_length) for k in keys))
    text.append(' '.join(results[k].rjust(item_length) for k in keys))
    return '\n'.join(text)

  def _Filter(self, *attributes):
    """Computes per-link Up/Down text and the aggregate stoplight."""
    comms_status, valid, source_indices_per_link = self._UnpackArguments(
        *attributes)
    results = {}
    total_links = 0
    total_up_links = 0
    # Iterate through all links in the order of _sources_per_link.
    for link_index in range(len(self._sources_per_link)):
      link_name = self._link_names[link_index]
      link_up = False
      for source_idx in source_indices_per_link[link_index]:
        # Use the first source whose status is valid and fresh enough.
        if (valid[source_idx] and comms_status[source_idx].no_update_count <
            common.MAX_NO_UPDATE_COUNT_COMMS_STATUS):
          link_up = comms_status[source_idx].links_up & self._link_type
          break
      # Links are regarded as DOWN, if comms_status is obsolete.
      results[link_name] = self._ShowLinkStatus(link_up)
      total_links += 1
      total_up_links += (1 if link_up else 0)
    if self._ignore_error:
      stoplight = stoplights.STOPLIGHT_ANY
    else:
      if total_links == 0:
        stoplight = stoplights.STOPLIGHT_ANY
      elif total_up_links == total_links:
        stoplight = stoplights.STOPLIGHT_NORMAL
      elif total_up_links == 0:
        stoplight = stoplights.STOPLIGHT_ERROR
      else:
        # Some but not all links are up.
        stoplight = stoplights.STOPLIGHT_WARNING
    return self._DictToString(results), stoplight
class TetherCommsStatusPoFIndicator(BaseTetherCommsStatusIndicator):
  """Tether comms status for the PoF link."""

  def __init__(self, name, show_label=True):
    link_a_sources = [cvt.kTetherDownSourceCsGsA, cvt.kTetherDownSourceCsA]
    link_b_sources = [cvt.kTetherDownSourceCsB]
    super(TetherCommsStatusPoFIndicator, self).__init__(
        name, cvt.kTetherCommsLinkPof,
        [link_a_sources, link_b_sources], ['Link A', 'Link B'], show_label,
        ignore_error=True)
class TetherCommsStatusEoPIndicator(BaseTetherCommsStatusIndicator):
  """Tether comms status for the EoP link."""

  def __init__(self, name, show_label=True):
    link_a_sources = [cvt.kTetherDownSourceCsGsA, cvt.kTetherDownSourceCsA]
    link_b_sources = [cvt.kTetherDownSourceCsB]
    super(TetherCommsStatusEoPIndicator, self).__init__(
        name, cvt.kTetherCommsLinkEop,
        [link_a_sources, link_b_sources], ['Link A', 'Link B'], show_label,
        ignore_error=True)
class TetherCommsStatusWifiIndicator(BaseTetherCommsStatusIndicator):
  """Tether comms status for the wifi link."""

  def __init__(self, name, show_label=True):
    link_a_sources = [cvt.kTetherDownSourceCsGsA, cvt.kTetherDownSourceCsA]
    link_b_sources = [cvt.kTetherDownSourceCsB]
    super(TetherCommsStatusWifiIndicator, self).__init__(
        name, cvt.kTetherCommsLinkWifi,
        [link_a_sources, link_b_sources], ['Link A', 'Link B'], show_label)
| {
"content_hash": "131c5b4b1ae410d4f742994a42b6a22b",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 79,
"avg_line_length": 35.23,
"alnum_prop": 0.6663828176743306,
"repo_name": "google/makani",
"id": "207293f2bcf281886a52ef35e4220ced5065635c",
"size": "11158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gs/monitor2/apps/plugins/indicators/network.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "119408"
},
{
"name": "C",
"bytes": "20174258"
},
{
"name": "C++",
"bytes": "30512322"
},
{
"name": "CSS",
"bytes": "8921"
},
{
"name": "Dockerfile",
"bytes": "1381"
},
{
"name": "Emacs Lisp",
"bytes": "1134"
},
{
"name": "HTML",
"bytes": "65745"
},
{
"name": "Java",
"bytes": "1558475"
},
{
"name": "JavaScript",
"bytes": "130727"
},
{
"name": "Jupyter Notebook",
"bytes": "1154728"
},
{
"name": "MATLAB",
"bytes": "1026162"
},
{
"name": "Makefile",
"bytes": "2798"
},
{
"name": "Objective-C",
"bytes": "62972"
},
{
"name": "Perl",
"bytes": "870724"
},
{
"name": "Python",
"bytes": "5552781"
},
{
"name": "RPC",
"bytes": "195736"
},
{
"name": "Roff",
"bytes": "2567875"
},
{
"name": "SWIG",
"bytes": "8663"
},
{
"name": "Shell",
"bytes": "297941"
},
{
"name": "Starlark",
"bytes": "462998"
},
{
"name": "Vim Script",
"bytes": "2281"
},
{
"name": "XC",
"bytes": "50398"
},
{
"name": "XS",
"bytes": "49289"
}
],
"symlink_target": ""
} |
"""Wrappers for datasets in tfds."""
from typing import Any, Callable, Dict, Optional, Union
from robustness_metrics.common import ops
from robustness_metrics.common import pipeline_builder
from robustness_metrics.common import types
from robustness_metrics.datasets import base
import tensorflow as tf
import tensorflow_datasets as tfds
# Default preprocessing for ImageNet-style datasets: resize the short side to
# 256, central-crop to 224, and scale pixel values to [-1, 1].
# NOTE(review): the initial `= None` assignment is immediately overwritten
# below and appears redundant — confirm no import-order dependency before
# removing.
default_imagenet_preprocessing = None
default_config = "resize_small(256)|central_crop(224)|value_range(-1,1)"
default_imagenet_preprocessing = pipeline_builder.get_preprocess_fn(
    default_config, remove_tpu_dtypes=False)

# Signature of a preprocessing function: feature dict -> feature dict.
PreprocessFn = Callable[[types.Features], types.Features]
def _enumerated_to_metadata(position, features):
  """Stores the enumeration index as a 1-element `element_id` in metadata.

  Intended for use with `tf.data.Dataset.enumerate().map(...)` on datasets
  that lack a stable per-example fingerprint feature.
  """
  features["metadata"]["element_id"] = tf.reshape(position, [1])
  return features
class TFDSDataset(base.Dataset):
  """The base class of all `tensorflow_datasets` (TFDS) datasets.

  Two fields will be added to the wrapped dataset, before preprocessing it
  with the given function in `load` and batching. The two fields are:

  * `element_id`: A 64-bit integer identifying the element in the dataset by
    applying a fingerprint function to the field provided in the initializer.
  * `metadata`: A dictionary with a single field: `label`. If you want to add
    extra fields to the metadata, please override `create_metadata`.
  """

  def __init__(self,
               dataset_builder: tfds.core.DatasetBuilder,
               fingerprint_key: Optional[str] = None,
               split: Union[str, tfds.Split] = "test",
               label_key: Optional[str] = "label",
               default_preprocess_fn: Optional[PreprocessFn] = None):
    """Initializes the object.

    Args:
      dataset_builder: The tfds builder for the dataset.
      fingerprint_key: The name of the feature holding a string that will be
        used to create an element id using a fingerprinting function. If it is
        equal to None, the logic for `create_metadata` has to be overriden.
      split: The name of the dataset split.
      label_key: The name of the field holding the label.
      default_preprocess_fn: The function used to preprocess the data in `load`
        if no function is provided there.
    """
    self._dataset_builder = dataset_builder
    self._fingerprint_key = fingerprint_key
    self._split = split
    self._label_key = label_key
    self._default_preprocess_fn = default_preprocess_fn

  @property
  def info(self) -> base.DatasetInfo:
    """Dataset info; `num_classes` is None when no label key was given."""
    if self._label_key:
      label_feature = self._dataset_builder.info.features[self._label_key]
      return base.DatasetInfo(num_classes=label_feature.num_classes)
    else:
      return base.DatasetInfo(num_classes=None)

  def _compute_element_id(self, features: Dict[str, Any]):
    """Hash the element id to compute a unique id."""
    assert_msg = "`element_id` should not be present in the feature set."
    assert "element_id" not in features, assert_msg
    fingerprint_feature = features[self._fingerprint_key]
    return ops.fingerprint_int64(fingerprint_feature)

  def create_metadata(self, features):
    """Attaches a `metadata` dict holding the label and the element id."""
    if self._fingerprint_key is None:
      error_msg = ("If fingerprint_key=None, the logic of `create_metadata` has"
                   " to be overriden.")
      raise NotImplementedError(error_msg)
    features["metadata"] = {
        "label": features[self._label_key],
        "element_id": self._compute_element_id(features),
    }
    return features

  def load(self,
           preprocess_fn: Optional[PreprocessFn] = None) -> tf.data.Dataset:
    """Downloads (if needed), preprocesses, and returns the dataset."""
    if not preprocess_fn:
      preprocess_fn = self._default_preprocess_fn
    # Metadata is attached after the user/default preprocessing runs.
    preprocess_fn = ops.compose(preprocess_fn,
                                self.create_metadata)
    self._dataset_builder.download_and_prepare()
    ds = self._dataset_builder.as_dataset(
        split=self._split, as_supervised=False)
    return ds.map(preprocess_fn)
@base.registry.register("imagenet")
class ImageNetDataset(TFDSDataset):
  """ImageNet 2012 validation split, fingerprinted by `file_name`."""

  def __init__(self, split="validation"):
    builder = tfds.builder("imagenet2012")
    super().__init__(
        builder,
        fingerprint_key="file_name",
        split=split,
        default_preprocess_fn=default_imagenet_preprocessing)
@base.registry.register("cifar10")
class Cifar10Dataset(TFDSDataset):
  """CIFAR-10, fingerprinted by the per-example `id` feature."""

  def __init__(self):
    builder = tfds.builder("cifar10")
    super().__init__(
        dataset_builder=builder,
        fingerprint_key="id",
        default_preprocess_fn=default_cifar_preprocessing)
@base.registry.register("cifar10_c")
class Cifar10CDataset(TFDSDataset):
  """The CIFAR10-C dataset (corrupted CIFAR-10).

  No per-example fingerprint feature exists, so `element_id` is assigned
  from the enumeration position in `load`.
  """

  def __init__(self, corruption_type, severity):
    tfds_variant_name = f"cifar10_corrupted/{corruption_type}_{severity}"
    super().__init__(dataset_builder=tfds.builder(tfds_variant_name),
                     fingerprint_key="_SHOULD_NOT_BE_USED",
                     default_preprocess_fn=default_cifar_preprocessing)

  def create_metadata(self, features):
    # Only the label here; element_id is attached later from the
    # enumeration position (see `load`).
    features["metadata"] = {
        "label": features[self._label_key],
    }
    return features

  def load(self,
           preprocess_fn: Optional[PreprocessFn] = None) -> tf.data.Dataset:
    # `= None` default added for consistency with TFDSDataset.load, so that
    # `load()` with no argument falls back to the default preprocessing
    # instead of raising TypeError.
    if not preprocess_fn:
      preprocess_fn = self._default_preprocess_fn
    preprocess_fn = ops.compose(preprocess_fn, self.create_metadata)
    ds = self._dataset_builder.as_dataset(split=self._split,
                                          as_supervised=False)
    ds = ds.map(preprocess_fn)
    return ds.enumerate().map(_enumerated_to_metadata)
@base.registry.register("cifar100")
class Cifar100Dataset(TFDSDataset):
  """CIFAR-100, fingerprinted by the per-example `id` feature."""

  def __init__(self):
    builder = tfds.builder("cifar100")
    super().__init__(
        dataset_builder=builder,
        fingerprint_key="id",
        default_preprocess_fn=default_cifar_preprocessing)
@base.registry.register("oxford_flowers102")
class OxfordFlowers102Dataset(TFDSDataset):
  """The oxford_flowers102 dataset, fingerprinted by `file_name`.

  TFDS page: https://www.tensorflow.org/datasets/catalog/oxford_flowers102
  Original page: https://www.robots.ox.ac.uk/~vgg/data/flowers/102/
  """

  def __init__(self):
    builder = tfds.builder("oxford_flowers102")
    super().__init__(
        dataset_builder=builder,
        fingerprint_key="file_name",
        default_preprocess_fn=default_imagenet_preprocessing)
@base.registry.register("oxford_iiit_pet")
class OxfordIiitPetDataset(TFDSDataset):
  """The oxford_iiit_pet dataset.

  We only keep the 'image', 'label' and 'file_name' fields, the last one
  being used for the fingerprint_key.

  TFDS page: https://www.tensorflow.org/datasets/catalog/oxford_iiit_pet
  Original page: http://www.robots.ox.ac.uk/~vgg/data/pets/
  """

  def __init__(self):
    super().__init__(
        dataset_builder=tfds.builder("oxford_iiit_pet"),
        fingerprint_key="file_name",
        default_preprocess_fn=default_imagenet_preprocessing)

  def load(self,
           preprocess_fn: Optional[PreprocessFn] = None) -> tf.data.Dataset:
    def drop_unused_fields(feature):
      # Segmentation mask and species are not used downstream.
      del feature["segmentation_mask"]
      del feature["species"]
      return feature

    return super().load(preprocess_fn).map(drop_unused_fields)
@base.registry.register("places365")
class Places365Dataset(TFDSDataset):
  """The places365_small dataset.

  Only 'image' and 'label' are available; element ids come from the
  position of the element in the dataset.

  TFDS page: https://www.tensorflow.org/datasets/catalog/places365_small
  Original page: http://places2.csail.mit.edu/
  """

  def __init__(self):
    super().__init__(
        dataset_builder=tfds.builder("places365_small"),
        default_preprocess_fn=default_imagenet_preprocessing)

  def create_metadata(self, features):
    # No fingerprint feature exists; element_id is attached in `load`.
    features["metadata"] = {"label": features[self._label_key]}
    return features

  def load(self,
           preprocess_fn: Optional[PreprocessFn] = None) -> tf.data.Dataset:
    dataset = super().load(preprocess_fn)
    return dataset.enumerate().map(_enumerated_to_metadata)
@base.registry.register("dtd")
class DtdDataset(TFDSDataset):
  """The Describable Textures Dataset (DTD), fingerprinted by `file_name`.

  TFDS page: https://www.tensorflow.org/datasets/catalog/dtd
  Original page: https://www.robots.ox.ac.uk/~vgg/data/dtd/index.html
  """

  def __init__(self):
    builder = tfds.builder("dtd")
    super().__init__(
        dataset_builder=builder,
        fingerprint_key="file_name",
        default_preprocess_fn=default_imagenet_preprocessing)
@base.registry.register("svhn")
class SvhnDataset(TFDSDataset):
  """The Street View House Numbers (SVHN) dataset.

  Only 'image' and 'label' are available; element ids come from the
  position of the element in the dataset.

  TFDS page: https://www.tensorflow.org/datasets/catalog/svhn_cropped
  Original page: http://ufldl.stanford.edu/housenumbers/
  """

  def __init__(self):
    super().__init__(
        dataset_builder=tfds.builder("svhn_cropped"),
        default_preprocess_fn=default_imagenet_preprocessing)

  def create_metadata(self, features):
    # No fingerprint feature exists; element_id is attached in `load`.
    features["metadata"] = {"label": features[self._label_key]}
    return features

  def load(self,
           preprocess_fn: Optional[PreprocessFn] = None) -> tf.data.Dataset:
    dataset = super().load(preprocess_fn)
    return dataset.enumerate().map(_enumerated_to_metadata)
# ImageNet class indices present in ImageNet-A; passed as `appearing_classes`
# in ImageNetADataset.info below.
_IMAGENET_A_LABELSET = [
    6, 11, 13, 15, 17, 22, 23, 27, 30, 37, 39, 42, 47, 50, 57, 70, 71, 76, 79,
    89, 90, 94, 96, 97, 99, 105, 107, 108, 110, 113, 124, 125, 130, 132, 143,
    144, 150, 151, 207, 234, 235, 254, 277, 283, 287, 291, 295, 298, 301, 306,
    307, 308, 309, 310, 311, 313, 314, 315, 317, 319, 323, 324, 326, 327, 330,
    334, 335, 336, 347, 361, 363, 372, 378, 386, 397, 400, 401, 402, 404, 407,
    411, 416, 417, 420, 425, 428, 430, 437, 438, 445, 456, 457, 461, 462, 470,
    472, 483, 486, 488, 492, 496, 514, 516, 528, 530, 539, 542, 543, 549, 552,
    557, 561, 562, 569, 572, 573, 575, 579, 589, 606, 607, 609, 614, 626, 627,
    640, 641, 642, 643, 658, 668, 677, 682, 684, 687, 701, 704, 719, 736, 746,
    749, 752, 758, 763, 765, 768, 773, 774, 776, 779, 780, 786, 792, 797, 802,
    803, 804, 813, 815, 820, 823, 831, 833, 835, 839, 845, 847, 850, 859, 862,
    870, 879, 880, 888, 890, 897, 900, 907, 913, 924, 932, 933, 934, 937, 943,
    945, 947, 951, 954, 956, 957, 959, 971, 972, 980, 981, 984, 986, 987, 988,
]
@base.registry.register("imagenet_a")
class ImageNetADataset(TFDSDataset):
  """The ImageNet-A dataset."""

  def __init__(self):
    super().__init__(
        tfds.builder("imagenet_a"),
        fingerprint_key="file_name",
        split="test",
        default_preprocess_fn=default_imagenet_preprocessing)

  @property
  def info(self) -> base.DatasetInfo:
    # ImageNet-A only contains a subset of the ImageNet labels.
    return base.DatasetInfo(num_classes=super().info.num_classes,
                            appearing_classes=_IMAGENET_A_LABELSET)
# ImageNet class indices present in ImageNet-R; passed as `appearing_classes`
# in ImageNetRDataset.info below.
_IMAGENET_R_LABELSET = [
    1, 2, 4, 6, 8, 9, 11, 13, 22, 23, 26, 29, 31, 39, 47, 63, 71, 76, 79, 84,
    90, 94, 96, 97, 99, 100, 105, 107, 113, 122, 125, 130, 132, 144, 145, 147,
    148, 150, 151, 155, 160, 161, 162, 163, 171, 172, 178, 187, 195, 199, 203,
    207, 208, 219, 231, 232, 234, 235, 242, 245, 247, 250, 251, 254, 259, 260,
    263, 265, 267, 269, 276, 277, 281, 288, 289, 291, 292, 293, 296, 299, 301,
    308, 309, 310, 311, 314, 315, 319, 323, 327, 330, 334, 335, 337, 338, 340,
    341, 344, 347, 353, 355, 361, 362, 365, 366, 367, 368, 372, 388, 390, 393,
    397, 401, 407, 413, 414, 425, 428, 430, 435, 437, 441, 447, 448, 457, 462,
    463, 469, 470, 471, 472, 476, 483, 487, 515, 546, 555, 558, 570, 579, 583,
    587, 593, 594, 596, 609, 613, 617, 621, 629, 637, 657, 658, 701, 717, 724,
    763, 768, 774, 776, 779, 780, 787, 805, 812, 815, 820, 824, 833, 847, 852,
    866, 875, 883, 889, 895, 907, 928, 931, 932, 933, 934, 936, 937, 943, 945,
    947, 948, 949, 951, 953, 954, 957, 963, 965, 967, 980, 981, 983, 988,
]
@base.registry.register("imagenet_r")
class ImageNetRDataset(TFDSDataset):
  """The ImageNet-R dataset."""

  def __init__(self):
    super().__init__(
        tfds.builder("imagenet_r"),
        fingerprint_key="file_name",
        split="test",
        default_preprocess_fn=default_imagenet_preprocessing)

  @property
  def info(self) -> base.DatasetInfo:
    # ImageNet-R only contains a subset of the ImageNet labels.
    return base.DatasetInfo(num_classes=super().info.num_classes,
                            appearing_classes=_IMAGENET_R_LABELSET)
@base.registry.register("imagenet_v2")
class ImageNetV2Dataset(TFDSDataset):
  """The ImageNet-V2 dataset."""

  # Maps the public variant name to the TFDS config name. Unknown variants
  # raise KeyError, as before.
  _VARIANTS = {
      "MATCHED_FREQUENCY": "matched-frequency",
      "TOP_IMAGES": "topimages",
      "THRESHOLDED": "threshold-0.7",
  }

  def __init__(self, variant):
    tfds_variant_name = self._VARIANTS[variant]
    super().__init__(
        tfds.builder(f"imagenet_v2/{tfds_variant_name}"),
        fingerprint_key="file_name",
        split="test",
        default_preprocess_fn=default_imagenet_preprocessing)
@base.registry.register("imagenet_c")
class ImageNetCDataset(TFDSDataset):
  """The ImageNet-C dataset."""

  def __init__(self, corruption_type, severity, split="validation"):
    builder = tfds.builder(
        f"imagenet2012_corrupted/{corruption_type}_{severity}")
    super().__init__(
        builder,
        fingerprint_key="file_name",
        split=split,
        default_preprocess_fn=default_imagenet_preprocessing)
@base.registry.register("synthetic")
class SyntheticData(TFDSDataset):
  """A dataset of foreground objects pasted on random backgrounds."""

  def __init__(self, variant):
    if variant not in ("size", "rotation", "location"):
      raise ValueError(
          f"Variant {variant} not in ['size', 'rotation', 'location']")
    self.variant = variant
    super().__init__(
        dataset_builder=tfds.builder(f"siscore/{variant}"),
        fingerprint_key="image_id",
        split="test",
        default_preprocess_fn=default_imagenet_preprocessing)

  def create_metadata(self, features):
    image_id = features["image_id"]
    features["metadata"] = {
        "label": features[self._label_key],
        "element_id": image_id,
        "image_id": image_id,
        "dataset_variant": self.variant,
    }
    return features
def default_cifar_preprocessing(features: types.Features) -> types.Features:
  """Normalizes CIFAR images with the standard per-channel mean/std."""
  image = tf.image.convert_image_dtype(features["image"], tf.float32)
  cifar_mean = tf.constant([0.4914, 0.4822, 0.4465], dtype=tf.float32)
  cifar_std = tf.constant([0.2023, 0.1994, 0.2010], dtype=tf.float32)
  features["image"] = (image - cifar_mean) / cifar_std
  return features
@base.registry.register("imagenet_sketch")
class ImageNetSketchDataset(TFDSDataset):
  """The ImageNet-Sketch dataset, fingerprinted by `file_name`."""

  def __init__(self):
    builder = tfds.builder("imagenet_sketch")
    super().__init__(
        dataset_builder=builder,
        fingerprint_key="file_name",
        default_preprocess_fn=default_imagenet_preprocessing)
| {
"content_hash": "c63e2f8d5ac166e8849758eefb52fae9",
"timestamp": "",
"source": "github",
"line_count": 410,
"max_line_length": 80,
"avg_line_length": 36.47560975609756,
"alnum_prop": 0.649214309595453,
"repo_name": "google-research/robustness_metrics",
"id": "7b3acff5969e6ce32609134703872c0cec750994",
"size": "15566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "robustness_metrics/datasets/tfds.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "857586"
},
{
"name": "Python",
"bytes": "557042"
}
],
"symlink_target": ""
} |
from webiopi.utils.types import toint
from webiopi.devices.spi import SPI
from webiopi.devices.digital import GPIOPort
from webiopi.decorators.rest import request, response, api
from webiopi.utils.logger import debug
class TLE7238SL(GPIOPort, SPI):
    """WebIOPi driver for the TLE7238SL 8-channel SPI switch.

    Exposes the 8 channels as a GPIOPort. Output channels are driven through
    the input configuration registers (ICRx); input channels are read from
    the diagnostic registers (DRx) with diagnostic current enabled.
    """

    #---------- Class initialisation ----------
    COUNT = 8

    ICRBASEADDR = 0b000   # Input configuration register base address
    DCCRBASEADDR = 0b100  # Diagnostic current register base address
    CMDADDR = 0b110       # Command register address
    DRBASEADDR = 0b000    # Diagnostic register base address

    CONTROLBANK = 0
    DIAGNOSEBANK = 1

    INXOFF = 0b00
    INXON = 0b11
    INXMASK = 0b11

    DCENOFF = 0b0
    DCENON = 0b1
    DCENMASK = 0b1

    DATAMASK = 0b00001111
    WRITEFLAG = 0b10000000

    WAKEFLAG = 0b1000
    SLEEPFLAG = 0b0100

    RD_STD_DIAGNOSE_CMD = 0b00000010
    DUMMY_CMD = 0xFF

    # Kept for backward compatibility; __init__ shadows this with a
    # per-instance list so multiple chips do not share function state.
    FUNCTIONS = [GPIOPort.OUT for i in range(COUNT)]

    def __init__(self, chip=0, mode=1, speed=5000000, bus=None):
        """Initializes the SPI link and wakes the chip.

        Args:
            chip: SPI chip-select index.
            mode: SPI mode.
            speed: SPI clock in Hz, 1..5000000.
            bus: Optional SPI bus identifier.
        """
        speed = toint(speed)
        if speed < 1 or speed > 5000000:
            raise ValueError("%d Hz speed out of range [%d..%d] Hz" % (speed, 0, 5000000))
        SPI.__init__(self, toint(chip), toint(mode), 8, speed, bus)
        GPIOPort.__init__(self, self.COUNT)
        # Bug fix: previously FUNCTIONS was a shared class-level list, so two
        # driver instances would clobber each other's port functions.
        self.FUNCTIONS = [GPIOPort.OUT] * self.COUNT
        self.wake()

    #---------- Abstraction framework contracts ----------
    def __str__(self):
        return "TLE7238SL(chip=%d, mode=%d, dev=%s)" % (self.chip, self.mode, self.device())

    #---------- Additional REST mappings to support wakeup, sleep and diagnose ----------
    @api("Device", 3, "feature", "driver")
    @request("POST", "run/wake")
    def wake(self):
        """Wakes the chip via the command register."""
        self.writeRegister(self.CMDADDR, self.WAKEFLAG)
        return "Wake sent"

    @api("Device", 3, "feature", "driver")
    @request("POST", "run/sleep")
    def sleep(self):
        """Puts the chip to sleep via the command register."""
        self.writeRegister(self.CMDADDR, self.SLEEPFLAG)
        return "Sleep sent"

    @api("Device", 3, "feature", "driver")
    @request("GET", "run/diagnose")
    @response("0x%02X")
    def diagnose(self):
        """Returns the chip's standard diagnosis byte."""
        senddata = [self.RD_STD_DIAGNOSE_CMD]
        debug("%s diagnose command=[0x%02X]" % (self.__str__(), senddata[0]))
        self.xfer(senddata)  # 1st call sends the command
        readdata = self.xfer([self.DUMMY_CMD])  # 2nd call clocks out the reply
        return readdata[0]

    #---------- GPIOPort abstraction related methods ----------
    def __getFunction__(self, channel):
        return self.FUNCTIONS[channel]

    def __setFunction__(self, channel, value):
        if value not in [self.IN, self.OUT]:
            raise ValueError("Requested function not supported")
        # Reset channel to off state and en/disable diagnostic current when
        # changing function.
        self.__digitalWrite__(channel, 0)
        if value == self.IN:
            self.__setDiagnosticCurrent__(channel, True)
        else:
            self.__setDiagnosticCurrent__(channel, False)
        self.FUNCTIONS[channel] = value

    def __digitalRead__(self, channel):
        if self.FUNCTIONS[channel] == self.OUT:
            # Read ICRx values for output ports
            addr = self.__getAddress__(self.ICRBASEADDR, channel)
            data = self.readRegister(addr, self.CONTROLBANK)
            shift = self.__shiftForChannel__(channel)
            bits = (data >> shift) & self.INXMASK
            if bits == self.INXOFF:
                return 0
            elif bits == self.INXON:
                return 1
            else:
                raise ValueError("Unsupported values in input control register")
        else:
            # Read DRx values for input ports
            addr = self.__getAddress__(self.DRBASEADDR, channel)
            data = self.readRegister(addr, self.DIAGNOSEBANK)
            shift = self.__shiftForChannel__(channel)
            # Parenthesized for clarity: '>>' binds looser than '+', so the
            # original 'data >> shift + 1' already shifted by (shift + 1).
            return (data >> (shift + 1)) & 0x01

    def __digitalWrite__(self, channel, value):
        # Read full 4 register bits, then set relevant 2 bits and rewrite
        addr = self.__getAddress__(self.ICRBASEADDR, channel)
        shift = self.__shiftForChannel__(channel)
        currentData = self.readRegister(addr, self.CONTROLBANK)
        currentBits = (currentData >> shift) & self.INXMASK
        newBits = self.INXON << shift
        if (value & (currentBits == self.INXON)) | ((not value) & (currentBits == self.INXOFF)):
            return  # value is already correct, nothing to do, omit write command
        elif value:
            newData = currentData + newBits
        else:
            newData = currentData - newBits
        self.writeRegister(addr, newData)

    def __portRead__(self):
        """Reads all channels packed into one integer, bit i = channel i."""
        value = 0
        for i in range(self.count):
            value |= self.__digitalRead__(i) << i
        # Bug fix: previously this returned None because the computed value
        # was never returned.
        return value

    def __portWrite__(self, value):
        for i in range(self.count):
            self.__digitalWrite__(i, (value >> i) & 0x01)
        return

    #---------- Local helpers ----------
    def __getAddress__(self, register, channel=0):
        # Two channels share one register byte.
        return register + int(channel / 2)

    def __shiftForChannel__(self, channel):
        # Each channel occupies 2 bits within its register byte.
        return (channel % 2) * 2

    def __getDCCRxAddress__(self, channel=0):
        # Four channels share one diagnostic-current register byte.
        return self.DCCRBASEADDR + int(channel / 4)

    def __dccrxShiftForChannel__(self, channel):
        return channel % 4

    def __setDiagnosticCurrent__(self, channel, enable=True):
        # Read full 4 register bits, then set relevant bit and rewrite
        addr = self.__getDCCRxAddress__(channel)
        shift = self.__dccrxShiftForChannel__(channel)
        currentData = self.readRegister(addr, self.CONTROLBANK)
        currentBit = (currentData >> shift) & self.DCENMASK
        newBit = self.DCENON << shift
        if (enable & (currentBit == self.DCENON)) | ((not enable) & (currentBit == self.DCENOFF)):
            return  # enable is already correct, nothing to do, omit write command
        elif enable:
            newData = currentData + newBit
        else:
            newData = currentData - newBit
        self.writeRegister(addr, newData)

    def readRegister(self, addr, bank):
        """Reads one 4-bit register value from the given bank."""
        # Sending two SPI bytes at once seems not to work; cut this into two
        # sequential SPI send calls.
        cmd = self.__readRegisterCommand__(addr, bank)
        debug("%s readregister command=[0x%02X]" % (self.__str__(), cmd))
        self.xfer([cmd])  # 1st call to send command byte for read
        readdata = self.xfer([self.DUMMY_CMD])  # 2nd dummy call to push out and receive SPI slave out value
        return readdata[0] & self.DATAMASK

    def __readRegisterCommand__(self, addr, bank):
        return (addr << 4) | (bank & 0x01)

    def writeRegister(self, addr, value):
        """Writes one 4-bit register value."""
        cmd = self.__writeRegisterCommand__(addr, value)
        debug("%s writeRegister command=[0x%02X]" % (self.__str__(), cmd))
        self.writeBytes([cmd])

    def __writeRegisterCommand__(self, addr, data):
        return (addr << 4) | (data & self.DATAMASK) | self.WRITEFLAG
| {
"content_hash": "a3d914d400b07f9724f80997542df499",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 107,
"avg_line_length": 36.129533678756474,
"alnum_prop": 0.5967302452316077,
"repo_name": "ariegg/webiopi-drivers",
"id": "e5d56fc02cc917927be8c7bb744338c9a963b8ff",
"size": "9031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chips/digital/tle7238sl/tle7238sl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "17100"
},
{
"name": "Python",
"bytes": "539777"
}
],
"symlink_target": ""
} |
'''
xbmcswift2.request
------------------
This module contains the Request class. This class represents an incoming
request from XBMC.
:copyright: (c) 2012 by Jonathan Beluch
:license: GPLv3, see LICENSE for more details.
'''
from xbmcswift2.common import unpickle_args
import urlparse
try:
from urlparse import parse_qs
except ImportError:
from cgi import parse_qs
class Request(object):
    '''The request object contains all the arguments passed to the plugin via
    the command line.

    :param url: The complete plugin URL being requested. Since XBMC typically
                passes the URL query string in a separate argument from the
                base URL, they must be joined into a single string before being
                provided.
    :param handle: The handle associated with the current request.
    '''

    def __init__(self, url, handle):
        #: The entire request url.
        self.url = url

        #: The current request's handle, an integer.
        self.handle = int(handle)

        # urlparse doesn't like the 'plugin' scheme, so split the scheme off
        # and parse the remainder as a protocol relative url,
        # e.g. //plugin.video.helloxbmc/path
        self.scheme, protocol_relative = url.split(':', 1)
        parsed = urlparse.urlparse(protocol_relative)
        self.netloc = parsed[1]
        self.path = parsed[2]
        self.query_string = parsed[4]
        self.args = unpickle_args(parse_qs(self.query_string))
| {
"content_hash": "c81374a902b4e4bf851607a5ec3bb0ea",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 79,
"avg_line_length": 33.325581395348834,
"alnum_prop": 0.6454989532449407,
"repo_name": "neno1978/xbmctorrent",
"id": "2a8a7c39586fdfaf6b471f734bbec680ecb176e1",
"size": "1433",
"binary": false,
"copies": "34",
"ref": "refs/heads/master",
"path": "resources/site-packages/xbmcswift2/request.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "498"
},
{
"name": "Python",
"bytes": "1046777"
},
{
"name": "Smarty",
"bytes": "822"
}
],
"symlink_target": ""
} |
"""
The MIT License (MIT)
Copyright (c) 2014 - 2015 Jos "Zarthus" Ahrens and contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
conversion.py by Zarthus
Licensed under MIT
This is a module that introduces commands to convert currencies.
"""
from core import moduletemplate
import requests
import time
class Conversion(moduletemplate.BotModule):
    """IRC bot module providing currency, temperature, weight, and distance conversions.

    Exchange rates come from the Open Exchange Rates API and are cached;
    temperature/weight/distance conversions are computed locally.
    """

    # Seconds cached exchange-rate data stays fresh before a refresh (6 hours).
    CACHE_TTL = 3600 * 6

    def on_module_load(self):
        """Register the conversion commands and warm the rate caches."""
        self.requireApiKey("open_exchange_rates")

        self.register_command("currency", "<amount> <currency> <other currency>",
                              "Convert <amount> <currency> to <other currency>.",
                              self.PRIV_NONE, ["cur", "convert"])
        self.register_command("currencyinfo", "<currency>", "Retrieve information about <currency>.",
                              self.PRIV_NONE, ["curinfo"])
        self.register_command("temperature", "<temperature> <c/f/k> <c/f/k>",
                              ("Convert degrees in <1> to degrees in <2> (c = celsius, f = fahrenheit, k = kelvin). "
                               "Some aliases of this command need no second and third parameter."),
                              self.PRIV_NONE, ["temp", "cf", "fc", "ck", "kc", "fk", "kf"])
        self.register_command("weight", "<weight> <kg/lb>",
                              "Convert <weight> to <kg/lb>. Aliases of this command need no second parameter.",
                              self.PRIV_NONE, ["kg", "lb"])
        self.register_command("distance", "<distance> <km/mi>",
                              "Convert <distance> to <km/mi>. Some aliases need no second parameter.",
                              self.PRIV_NONE, ["dist", "km", "m", "mi", "miles"])

        # Cached API payloads, each stored with an "age" (fetch timestamp)
        # and a "json" (decoded response) key once a fetch succeeds.
        self.cache = {"convert": {}, "info": {}}
        self.cache_currency_convert()
        self.cache_currency_info()

        # Accepted spellings for each unit ("celcius" kept on purpose to
        # tolerate a common user misspelling).
        self.kelvin = ["k", "kelvin"]
        self.fahrenheit = ["f", "fahrenheit"]
        self.celsius = ["c", "celsius", "celcius"]
        self.temp_list = self.kelvin + self.fahrenheit + self.celsius

        self.kg = ["kg", "kilogram", "kilograms"]
        self.lb = ["lb", "lbs", "pound"]
        self.weight_list = self.kg + self.lb

        self.km = ["km", "kilometers", "kilometres"]
        self.mi = ["m", "mi", "miles"]
        self.distance_list = self.km + self.mi

    def on_command(self, target, nick, command, commandtext, mod, admin):
        """Dispatch a conversion command issued by `nick` in `target`."""
        if command == "currency" or command == "cur" or command == "convert":
            if not commandtext or len(commandtext.split()) != 3:
                return self.notice(nick, "Usage: currency <amount> <currency> <other currency>")

            split = commandtext.split()
            amount = split[0]
            currency = split[1].upper()
            ocurrency = split[2].upper()

            if not amount.isdigit():
                return self.notice(nick, "<amount> needs to be a numeric value.")
            if not currency.isalpha():
                return self.notice(nick, "<currency> may only be alphabetic letters.")
            if not ocurrency.isalpha():
                return self.notice(nick, "<other currency> may only be alphabetic letters.")
            if ocurrency == currency:
                return self.notice(nick, "<currency> and <other currency> may not be the same.")

            # currency_convert returns a number on success, an error string
            # for an unknown currency, or False when no rate data exists.
            oamount = self.currency_convert(int(amount), currency, ocurrency)
            # Fixed: the original `oamount and type(x) == float or type(x) == int`
            # grouped as `(oamount and ...) or ...` due to operator precedence.
            if oamount and isinstance(oamount, (int, float)):
                return self.message(target, nick, "$(bold){} {}$(clear) is equal to $(bold){} {}$(clear)."
                                                  .format(amount, currency, round(oamount, 3), ocurrency), True)
            elif oamount:
                return self.message(target, nick, "An error occurred: {}".format(oamount))
            else:
                return self.message(target, nick, "Was unable to convert {}{} to {}."
                                                  .format(amount, currency, ocurrency))

        if command == "currencyinfo" or command == "curinfo":
            if not commandtext:
                return self.notice(nick, "Usage: currencyinfo <currency>")

            currency = commandtext.upper()
            unabbr = self.currency_info(currency)

            if unabbr:
                # Yes, the wikipedia url *may* be invalid, but I trust that most of these do exist.
                return self.message(target, nick, "{} information: Unabbreviated '{}', Wikipedia: {}"
                                                  .format(currency, unabbr, "https://en.wikipedia.org/wiki/{}"
                                                          .format(unabbr.replace(" ", "_"))))
            else:
                return self.message(target, nick, "Currency '{}' does not exist or is not known.".format(currency))

        if command in ["cf", "fc", "ck", "kc", "fk", "kf"]:  # Aliases for 'temperature'
            # e.g. "cf 20" becomes "temperature 20 c f".
            commandtext = "{} {} {}".format(commandtext, command[0], command[1])
            command = "temperature"

        if command == "temperature" or command == "temp":
            ct = commandtext.lower().split()
            if not commandtext or len(ct) != 3 or ct[1] not in self.temp_list or ct[2] not in self.temp_list:
                return self.notice(nick, "Syntax: temperature <temperature> <c/f/k> <c/f/k>")
            if not ct[0].replace(".", "").isdigit():
                return self.notice(nick, "Temperature has to be a digit")
            if ct[1] == ct[2]:
                return self.notice(nick, "You cannot convert two of the same temperatures.")

            temp = float(ct[0])
            tempname = ""
            newtemp = 0
            newtempname = ""

            if ct[1] in self.celsius and ct[2] in self.fahrenheit:  # c -> f
                newtemp = temp * 9 / 5 + 32
                tempname = "°C"
                newtempname = "°F"
            elif ct[1] in self.celsius and ct[2] in self.kelvin:  # c -> k
                newtemp = temp + 273.15
                tempname = "°C"
                newtempname = "K"
            elif ct[1] in self.fahrenheit and ct[2] in self.celsius:  # f -> c
                newtemp = (temp - 32) * 5 / 9
                tempname = "°F"
                newtempname = "°C"
            elif ct[1] in self.fahrenheit and ct[2] in self.kelvin:  # f -> k
                newtemp = 5 / 9 * (temp - 32) + 273.15
                tempname = "°F"
                newtempname = "K"
            elif ct[1] in self.kelvin and ct[2] in self.celsius:  # k -> c
                newtemp = temp - 273.15
                # Fixed: the source/target unit labels were swapped here,
                # which reported e.g. "300 °C is equal to 26.85 K".
                tempname = "K"
                newtempname = "°C"
            elif ct[1] in self.kelvin and ct[2] in self.fahrenheit:  # k -> f
                newtemp = 9 / 5 * (temp - 273.15) + 32
                tempname = "K"
                newtempname = "°F"
            else:
                self.warning("Temperature Conversion: Not found in lists: {}".format(commandtext))
                return self.notice(nick, "An error occurred: Conversion type was not found.")

            newtemp = round(newtemp, 2)
            tempstring = ("$(bold){} {}$(clear) is equal to $(bold){} {}"
                          .format(temp, tempname, newtemp, newtempname))
            return self.message(target, nick, tempstring, True)

        if command in ["kg", "lb"]:  # Aliases for 'weight'
            commandtext = "{} {}".format(commandtext, command)
            command = "weight"

        if command == "weight":
            ct = commandtext.lower().split()
            if not commandtext or len(ct) != 2 or ct[1] not in self.weight_list:
                return self.notice(nick, "Syntax: weight <weight> <kg/lb>")
            if not ct[0].replace(".", "").isdigit():
                return self.notice(nick, "Weight has to be a digit")

            # Note: ct[1] is the *target* unit; the input is assumed to be
            # in the opposite unit ("weight 10 kg" converts 10 lbs to kg).
            weight = float(ct[0])
            weightname = ""
            newweight = 0
            newweightname = ""

            if ct[1] in self.kg:
                newweight = weight / 2.2046
                weightname = "lb" if int(weight) == 1 else "lbs"
                newweightname = "kg"
            elif ct[1] in self.lb:
                newweight = weight * 2.2046
                weightname = "kg"
                newweightname = "lb" if int(newweight) == 1 else "lbs"
            else:
                self.warning("Weight Conversion: Not found in lists: {}".format(commandtext))
                return self.notice(nick, "An error occurred: Conversion type was not found.")

            newweight = round(newweight, 2)
            weightstring = ("$(bold){} {}$(clear) is equal to $(bold){} {}"
                            .format(weight, weightname, newweight, newweightname))
            return self.message(target, nick, weightstring, True)

        if command in ["km", "m", "mi"]:
            commandtext = "{} {}".format(commandtext, command)
            command = "distance"

        if command == "distance" or command == "dist":
            ct = commandtext.lower().split()
            if not commandtext or len(ct) != 2 or ct[1] not in self.distance_list:
                return self.notice(nick, "Syntax: distance <distance> <km/mi>")
            if not ct[0].replace(".", "").isdigit():
                return self.notice(nick, "Distance has to be a digit")

            # As with weight: ct[1] is the target unit, input is the other unit.
            dist = float(ct[0])
            distname = ""
            newdist = 0
            newdistname = ""

            if ct[1] in self.km:
                newdist = dist / 0.62137
                distname = "mile" if int(dist) == 1 else "miles"
                newdistname = "kilometer" if int(newdist) == 1 else "kilometers"
            elif ct[1] in self.mi:
                newdist = dist * 1.60934
                distname = "kilometer" if int(dist) == 1 else "kilometers"
                newdistname = "mile" if int(newdist) == 1 else "miles"
            else:
                self.warning("Distance Conversion: Not found in lists: {}".format(commandtext))
                return self.notice(nick, "An error occurred: Conversion type was not found.")

            newdist = round(newdist, 2)
            diststring = ("$(bold){} {}$(clear) is equal to $(bold){} {}"
                          .format(dist, distname, newdist, newdistname))
            return self.message(target, nick, diststring, True)

    def currency_convert(self, amount, currency, ocurrency):
        """Convert `amount` of `currency` into `ocurrency`.

        Returns the converted amount on success, an error string when a
        currency is unknown, or False when no rate data is available.
        """
        # Refresh when stale. .get() guards against the initial fetch in
        # on_module_load having failed (no "age"/"json" keys cached yet).
        if time.time() > self.cache["convert"].get("age", 0) + self.CACHE_TTL:
            self.cache_currency_convert()

        json = self.cache["convert"].get("json")
        if json and "rates" in json:
            # One unit of <currency>'s worth (in json["base"] -- default USD)
            currency_one = 0
            ocurrency_one = 0

            if currency in json["rates"]:
                currency_one = json["rates"][currency]
            if ocurrency in json["rates"]:
                ocurrency_one = json["rates"][ocurrency]

            if currency_one == 0:
                return "Currency '{}' was not found.".format(currency)
            if ocurrency_one == 0:
                return "Currency '{}' was not found.".format(ocurrency)

            curInUsd = currency_one ** -1  # 1 unit of currency in USD.
            curInUsd = curInUsd * amount  # How much USD `amount` is worth.

            return curInUsd * ocurrency_one
        return False

    def currency_info(self, currency):
        """Return the unabbreviated name of `currency`, or False if unknown."""
        if time.time() > self.cache["info"].get("age", 0) + self.CACHE_TTL:
            self.cache_currency_info()

        info = self.cache["info"].get("json")
        if info and currency in info:
            return info[currency]
        return False

    def cache_currency_convert(self):
        """Fetch and cache the latest exchange rates. Returns success as bool."""
        api_url = "http://openexchangerates.org/api/latest.json"
        api_key = self.api_key["open_exchange_rates"]
        payload = {
            "app_id": api_key
        }
        json = None

        try:
            r = requests.get(api_url, params=payload)
            r.raise_for_status()
            json = r.json()
        except Exception as e:
            self.warning("Error occurred caching currency conversion: {}".format(str(e)))
            return False

        self.cache["convert"]["age"] = time.time()
        self.cache["convert"]["json"] = json
        self.log_verbose("Cached currency conversions.")
        return True

    def cache_currency_info(self):
        """Fetch and cache the currency-code -> name table. Returns success as bool."""
        api_url = "http://openexchangerates.org/api/currencies.json"
        api_key = self.api_key["open_exchange_rates"]
        payload = {
            "app_id": api_key
        }
        json = None

        try:
            r = requests.get(api_url, params=payload)
            r.raise_for_status()
            json = r.json()
        except Exception as e:
            self.warning("Error occurred caching currency info: {}".format(str(e)))
            return False

        self.cache["info"]["age"] = time.time()
        self.cache["info"]["json"] = json
        self.log_verbose("Cached currency information.")
        return True
| {
"content_hash": "19cba2fc8d2a8eede3811d93b1958dd9",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 117,
"avg_line_length": 43.111111111111114,
"alnum_prop": 0.5405211912943871,
"repo_name": "Zarthus/Reconcile",
"id": "de718b769f8844cac4148e4dc35e3d4414d6f1b7",
"size": "13976",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/conversion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "303919"
},
{
"name": "Shell",
"bytes": "3942"
}
],
"symlink_target": ""
} |
"""
Class for running task-oriented dialogue chats.
Specifically this class handles:
1. Setting up and running the different conversational agents to fit the TOD Conversational structure (see `tod_core.py`; also retiterated in TodWorld below)
2. Handle logic for batching when running dialogues
3. Recording various metrics associated with running the world.
See long comment on TodWorld for description of the conversation format and more functionality descriptions.
Metrics calculated from these simulations are documented in `world_metrics.py` (for
general usage) and `world_metrics_handlers.py` (for specific metric calculations)
"""
from parlai.core.metrics import Metric, LegacyMetric
from parlai.core.message import Message
from parlai.core.opt import Opt
from parlai.core.worlds import World
from parlai.agents.local_human.local_human import LocalHumanAgent
from parlai.utils.misc import display_messages
import parlai.core.tod.tod_core as tod
import parlai.core.tod.world_metrics as tod_metrics
import sys
import copy
# Following needs to be kept consistent with opt settings/tod script
# Positional index of each agent role within the per-batch agent/act lists.
USER_UTT_IDX = 0
API_CALL_IDX = 1
API_RESP_IDX = 2
SYSTEM_UTT_IDX = 3
API_SCHEMA_GROUNDING_IDX = 4
GOAL_GROUNDING_IDX = 5
AGENT_COUNT = 6

# Maps each positional index to the canonical TOD agent-type name.
SPEAKER_TO_NAME = {
    USER_UTT_IDX: tod.TodAgentType.USER_UTT_AGENT,
    API_CALL_IDX: tod.TodAgentType.API_CALL_AGENT,
    API_RESP_IDX: tod.TodAgentType.API_RESP_AGENT,
    SYSTEM_UTT_IDX: tod.TodAgentType.SYSTEM_UTT_AGENT,
    API_SCHEMA_GROUNDING_IDX: tod.TodAgentType.API_SCHEMA_GROUNDING_AGENT,
    GOAL_GROUNDING_IDX: tod.TodAgentType.GOAL_GROUNDING_AGENT,
}

# Reverse lookup: agent-type name -> positional index.
NAME_TO_IDX = {v: k for k, v in SPEAKER_TO_NAME.items()}
class TodWorld(World):
    """
    Base world for running TOD model-model chats. Includes the following agents:
    * User utt agent
    * API call agent
        * Currently assumed to be same as system utt agent in script code, though used as if separate in this world for clarity
    * API responder agent
    * System utt agent
    * API schema grounding agent (given to api call + response agent)
    * Goal grounding agent (given to user)

    As is standard for ParlAI, these agents may be models or may be standalone classes that extend the "Agent" class. The models for these *are* expected to have their utterances in a standard format.

    We do expect these agents to be passed in with a set order (see above), since some assumptions of regular ParlAI Worlds (ex. task = agent[0], model = agent[1]) are broken here since there is no "task agent" and one agent can be two "roles" (ex. system agent also making API calls)
    """

    def __init__(self, opt: Opt, agents=None, shared=None):
        """Clone each agent `batchsize` times and initialize per-batch state."""
        super().__init__(opt, agents, shared)
        self.batchsize = opt["batchsize"]
        # batch_agents[i][j]: clone of agent j for batch row i.
        self.batch_agents = []
        # batch_acts[i]: last act of each of the 4 conversational roles for row i.
        self.batch_acts = []
        self.batch_goals = []  # for case when num_episodes < batchsize
        self.batch_tod_world_metrics = []
        for i in range(self.batchsize):
            here_agents = []
            for j, agent in enumerate(agents):
                if (
                    j == SYSTEM_UTT_IDX
                ):  # handle separately cause we expect it to be same as API_CALL agent
                    here_agents.append(here_agents[API_CALL_IDX])
                    continue
                share = agent.share()
                batch_opt = copy.deepcopy(share["opt"])
                batch_opt["batchindex"] = i
                here_agents.append(share["class"](batch_opt, share))
            self.batch_agents.append(here_agents)
            self.batch_acts.append([Message.padding_example()] * 4)
            self.batch_tod_world_metrics.append(tod_metrics.TodMetrics())
        # end_episode[i] is flipped once batch row i has finished its episode.
        self.end_episode = [False] * self.batchsize

        self.max_turns = self.opt.get("max_turns", 30)
        self.turns = 0
        # True until grounding() has run once for the current episode.
        self.need_grounding = True

    def grounding(self):
        """
        Preempt with goal and schema-based intent schemas.

        As a logging hack, we stick the schema grounding in as a user utterance, but
        manually pass the value in to the relevant API call/resp agent, since passing it
        to the API call agent elsewhere is a little awkward. Similarly, we stick the
        goal as a system utterance so that it is captured in logging. However, we do not
        pass it in manually, since getting the user utterance will be the first turn of
        `parley()`.
        """
        self._observe_and_act(
            SYSTEM_UTT_IDX,  # Doesn't matter, empty at this point
            USER_UTT_IDX,  # Hack in to a place that'll look nice when printing
            f"getting API schema grounding. (Must start with `{tod.STANDARD_API_SCHEMAS}`)",
            API_SCHEMA_GROUNDING_IDX,
        )

        self._observe_and_act(
            USER_UTT_IDX,
            API_CALL_IDX,
            "responding to api schema grounding (empty enter is usually fine) ",
        )
        self._observe_and_act(
            USER_UTT_IDX,
            API_RESP_IDX,
            "responding to api schema grounding (empty enter is usually fine)",
        )

        self._observe_and_act(
            SYSTEM_UTT_IDX,  # Doesn't matter for the most part, but want something empty
            SYSTEM_UTT_IDX,  # Hack into a place per comment above
            f"getting goal grounding. (Must start with `{tod.STANDARD_GOAL}`)",
            GOAL_GROUNDING_IDX,
        )
        self.batch_goals = [act[SYSTEM_UTT_IDX] for act in self.batch_acts]
        self.turns = 0

    def parley(self):
        """Run one full TOD turn: user -> api call -> api response -> system."""
        if self.need_grounding:
            self.grounding()
            self.need_grounding = False

        else:
            self._observe_and_act(SYSTEM_UTT_IDX, USER_UTT_IDX)
            self._observe_and_act(USER_UTT_IDX, API_CALL_IDX)
            self._observe_and_act(API_CALL_IDX, API_RESP_IDX)
            self._observe_and_act(API_RESP_IDX, SYSTEM_UTT_IDX)

        self.turns += 1
        self.update_counters()

    def _observe_and_act(
        self, observe_idx, act_idx, info="for regular parley", override_act_idx=None
    ):
        """Have agent `act_idx` (or `override_act_idx`) observe the last act of
        `observe_idx` and act, recording the result under `act_idx` and feeding
        it to the per-batch metrics handlers. Uses `batch_act` when available,
        otherwise falls back to per-row observe/act.
        """
        act_agent_idx = override_act_idx if override_act_idx else act_idx
        act_agent = self.agents[act_agent_idx]
        record_output_idx = act_idx
        if hasattr(act_agent, "batch_act"):
            batch_observations = []
            for i in range(self.batchsize):
                if not self.end_episode[i]:
                    observe = self.batch_acts[i][observe_idx]
                    observe = self.batch_agents[i][act_agent_idx].observe(observe)
                    batch_observations.append(Message(observe))
                else:
                    # We're done with this episode, so just do a pad.
                    # NOTE: This could cause issues with RL down the line
                    batch_observations.append(Message.padding_example())
                    self.batch_acts[i][record_output_idx] = {"text": "", "id": ""}
            batch_actions = act_agent.batch_act(batch_observations)
            for i in range(self.batchsize):
                if self.end_episode[i]:
                    continue
                self.batch_acts[i][record_output_idx] = batch_actions[i]
                self.batch_agents[i][record_output_idx].self_observe(batch_actions[i])
        else:  # Run on agents individually
            for i in range(self.batchsize):
                act_agent = (
                    self.batch_agents[i][override_act_idx]
                    if override_act_idx
                    else self.batch_agents[i][act_idx]
                )
                if hasattr(act_agent, "episode_done") and act_agent.episode_done():
                    self.end_episode[i] = True
                if self.end_episode[i]:
                    # Following line exists because:
                    # 1. Code for writing conversations is not happy if an "id" does not exist with a sample
                    # 2. Because of the `self.end_episode` code, no agent will see this example anyway.
                    self.batch_acts[i][record_output_idx] = {"text": "", "id": ""}
                    continue
                act_agent.observe(self.batch_acts[i][observe_idx])
                if isinstance(act_agent, LocalHumanAgent):
                    print(
                        f"Getting message for {SPEAKER_TO_NAME[record_output_idx]} for {info} in batch {i}"
                    )
                try:
                    self.batch_acts[i][record_output_idx] = act_agent.act()
                except StopIteration:
                    self.end_episode[i] = True
        # Feed every fresh act to metrics; "DONE" in the text ends the episode.
        for i in range(self.batchsize):
            if self.end_episode[i]:
                continue
            self.batch_tod_world_metrics[i].handle_message(
                self.batch_acts[i][record_output_idx], SPEAKER_TO_NAME[act_agent_idx]
            )
            if tod.STANDARD_DONE in self.batch_acts[i][record_output_idx].get(
                "text", ""
            ):
                # User models trained to output a "DONE" on last turn; same with human agents.
                self.end_episode[i] = True

    def report(self):
        """
        Report all metrics of all subagents + of this world in aggregate.
        """
        metrics_separate = []
        for i in range(self.batchsize):
            here_metrics = self.batch_tod_world_metrics[i].report()
            for name, agent in [
                (SPEAKER_TO_NAME[j], self.batch_agents[i][j])
                for j in [USER_UTT_IDX, API_CALL_IDX, API_RESP_IDX, SYSTEM_UTT_IDX]
            ]:
                name_prefix = name[:-6]  # strip "_agent"
                if hasattr(agent, "report"):
                    m = agent.report()
                    if m is None:
                        continue
                    for k, v in m.items():
                        if not isinstance(v, Metric):
                            v = LegacyMetric(v)
                        here_metrics[f"{name_prefix}_{k}"] = v
            metrics_separate.append(here_metrics)
        # Sum the per-row reports into one aggregate dict.
        metrics = metrics_separate[0]
        for i in range(1, self.batchsize):
            for k, v in metrics_separate[i].items():
                if k not in metrics:
                    metrics[k] = v
                else:
                    metrics[k] = metrics[k] + v

        return metrics

    def reset(self):
        """
        Resets state of world; also sets up episode metrics.
        """
        super().reset()
        self.need_grounding = True
        self.turns = 0

        self.last_batch_episode_metrics = []
        self.batch_acts = []
        for i in range(self.batchsize):
            for agent in self.batch_agents[i]:
                agent.reset()
            self.batch_acts.append([None] * 4)

            self.batch_tod_world_metrics[i].episode_reset()
            metrics = self.batch_tod_world_metrics[i].get_last_episode_metrics()
            if metrics:
                self.last_batch_episode_metrics.append(metrics)
        self.end_episode = [False] * self.batchsize

    def get_last_batch_episode_metrics(self):
        # Episode metrics captured during the most recent reset().
        return self.last_batch_episode_metrics

    def get_last_batch_goals(self):
        # Goal-grounding acts captured during grounding().
        return self.batch_goals

    def episode_done(self):
        """True once every batch row has ended or the turn cap is reached."""
        if self.turns >= self.max_turns or all(self.end_episode):
            return True

        # Check both conversational agent states and utterance contents since
        # stock ParlAI uses both in different contexts.
        for i in range(self.batchsize):
            for j in [USER_UTT_IDX, API_CALL_IDX, API_RESP_IDX, SYSTEM_UTT_IDX]:
                if (
                    self.batch_acts[i][j] is not None
                    and tod.STANDARD_DONE in self.batch_acts[i][j].get("text", "")
                ) or (
                    hasattr(self.batch_agents[i][j], "episode_done")
                    and self.batch_agents[i][j].episode_done()
                ):
                    self.end_episode[i] = True
        return all(self.end_episode)

    def epoch_done(self):
        # NOTE(review): implicitly returns None (falsy) when no agent is done.
        for agent in self.agents:
            if agent.epoch_done():
                return True

    def num_episodes(self):
        """Smallest positive episode count among the agents, or 0 if none report one."""
        result = sys.maxsize
        for agent in self.agents:
            if hasattr(agent, "num_episodes") and agent.num_episodes() > 0:
                result = min(result, agent.num_episodes())
        if result == sys.maxsize:
            return 0
        return result

    def get_batch_acts(self):
        # Last recorded acts for every batch row.
        return self.batch_acts

    def display(self):
        """Render every batch row's current acts as a printable string."""
        s = "[--batchsize " + str(self.batchsize) + "--]\n"
        for i in range(self.batchsize):
            s += "[batch " + str(i) + ":]\n"
            s += display_messages(
                self.batch_acts[i],
                ignore_agent_reply=self.opt.get("ignore_agent_reply", False),
                add_fields=self.opt.get("display_add_fields", ""),
                prettify=self.opt.get("display_prettify", False),
                max_len=self.opt.get("max_display_len", 1000),
                verbose=self.opt.get("verbose", False),
            )
            s += "\n"
        s += "[--end of batch--]\n"
        return s
| {
"content_hash": "574a579c24d77e6571c3d9699bcc75d9",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 284,
"avg_line_length": 42.172077922077925,
"alnum_prop": 0.5759488798213873,
"repo_name": "facebookresearch/ParlAI",
"id": "32ff7072889333cbc9007f9c0df3feb4ea31e1b6",
"size": "13188",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "parlai/core/tod/tod_world.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2000"
},
{
"name": "CSS",
"bytes": "38474"
},
{
"name": "Cuda",
"bytes": "4118"
},
{
"name": "Dockerfile",
"bytes": "1218"
},
{
"name": "HTML",
"bytes": "645771"
},
{
"name": "JavaScript",
"bytes": "405110"
},
{
"name": "Makefile",
"bytes": "289"
},
{
"name": "Python",
"bytes": "6802410"
},
{
"name": "Shell",
"bytes": "26147"
}
],
"symlink_target": ""
} |
"""This code example gets all creative templates.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CreativeTemplateService.getCreativeTemplatesByStatement
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
creative_template_service = client.GetService(
'CreativeTemplateService', version='v201403')
# Create a filter statement.
statement = dfp.FilterStatement()
# Get creative templates by statement.
while True:
response = creative_template_service.getCreativeTemplatesByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for template in response['results']:
print ('Creative template with id \'%s\', name \'%s\', and type \'%s\' '
'was found.' % (template['id'],
template['name'],
template['type']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
  # Initialize client object.
  # Credentials/properties come from "googleads.yaml" (see module docstring).
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client)
| {
"content_hash": "5a0dc66a1c670256942be6d0a6064467",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 80,
"avg_line_length": 32.41304347826087,
"alnum_prop": 0.6733735747820255,
"repo_name": "dietrichc/streamline-ppc-reports",
"id": "ece79ea8079e7172159e80e79084861892b92a02",
"size": "2109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/dfp/v201403/creative_template_service/get_all_creative_templates.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2235969"
}
],
"symlink_target": ""
} |
import os
from distutils.core import setup
from ccfiletypes import VERSION
# distutils commands expect to run from the package root; change into it when
# setup.py is invoked from elsewhere. dirname is '' (falsy) when already there.
root_dir = os.path.dirname(__file__)
if root_dir:
    os.chdir(root_dir)
# Package metadata and contents for django-ccfiletypes.
setup(
    name='django-ccfiletypes',
    version=VERSION,
    license = 'BSD 3 Clause',
    description='Django templatefilters for displaying info about files.',
    long_description=open('README.rst').read(),
    author='c&c',
    author_email='studio@designcc.co.uk',
    url='https://github.com/designcc/django-ccfiletypes',
    # Ship the bundled icon sets (all four sizes) alongside the code.
    package_data={
        'ccfiletypes': [
            'static/ccfiletypes/LICENSE',
            'static/ccfiletypes/small/*.png',
            'static/ccfiletypes/medium/*.png',
            'static/ccfiletypes/large/*.png',
            'static/ccfiletypes/xlarge/*.png',
        ],
    },
    packages=[
        'ccfiletypes',
        'ccfiletypes.templatetags',
    ],)
| {
"content_hash": "0c3a59c6418c260c26ee99b6005ca3cb",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 74,
"avg_line_length": 27.870967741935484,
"alnum_prop": 0.6111111111111112,
"repo_name": "designcc/django-ccfiletypes",
"id": "575e7b17ba518c38bf2a0f3a08a9a272aea417fb",
"size": "864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "52869"
},
{
"name": "Python",
"bytes": "9837"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import fnmatch
import tinctest
from tinctest.lib import local_path
from mpp.gpdb.tests.storage.persistent_tables.sqls.generate_sqls import GenerateSqls
from mpp.lib.PSQL import PSQL
'''
Creates and runs the pre-requisite SQL files before the actual Load starts
'''
class InitialSetup():
    """Creates and runs the prerequisite SQL files before the actual load starts."""

    def createSQLFiles(self):
        """Generate setup SQL files for every supported storage type."""
        tinctest.logger.info('Creating the SQL files under setup folder')
        generator = GenerateSqls()
        for storage_type in ('ao', 'co', 'heap'):
            generator.create_table_setup('table', storage_type, storage_type)
            generator.create_table_setup('insert_tb', storage_type, storage_type)
            generator.create_table_setup('insert_tb', storage_type + '_part', storage_type, 'yes')
            generator.create_table_setup('drop_tb', storage_type, storage_type)

    def runSQLFiles(self):
        """Execute every '*_table_pre.sql' file found in the setup folder."""
        tinctest.logger.info('Running SQL files under the setup folder')
        sql_files = fnmatch.filter(os.listdir(local_path('setup')), '*_table_pre.sql')
        for sql_file in sql_files:
            PSQL.run_sql_file(local_path('setup/' + sql_file))
| {
"content_hash": "8c78846d1282223a8c6ef25610f3e490",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 88,
"avg_line_length": 38.06521739130435,
"alnum_prop": 0.7155910908052542,
"repo_name": "edespino/gpdb",
"id": "0a689d925150460ba0cad12475feb312ed781acc",
"size": "1751",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "src/test/tinc/tincrepo/mpp/gpdb/tests/storage/persistent_tables/sqls/InitialSetup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3737"
},
{
"name": "Batchfile",
"bytes": "11369"
},
{
"name": "C",
"bytes": "36580146"
},
{
"name": "C++",
"bytes": "3396346"
},
{
"name": "CMake",
"bytes": "17118"
},
{
"name": "CSS",
"bytes": "7407"
},
{
"name": "Csound Score",
"bytes": "164"
},
{
"name": "DTrace",
"bytes": "3746"
},
{
"name": "Fortran",
"bytes": "14777"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Gherkin",
"bytes": "740582"
},
{
"name": "HTML",
"bytes": "354931"
},
{
"name": "Java",
"bytes": "186576"
},
{
"name": "JavaScript",
"bytes": "23969"
},
{
"name": "Lex",
"bytes": "195794"
},
{
"name": "M4",
"bytes": "97709"
},
{
"name": "Makefile",
"bytes": "440584"
},
{
"name": "Objective-C",
"bytes": "42255"
},
{
"name": "PLSQL",
"bytes": "218116"
},
{
"name": "PLpgSQL",
"bytes": "5424886"
},
{
"name": "Perl",
"bytes": "3911633"
},
{
"name": "Perl 6",
"bytes": "8302"
},
{
"name": "Python",
"bytes": "8130606"
},
{
"name": "Roff",
"bytes": "39530"
},
{
"name": "Ruby",
"bytes": "26862"
},
{
"name": "SQLPL",
"bytes": "3939815"
},
{
"name": "Shell",
"bytes": "571615"
},
{
"name": "XS",
"bytes": "8405"
},
{
"name": "XSLT",
"bytes": "5779"
},
{
"name": "Yacc",
"bytes": "519516"
}
],
"symlink_target": ""
} |
"""
Sales Cron jobs
"""
from treeio.sales.models import Subscription
def subscription_check():
    """Check and update the status of every subscription.

    Cron entry point: delegates to ``Subscription.check_status()`` for each
    subscription in the database. (The previous docstring, about depreciating
    assets, was copy-pasted from another cron job and did not describe this
    function.)
    """
    for subscription in Subscription.objects.all():
        subscription.check_status()
| {
"content_hash": "2902f8dc8c7130b53bd0881aa9ae0986",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 68,
"avg_line_length": 26.636363636363637,
"alnum_prop": 0.7337883959044369,
"repo_name": "rogeriofalcone/treeio",
"id": "41c487f4f1e0e3e8c011ff3f977e69389b1ed799",
"size": "406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sales/cron.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import os
import ycm_core
# Fallback clang flags used by FlagsForFile when no compilation database is
# configured below. Warning flags, then language mode, then include paths.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
'-std=c++11',
# Treat files as C++ regardless of extension.
'-x',
'c++',
'-I',
'.',
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
'-I',
'./include',
'-I',
'./lib/include',
'-I',
'/home/grads/njcz19/include'
]
# Set to a directory containing compile_commands.json to use a clang
# compilation database instead of the hard-coded `flags` above.
compilation_database_folder = ''

if os.path.exists( compilation_database_folder ):
  database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
  # '' never exists, so with the default above we fall back to `flags`.
  database = None

# Source extensions tried when mapping a header to a sibling source file.
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c' ]
def DirectoryOfThisScript():
  """Return the absolute directory containing this configuration file."""
  here = os.path.abspath( __file__ )
  return os.path.dirname( here )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  """Rewrite relative include/search paths in `flags` against `working_directory`.

  Handles both the two-token form ('-I', 'path') and the fused form
  ('-Ipath'). Returns the flags unchanged (as a new list) when no working
  directory is given; empty flags are dropped.
  """
  if not working_directory:
    return list( flags )
  path_prefixes = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
  absolute_flags = []
  expect_path = False
  for flag in flags:
    rewritten = flag

    # Previous token was a bare path option: this token is its path.
    if expect_path:
      expect_path = False
      if not flag.startswith( '/' ):
        rewritten = os.path.join( working_directory, flag )

    for prefix in path_prefixes:
      if flag == prefix:
        expect_path = True
        break
      if flag.startswith( prefix ):
        relative = flag[ len( prefix ): ]
        rewritten = prefix + os.path.join( working_directory, relative )
        break

    if rewritten:
      absolute_flags.append( rewritten )
  return absolute_flags
def IsHeaderFile( filename ):
  """True when `filename` carries a C/C++ header extension."""
  return os.path.splitext( filename )[ 1 ] in ( '.h', '.hxx', '.hpp', '.hh' )
def GetCompilationInfoForFile( filename ):
  """Look up compile flags for `filename` in the compilation database.

  Headers are not present in a compilation database, so for a header file we
  try the sibling source file with each extension in SOURCE_EXTENSIONS and
  return the first entry that has flags; None when nothing matches.
  """
  if not IsHeaderFile( filename ):
    return database.GetCompilationInfoForFile( filename )
  stem = os.path.splitext( filename )[ 0 ]
  for extension in SOURCE_EXTENSIONS:
    candidate = stem + extension
    if not os.path.exists( candidate ):
      continue
    compilation_info = database.GetCompilationInfoForFile( candidate )
    if compilation_info.compiler_flags_:
      return compilation_info
  return None
def FlagsForFile( filename, **kwargs ):
  """YouCompleteMe entry point: produce the flag dict for `filename`.

  Uses the compilation database when configured, otherwise the module-level
  fallback `flags` made absolute against this script's directory.
  """
  if database:
    compilation_info = GetCompilationInfoForFile( filename )
    if not compilation_info:
      return None
    final_flags = MakeRelativePathsInFlagsAbsolute(
        compilation_info.compiler_flags_,
        compilation_info.compiler_working_dir_ )
  else:
    final_flags = MakeRelativePathsInFlagsAbsolute(
        flags, DirectoryOfThisScript() )
  return { 'flags': final_flags, 'do_cache': True }
| {
"content_hash": "eb6d6317438d0d01e518966362ff5a1a",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 72,
"avg_line_length": 23.710280373831775,
"alnum_prop": 0.6491919590067008,
"repo_name": "jwlawson/qvtrim",
"id": "100b0e52acf2197588ee8afe05c1bce1577ef858",
"size": "2537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".ycm_extra_conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "46413"
},
{
"name": "Makefile",
"bytes": "1197"
},
{
"name": "Python",
"bytes": "2537"
}
],
"symlink_target": ""
} |
from django import forms
from django.contrib.comments.forms import CommentForm
class pCMSCommentForm(CommentForm):
    # Comment form with a localised (Greek) name label; unlike the stock
    # Django CommentForm, the email field here is optional.
    name = forms.CharField(label=u"Όνομα", max_length=50)
    email = forms.EmailField(label=u"Email",required=False)
| {
"content_hash": "257bc04f9fa63349576b479868620d8f",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 59,
"avg_line_length": 39,
"alnum_prop": 0.782051282051282,
"repo_name": "sv1jsb/pCMS",
"id": "2fa2ee8424a445c64213975497a84db911dd774d",
"size": "264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pCMS/pcomments/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "582047"
},
{
"name": "Python",
"bytes": "74399"
}
],
"symlink_target": ""
} |
"""Tests for Conv2D via the XLA JIT.
The canned results in these tests are created by running each test using the
Tensorflow CPU device and saving the output.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import test_utils
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
# (test-name suffix, data format) pairs consumed by
# @parameterized.named_parameters so each test runs once per tensor layout.
DATA_FORMATS = (
    ("_data_format_NHWC", "NHWC"),
    ("_data_format_NCHW", "NCHW"),
    ("_data_format_HWNC", "HWNC"),
    ("_data_format_HWCN", "HWCN"),
)
class Conv2DTest(XLATestCase, parameterized.TestCase):
  """Forward tf.nn.conv2d tests, compared against precomputed CPU results."""

  def _VerifyValues(self,
                    input_sizes=None,
                    filter_sizes=None,
                    strides=None,
                    dilations=None,
                    padding=None,
                    data_format_src="NHWC",
                    data_format_dst="NHWC",
                    expected=None):
    """Tests that tf.nn.conv2d produces the expected value.
    Args:
      input_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_sizes: Filter tensor dimensions in
        [kernel_rows, kernel_cols, input_depth, output_depth].
      strides: Strides.
      dilations: RHS dilations.
      padding: Padding type.
      data_format_src: Data format input is in.
      data_format_dst: Data format verification will run and input is converted
        to.
      expected: Expected output.
    """
    total_size_1 = np.prod(input_sizes)
    total_size_2 = np.prod(filter_sizes)
    # Inputs are the sequence 1..N so expected outputs are deterministic.
    x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(input_sizes)
    x2 = np.arange(1, total_size_2 + 1, dtype=np.float32).reshape(filter_sizes)
    # Strides/dilations are given for the spatial dims only; pad batch/depth.
    strides = [1] + strides + [1]
    if dilations is None:
      dilations = [1, 1]
    dilations = [1] + dilations + [1]
    # Convert between data formats.
    expected = test_utils.ConvertBetweenDataFormats(expected, data_format_src,
                                                    data_format_dst)
    x1 = test_utils.ConvertBetweenDataFormats(x1, data_format_src,
                                              data_format_dst)
    input_sizes = test_utils.PermuteDimsBetweenDataFormats(
        input_sizes, data_format_src, data_format_dst)
    strides = test_utils.PermuteDimsBetweenDataFormats(strides, data_format_src,
                                                       data_format_dst)
    dilations = test_utils.PermuteDimsBetweenDataFormats(
        dilations, data_format_src, data_format_dst)
    with self.test_session() as sess:
      t1 = array_ops.placeholder(dtypes.float32, shape=input_sizes)
      t2 = array_ops.placeholder(dtypes.float32, shape=filter_sizes)
      with self.test_scope():
        out = nn_ops.conv2d(
            t1,
            t2,
            strides=strides,
            padding=padding,
            data_format=data_format_dst,
            dilations=dilations)
      value = sess.run(out, {t1: x1, t2: x2})
    self.assertAllClose(expected, value, 1e-3)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D1x1Filter(self, data_format):
    expected_output = np.reshape([
        30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0,
        204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0
    ], [1, 2, 3, 3])
    self._VerifyValues(
        input_sizes=[1, 2, 3, 3],
        filter_sizes=[1, 1, 3, 3],
        strides=[1, 1],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D2x2Filter(self, data_format):
    expected_output = np.reshape(
        [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0], [1, 1, 2, 3])
    self._VerifyValues(
        input_sizes=[1, 2, 3, 3],
        filter_sizes=[2, 2, 3, 3],
        strides=[1, 1],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D2x2Filter2x1Dilation(self, data_format):
    expected_output = np.array([[[[72], [82], [92]], [[112], [122], [132]]]])
    self._VerifyValues(
        input_sizes=[1, 4, 4, 1],
        filter_sizes=[2, 2, 1, 1],
        strides=[1, 1],
        dilations=[2, 1],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D1x2Filter(self, data_format):
    expected_output = np.reshape([
        231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0, 765.0, 840.0, 843.0,
        936.0, 1029.0
    ], [1, 2, 2, 3])
    self._VerifyValues(
        input_sizes=[1, 2, 3, 3],
        filter_sizes=[1, 2, 3, 3],
        strides=[1, 1],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D2x2FilterStride2(self, data_format):
    expected_output = np.reshape([2271.0, 2367.0, 2463.0], [1, 1, 1, 3])
    self._VerifyValues(
        input_sizes=[1, 2, 3, 3],
        filter_sizes=[2, 2, 3, 3],
        strides=[2, 2],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D2x2FilterStride2Same(self, data_format):
    expected_output = np.reshape(
        [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0], [1, 1, 2, 3])
    self._VerifyValues(
        input_sizes=[1, 2, 3, 3],
        filter_sizes=[2, 2, 3, 3],
        strides=[2, 2],
        padding="SAME",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2DEmptyDilation(self, data_format):
    # Zero-batch input: output must be empty but correctly shaped.
    self._VerifyValues(
        input_sizes=[0, 2, 3, 3],
        filter_sizes=[1, 1, 3, 3],
        strides=[1, 1],
        dilations=[2, 1],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=np.zeros([0, 2, 3, 3]))
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D2x2FilterDilation(self, data_format):
    self._VerifyValues(
        input_sizes=[1, 2, 3, 3],
        filter_sizes=[2, 2, 3, 3],
        strides=[1, 1],
        dilations=[1, 2],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=np.reshape([2667, 2781, 2895], [1, 1, 1, 3]))
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D1x2FilterDilation(self, data_format):
    self._VerifyValues(
        input_sizes=[1, 2, 3, 3],
        filter_sizes=[1, 2, 3, 3],
        strides=[1, 1],
        dilations=[2, 1],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=np.array([[[[231, 252, 273], [384, 423, 462]],
                            [[690, 765, 840], [843, 936, 1029]]]]))
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2DKernelSizeMatchesInputSizeDilation(self, data_format):
    self._VerifyValues(
        input_sizes=[1, 3, 3, 1],
        filter_sizes=[2, 2, 1, 2],
        strides=[1, 1],
        dilations=[2, 2],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=np.reshape([108, 128], [1, 1, 1, 2]))
class Conv2DBackpropInputTest(XLATestCase, parameterized.TestCase):
  """Input-gradient (conv2d_backprop_input) tests vs. precomputed results."""

  def _VerifyValues(self,
                    input_sizes=None,
                    filter_sizes=None,
                    out_backprop_sizes=None,
                    strides=None,
                    dilations=None,
                    padding=None,
                    data_format_src="NHWC",
                    data_format_dst="NHWC",
                    expected=None):
    """Tests that gen_nn_ops.conv2d_backprop_input produces the expected output.
    Args:
      input_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_sizes: Filter tensor dimensions in
        [kernel_rows, kernel_cols, input_depth, output_depth].
      out_backprop_sizes: Output gradients tensor dimensions.
      strides: Strides.
      dilations: Dilations.
      padding: Padding type.
      data_format_src: Data format input is in.
      data_format_dst: Data format verification will run and input is converted
        to.
      expected: Expected output.
    """
    total_size_1 = np.prod(filter_sizes)
    total_size_2 = np.prod(out_backprop_sizes)
    # Deterministic inputs: filter and upstream gradient are 1..N sequences.
    x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(filter_sizes)
    x2 = np.arange(
        1, total_size_2 + 1, dtype=np.float32).reshape(out_backprop_sizes)
    # Pad batch/depth dims; dilations stay None when the caller omitted them.
    strides = [1] + strides + [1]
    if dilations is not None:
      dilations = [1] + dilations + [1]
    expected = np.reshape(expected, input_sizes)
    # Convert between data formats.
    expected = test_utils.ConvertBetweenDataFormats(expected, data_format_src,
                                                    data_format_dst)
    x2 = test_utils.ConvertBetweenDataFormats(x2, data_format_src,
                                              data_format_dst)
    input_sizes = test_utils.PermuteDimsBetweenDataFormats(
        input_sizes, data_format_src, data_format_dst)
    out_backprop_sizes = test_utils.PermuteDimsBetweenDataFormats(
        out_backprop_sizes, data_format_src, data_format_dst)
    strides = test_utils.PermuteDimsBetweenDataFormats(strides, data_format_src,
                                                       data_format_dst)
    if dilations is not None:
      dilations = test_utils.PermuteDimsBetweenDataFormats(
          dilations, data_format_src, data_format_dst)
    with self.test_session() as sess:
      t1 = array_ops.placeholder(dtypes.float32, shape=filter_sizes)
      t2 = array_ops.placeholder(dtypes.float32, shape=out_backprop_sizes)
      with self.test_scope():
        out = gen_nn_ops.conv2d_backprop_input(
            input_sizes=input_sizes,
            filter=t1,
            out_backprop=t2,
            strides=strides,
            dilations=dilations,
            padding=padding,
            data_format=data_format_dst)
      value = sess.run(out, {t1: x1, t2: x2})
      self.assertAllEqual(input_sizes, value.shape)
      self.assertAllClose(expected, value, 1e-3)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D1x1Filter(self, data_format):
    expected_output = [
        5, 11, 17, 11, 25, 39, 17, 39, 61, 23, 53, 83, 29, 67, 105, 35, 81, 127,
        41, 95, 149, 47, 109, 171, 53, 123, 193, 59, 137, 215, 65, 151, 237, 71,
        165, 259, 77, 179, 281, 83, 193, 303, 89, 207, 325, 95, 221, 347.
    ]
    self._VerifyValues(
        input_sizes=[1, 4, 4, 3],
        filter_sizes=[1, 1, 3, 2],
        out_backprop_sizes=[1, 4, 4, 2],
        strides=[1, 1],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D1x2FilterStride3Width5(self, data_format):
    expected_output = [1, 2, 0, 2, 4]
    self._VerifyValues(
        input_sizes=[1, 1, 5, 1],
        filter_sizes=[1, 2, 1, 1],
        out_backprop_sizes=[1, 1, 2, 1],
        strides=[3, 3],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D1x2FilterStride3Width6(self, data_format):
    expected_output = [1, 2, 0, 2, 4, 0]
    self._VerifyValues(
        input_sizes=[1, 1, 6, 1],
        filter_sizes=[1, 2, 1, 1],
        out_backprop_sizes=[1, 1, 2, 1],
        strides=[3, 3],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D1x2FilterStride3Width7(self, data_format):
    expected_output = [1, 2, 0, 2, 4, 0, 0]
    self._VerifyValues(
        input_sizes=[1, 1, 7, 1],
        filter_sizes=[1, 2, 1, 1],
        out_backprop_sizes=[1, 1, 2, 1],
        strides=[3, 3],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D2x2FilterC1Same(self, data_format):
    expected_output = [1, 4, 7, 7, 23, 33]
    self._VerifyValues(
        input_sizes=[1, 2, 3, 1],
        filter_sizes=[2, 2, 1, 1],
        out_backprop_sizes=[1, 2, 3, 1],
        strides=[1, 1],
        padding="SAME",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D2x2Filter(self, data_format):
    expected_output = [
        14, 32, 50, 100, 163, 226, 167, 212, 257, 122, 140, 158, 478, 541, 604,
        437, 482, 527
    ]
    self._VerifyValues(
        input_sizes=[1, 2, 3, 3],
        filter_sizes=[2, 2, 3, 3],
        out_backprop_sizes=[1, 1, 2, 3],
        strides=[1, 1],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D2x2FilterSame(self, data_format):
    expected_output = [
        14, 32, 50, 100, 163, 226, 217, 334, 451, 190, 307, 424, 929, 1217,
        1505, 1487, 1883, 2279
    ]
    self._VerifyValues(
        input_sizes=[1, 2, 3, 3],
        filter_sizes=[2, 2, 3, 3],
        out_backprop_sizes=[1, 2, 3, 3],
        strides=[1, 1],
        padding="SAME",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D1x2Filter(self, data_format):
    expected_output = [1, 4, 4, 3, 10, 8, 5, 16, 12]
    self._VerifyValues(
        input_sizes=[1, 3, 3, 1],
        filter_sizes=[1, 2, 1, 1],
        out_backprop_sizes=[1, 3, 2, 1],
        strides=[1, 1],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D1x2FilterSame(self, data_format):
    expected_output = [1, 4, 7, 4, 13, 16, 7, 22, 25]
    self._VerifyValues(
        input_sizes=[1, 3, 3, 1],
        filter_sizes=[1, 2, 1, 1],
        out_backprop_sizes=[1, 3, 3, 1],
        strides=[1, 1],
        padding="SAME",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D2x2FilterStride2(self, data_format):
    expected_output = [1, 2, 5, 4, 6, 0, 0, 0, 0, 0, 3, 6, 13, 8, 12]
    self._VerifyValues(
        input_sizes=[1, 3, 5, 1],
        filter_sizes=[1, 3, 1, 1],
        out_backprop_sizes=[1, 2, 2, 1],
        strides=[2, 2],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D2x2FilterStride2Same(self, data_format):
    expected_output = [1, 2, 2, 3, 4, 6]
    self._VerifyValues(
        input_sizes=[1, 2, 3, 1],
        filter_sizes=[2, 2, 1, 1],
        out_backprop_sizes=[1, 1, 2, 1],
        strides=[2, 2],
        padding="SAME",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D2x2Depth3ValidBackpropInputStride1x1Dilation2x1(
      self, data_format):
    self._VerifyValues(
        input_sizes=[1, 3, 6, 1],
        filter_sizes=[2, 2, 1, 1],
        out_backprop_sizes=[1, 1, 5, 1],
        strides=[1, 1],
        dilations=[2, 1],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=[1, 4, 7, 10, 13, 10, 0, 0, 0, 0, 0, 0, 3, 10, 17, 24, 31, 20])
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D2x2Depth1ValidBackpropInputDilation1x2(self, data_format):
    self._VerifyValues(
        input_sizes=[1, 2, 3, 1],
        filter_sizes=[2, 2, 1, 1],
        out_backprop_sizes=[1, 1, 1, 1],
        strides=[1, 1],
        dilations=[1, 2],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=[1, 0, 2, 3, 0, 4])
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2DEmptyBackpropInputDilation1x2(self, data_format):
    # Zero-batch gradient: result is empty but must not error.
    self._VerifyValues(
        input_sizes=[0, 2, 3, 1],
        filter_sizes=[2, 2, 1, 1],
        out_backprop_sizes=[0, 1, 1, 1],
        strides=[1, 1],
        dilations=[1, 2],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=np.zeros([0]))
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D2x2Depth3ValidBackpropInputDilation2x1(self, data_format):
    # NOTE(review): this comment previously claimed a 1e-4 error threshold,
    # but _VerifyValues actually compares with a 1e-3 tolerance; the GPU
    # version of this test is not very stable, hence the loose tolerance.
    self._VerifyValues(
        input_sizes=[1, 3, 2, 3],
        filter_sizes=[2, 2, 3, 3],
        out_backprop_sizes=[1, 1, 1, 3],
        strides=[1, 1],
        dilations=[2, 1],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=[
            14, 32, 50, 68, 86, 104, 0, 0, 0, 0, 0, 0, 122, 140, 158, 176, 194,
            212
        ])
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2DKernelSizeMatchesInputSizeBackpropInputDilation2x2(
      self, data_format):
    self._VerifyValues(
        input_sizes=[1, 3, 3, 1],
        filter_sizes=[2, 2, 1, 2],
        out_backprop_sizes=[1, 1, 1, 2],
        strides=[1, 1],
        dilations=[2, 2],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=[5, 0, 11, 0, 0, 0, 17, 0, 23])
class Conv2DBackpropFilterTest(XLATestCase, parameterized.TestCase):
  """Filter-gradient (conv2d_backprop_filter) tests vs. precomputed results."""

  def _VerifyValues(self,
                    input_sizes=None,
                    filter_sizes=None,
                    out_backprop_sizes=None,
                    strides=None,
                    dilations=None,
                    padding=None,
                    data_format_src="NHWC",
                    data_format_dst="NHWC",
                    expected=None):
    """Tests that gen_nn_ops.conv2d_backprop_filter produces the right output.
    Args:
      input_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_sizes: Filter tensor dimensions in
        [kernel_rows, kernel_cols, input_depth, output_depth].
      out_backprop_sizes: Output gradients tensor dimensions.
      strides: Stride.
      dilations: Dilations.
      padding: Padding type.
      data_format_src: Data format input is in.
      data_format_dst: Data format verification will run and input is converted
        to.
      expected: Expected output.
    """
    total_size_1 = np.prod(input_sizes)
    total_size_2 = np.prod(out_backprop_sizes)
    # Deterministic inputs: activation and upstream gradient are 1..N.
    x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(input_sizes)
    x2 = np.arange(
        1, total_size_2 + 1, dtype=np.float32).reshape(out_backprop_sizes)
    # Pad batch/depth dims; dilations stay None when the caller omitted them.
    strides = [1] + strides + [1]
    if dilations is not None:
      dilations = [1] + dilations + [1]
    expected = np.reshape(expected, filter_sizes)
    # Convert between data formats.
    x1 = test_utils.ConvertBetweenDataFormats(x1, data_format_src,
                                              data_format_dst)
    x2 = test_utils.ConvertBetweenDataFormats(x2, data_format_src,
                                              data_format_dst)
    input_sizes = test_utils.PermuteDimsBetweenDataFormats(
        input_sizes, data_format_src, data_format_dst)
    out_backprop_sizes = test_utils.PermuteDimsBetweenDataFormats(
        out_backprop_sizes, data_format_src, data_format_dst)
    strides = test_utils.PermuteDimsBetweenDataFormats(strides, data_format_src,
                                                       data_format_dst)
    if dilations is not None:
      dilations = test_utils.PermuteDimsBetweenDataFormats(
          dilations, data_format_src, data_format_dst)
    with self.test_session() as sess:
      t1 = array_ops.placeholder(dtypes.float32, shape=input_sizes)
      t2 = array_ops.placeholder(dtypes.float32, shape=out_backprop_sizes)
      with self.test_scope():
        tensor = gen_nn_ops.conv2d_backprop_filter(
            input=t1,
            filter_sizes=filter_sizes,
            out_backprop=t2,
            strides=strides,
            dilations=dilations,
            padding=padding,
            data_format=data_format_dst)
      value = sess.run(tensor, {t1: x1, t2: x2})
      self.assertAllEqual(filter_sizes, value.shape)
      self.assertAllClose(expected, value, 1e-3)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D1x1Filter(self, data_format):
    expected_output = [8056, 8432, 8312, 8704, 8568, 8976]
    self._VerifyValues(
        input_sizes=[1, 4, 4, 3],
        filter_sizes=[1, 1, 3, 2],
        out_backprop_sizes=[1, 4, 4, 2],
        strides=[1, 1],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D1x2Filter(self, data_format):
    expected_output = [120, 141]
    self._VerifyValues(
        input_sizes=[1, 3, 3, 1],
        filter_sizes=[1, 2, 1, 1],
        out_backprop_sizes=[1, 3, 2, 1],
        strides=[1, 1],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D2x2FilterDepth1(self, data_format):
    expected_output = [5, 8, 14, 17]
    self._VerifyValues(
        input_sizes=[1, 2, 3, 1],
        filter_sizes=[2, 2, 1, 1],
        out_backprop_sizes=[1, 1, 2, 1],
        strides=[1, 1],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D2x2Filter(self, data_format):
    expected_output = [
        17, 22, 27, 22, 29, 36, 27, 36, 45, 32, 43, 54, 37, 50, 63, 42, 57, 72,
        62, 85, 108, 67, 92, 117, 72, 99, 126, 77, 106, 135, 82, 113, 144, 87,
        120, 153
    ]
    self._VerifyValues(
        input_sizes=[1, 2, 3, 3],
        filter_sizes=[2, 2, 3, 3],
        out_backprop_sizes=[1, 1, 2, 3],
        strides=[1, 1],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D1x2FilterStride3Width5(self, data_format):
    expected_output = [9, 12]
    self._VerifyValues(
        input_sizes=[1, 1, 5, 1],
        filter_sizes=[1, 2, 1, 1],
        out_backprop_sizes=[1, 1, 2, 1],
        strides=[3, 3],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D1x2FilterStride3Width6(self, data_format):
    expected_output = [9, 12]
    self._VerifyValues(
        input_sizes=[1, 1, 6, 1],
        filter_sizes=[1, 2, 1, 1],
        out_backprop_sizes=[1, 1, 2, 1],
        strides=[3, 3],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D1x2FilterStride3Width7(self, data_format):
    expected_output = [9, 12]
    self._VerifyValues(
        input_sizes=[1, 1, 7, 1],
        filter_sizes=[1, 2, 1, 1],
        out_backprop_sizes=[1, 1, 2, 1],
        strides=[3, 3],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D1x3Filter(self, data_format):
    expected_output = [5, 8, 11]
    self._VerifyValues(
        input_sizes=[1, 1, 4, 1],
        filter_sizes=[1, 3, 1, 1],
        out_backprop_sizes=[1, 1, 2, 1],
        strides=[1, 1],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D1x3FilterSame(self, data_format):
    expected_output = [20, 30, 20]
    self._VerifyValues(
        input_sizes=[1, 1, 4, 1],
        filter_sizes=[1, 3, 1, 1],
        out_backprop_sizes=[1, 1, 4, 1],
        strides=[1, 1],
        padding="SAME",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D1x3FilterSameOutbackprop2(self, data_format):
    expected_output = [7, 10, 3]
    self._VerifyValues(
        input_sizes=[1, 1, 4, 1],
        filter_sizes=[1, 3, 1, 1],
        out_backprop_sizes=[1, 1, 2, 1],
        strides=[2, 2],
        padding="SAME",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D2x2FilterC1Same(self, data_format):
    expected_output = [91, 58, 32, 17]
    self._VerifyValues(
        input_sizes=[1, 2, 3, 1],
        filter_sizes=[2, 2, 1, 1],
        out_backprop_sizes=[1, 2, 3, 1],
        strides=[1, 1],
        padding="SAME",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D2x2FilterStride2(self, data_format):
    expected_output = [92, 102, 112]
    self._VerifyValues(
        input_sizes=[1, 3, 5, 1],
        filter_sizes=[1, 3, 1, 1],
        out_backprop_sizes=[1, 2, 2, 1],
        strides=[2, 2],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D2x2FilterStride2Same(self, data_format):
    expected_output = [7, 2, 16, 5]
    self._VerifyValues(
        input_sizes=[1, 2, 3, 1],
        filter_sizes=[2, 2, 1, 1],
        out_backprop_sizes=[1, 1, 2, 1],
        strides=[2, 2],
        padding="SAME",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=expected_output)
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D2x2Depth3ValidBackpropFilterStride1x1Dilation2x1(
      self, data_format):
    self._VerifyValues(
        input_sizes=[1, 3, 6, 1],
        filter_sizes=[2, 2, 1, 1],
        out_backprop_sizes=[1, 1, 5, 1],
        strides=[1, 1],
        dilations=[2, 1],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=[55, 70, 235, 250])
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D2x2Depth1ValidBackpropFilterDilation1x2(self, data_format):
    self._VerifyValues(
        input_sizes=[1, 2, 3, 1],
        filter_sizes=[2, 2, 1, 1],
        out_backprop_sizes=[1, 1, 1, 1],
        strides=[1, 1],
        dilations=[1, 2],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=[1, 3, 4, 6])
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2DEmptyBackpropFilterDilation1x2(self, data_format):
    # Zero output channels: gradient is empty but must not error.
    self._VerifyValues(
        input_sizes=[1, 2, 3, 1],
        filter_sizes=[2, 2, 1, 0],
        out_backprop_sizes=[1, 1, 1, 0],
        strides=[1, 1],
        dilations=[1, 2],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=np.zeros([0]))
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2D2x2Depth3ValidBackpropFilterDilation2x2(self, data_format):
    self._VerifyValues(
        input_sizes=[1, 3, 4, 3],
        filter_sizes=[2, 2, 3, 3],
        out_backprop_sizes=[1, 1, 2, 3],
        strides=[1, 1],
        dilations=[2, 2],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=[
            17, 22, 27, 22, 29, 36, 27, 36, 45, 47, 64, 81, 52, 71, 90, 57, 78,
            99, 137, 190, 243, 142, 197, 252, 147, 204, 261, 167, 232, 297, 172,
            239, 306, 177, 246, 315
        ])
  @parameterized.named_parameters(*DATA_FORMATS)
  def testConv2DKernelSizeMatchesInputSizeBackpropFilterDilation2x2(
      self, data_format):
    self._VerifyValues(
        input_sizes=[1, 3, 3, 1],
        filter_sizes=[2, 2, 1, 2],
        out_backprop_sizes=[1, 1, 1, 2],
        strides=[1, 1],
        dilations=[2, 2],
        padding="VALID",
        data_format_src="NHWC",
        data_format_dst=data_format,
        expected=[1, 2, 3, 6, 7, 14, 9, 18])
# Entry point: runs every parameterized test case under the XLA test harness.
if __name__ == "__main__":
  googletest.main()
| {
"content_hash": "8d58b825cd71ec2d72b96cd175ff1b63",
"timestamp": "",
"source": "github",
"line_count": 841,
"max_line_length": 80,
"avg_line_length": 35.14387633769322,
"alnum_prop": 0.5872919204222493,
"repo_name": "gojira/tensorflow",
"id": "d12e1ff1e8f4564f39642bd0b64fc40d8dca8ef0",
"size": "30245",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/compiler/tests/conv2d_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9274"
},
{
"name": "C",
"bytes": "337045"
},
{
"name": "C++",
"bytes": "41535344"
},
{
"name": "CMake",
"bytes": "201232"
},
{
"name": "Go",
"bytes": "1147256"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "737815"
},
{
"name": "Jupyter Notebook",
"bytes": "2155207"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48293"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "35216559"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "428390"
},
{
"name": "Smarty",
"bytes": "6870"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: widen QuestionSet.heading from 64 to 256 chars."""

    def forwards(self, orm):
        # Widen the column to 256 characters.
        # Changing field 'QuestionSet.heading'
        db.alter_column(u'questionnaire_questionset', 'heading', self.gf('django.db.models.fields.CharField')(max_length=256))
    def backwards(self, orm):
        # Restore the original 64-character limit (values longer than 64
        # written in the meantime may be truncated by the database).
        # Changing field 'QuestionSet.heading'
        db.alter_column(u'questionnaire_questionset', 'heading', self.gf('django.db.models.fields.CharField')(max_length=64))
    # Frozen ORM snapshot generated by South; describes the app's models as
    # of this migration. Do not edit by hand.
    models = {
        u'questionnaire.answer': {
            'Meta': {'object_name': 'Answer'},
            'answer': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['questionnaire.Question']"}),
            'runid': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['questionnaire.Subject']"})
        },
        u'questionnaire.choice': {
            'Meta': {'object_name': 'Choice'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['questionnaire.Question']"}),
            'sortid': ('django.db.models.fields.IntegerField', [], {}),
            'tags': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'text_en': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        u'questionnaire.question': {
            'Meta': {'object_name': 'Question'},
            'checks': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
            'extra_en': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
            'footer_en': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'number': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
            'questionset': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['questionnaire.QuestionSet']"}),
            'sort_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'text_en': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '32'})
        },
        u'questionnaire.questionnaire': {
            'Meta': {'object_name': 'Questionnaire'},
            'base_template': ('django.db.models.fields.CharField', [], {'default': "'surveys/base.html'", 'max_length': '255'}),
            'base_url': ('django.db.models.fields.CharField', [], {'default': "'/$LANG/'", 'max_length': '128'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'redirect_url': ('django.db.models.fields.CharField', [], {'default': "'/static/complete.html'", 'max_length': '128'})
        },
        u'questionnaire.questionset': {
            'Meta': {'object_name': 'QuestionSet'},
            'checks': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'heading': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'questionnaire': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['questionnaire.Questionnaire']"}),
            'sortid': ('django.db.models.fields.IntegerField', [], {}),
            'text_en': ('django.db.models.fields.TextField', [], {})
        },
        u'questionnaire.runinfo': {
            'Meta': {'object_name': 'RunInfo'},
            'cookies': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'emailcount': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'emailsent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lastemailerror': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'questionset': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['questionnaire.QuestionSet']", 'null': 'True', 'blank': 'True'}),
            'random': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'runid': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'skipped': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
            'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['questionnaire.Subject']"}),
            'tags': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        u'questionnaire.runinfohistory': {
            'Meta': {'object_name': 'RunInfoHistory'},
            'completed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'questionnaire': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['questionnaire.Questionnaire']"}),
            'runid': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'skipped': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['questionnaire.Subject']"}),
            'tags': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        u'questionnaire.subject': {
            'Meta': {'object_name': 'Subject'},
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'formtype': ('django.db.models.fields.CharField', [], {'default': "'email'", 'max_length': '16'}),
            'gender': ('django.db.models.fields.CharField', [], {'default': "'unset'", 'max_length': '8', 'blank': 'True'}),
            'givenname': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '2'}),
            'nextrun': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'default': "'inactive'", 'max_length': '16'}),
            'surname': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
        }
    }
complete_apps = ['questionnaire'] | {
"content_hash": "3409c1e68c5b26290d47d497610ed30a",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 156,
"avg_line_length": 70.98113207547169,
"alnum_prop": 0.5538277511961722,
"repo_name": "affan2/ed-questionnaire",
"id": "981511862a690dddfada0e3a65c0e91efecb9900",
"size": "7548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "questionnaire/migrations/0005_auto__chg_field_questionset_heading.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "7083"
},
{
"name": "HTML",
"bytes": "16720"
},
{
"name": "JavaScript",
"bytes": "6213"
},
{
"name": "Python",
"bytes": "169302"
}
],
"symlink_target": ""
} |
from distutils.core import setup

# Minimal packaging metadata for the "cards" poker backend package.
# NOTE(review): distutils is deprecated and removed in Python 3.12;
# consider migrating to setuptools.setup — confirm target Python versions first.
setup(name='Cards',
      version='0.1.1',
      description='Cards and poker backend',
      author='Allen Majewski',
      author_email='altoidnerd.btc@gmail.com',
      url='',
      packages=['cards'],
      )
| {
"content_hash": "d0a7d979e36a1850f3a770a83eef2ec7",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 44,
"avg_line_length": 23.4,
"alnum_prop": 0.6410256410256411,
"repo_name": "Altoidnerd/cards",
"id": "957fd082bccf7478aa0015005b576f4a9a292ec0",
"size": "257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24236"
}
],
"symlink_target": ""
} |
import os, os.path, sys
import json
import optparse
import random
import subprocess
import tempfile
import time
class ScalingTester:
    """Drive repeated concurrent invocations of a client script against a
    service URL and report the mean wall-clock time per call.

    Works by generating a bash script full of backgrounded
    ``time (python client.py ...) &`` commands, running it, and parsing the
    ``real`` lines that bash's ``time`` writes to stderr.

    NOTE(review): this module is Python 2 (print statements in invoke()).
    """

    def __init__(self, url, object_file, user_file,
                 match_field, filter_fields, method, client_location):
        self._url = url
        self._method = method
        self._object_file = object_file
        self._user_file = user_file
        self._match_field = match_field
        self._filter_fields = self._parseFilterFields(filter_fields)
        self._client_location = client_location
        self._object_ids = self._parseObjectIDs()
        self._options_file = self._generateOptionsFile()
        self._users = self._parseUsers()

    def _parseObjectIDs(self):
        # One object ID per line in the file; blank lines are skipped.
        ids_raw = open(self._object_file).read()
        ids = ids_raw.split('\n')
        return [id.strip() for id in ids if len(id.strip()) > 0]

    def _parseUsers(self):
        # JSON file containing a list of {"cert": ..., "key": ...} dicts.
        users_raw = open(self._user_file).read()
        return json.loads(users_raw)

    def _parseFilterFields(self, filter_fields):
        # Comma-separated field names, or None when no filter was given.
        if not filter_fields: return None
        return [ff.strip() for ff in filter_fields.split(',')]

    # Make an options file for querying all objects by ID by field
    def _generateOptionsFile(self):
        (fd, filename) = tempfile.mkstemp()
        os.close(fd)
        options = {'match' : {self._match_field : self._object_ids}}
        if self._filter_fields:
            options['filter'] = self._filter_fields
        options_data = json.dumps(options)
        open(filename, 'w').write(options_data)
        return filename

    def invoke(self, num_concurrent):
        """Launch num_concurrent client calls in parallel, each with a
        randomly chosen user's cert/key, and print the mean 'real' time."""
        (fd, script_filename) = tempfile.mkstemp()
        os.close(fd)
        script_file = open(script_filename, 'w')
        for i in range(num_concurrent):
            user_index = random.randint(0, len(self._users)-1)
            user_cert = self._users[user_index]['cert']
            user_key = self._users[user_index]['key']
            # print "CERT = %s KEY = %s" % (user_cert, user_key)
            command_template = "time (python %s --key %s --cert %s " + \
                "--options_file %s --method %s --url %s 2&>1 > /dev/null) &\n"
            command = command_template % \
                (self._client_location, user_key, user_cert, \
                     self._options_file, self._method, self._url)
            script_file.write(command)
        script_file.close()
        # print "SC = " + script_filename
        output = self.run_script(script_filename)
        # Keep only the 'real' lines from bash `time` output and average them.
        real_timing_raw = \
            [line for line in output.split('\n') if line.startswith('real')]
        real_timing = [rt.split('\t')[1] for rt in real_timing_raw]
        secs = [self.parseTime(rt) for rt in real_timing]
        mean = 0;
        for sec in secs: mean = mean + sec
        mean = mean / num_concurrent
        print "Mean %s : %s" % (mean, secs)

    def run_script(self, script_filename):
        """Run the generated script under /bin/bash and return its stderr
        (where `time` writes its timing report)."""
        run_script_command = ["/bin/bash", script_filename]
        proc = subprocess.Popen(run_script_command, stderr=subprocess.PIPE)
        result = ''
        chunk = proc.stderr.read()
        while chunk:
            result = result + chunk
            chunk = proc.stderr.read()
        return result

    def parseTime(self, time_min_sec):
        """Convert a bash `time` value like '0m1.234s' into seconds."""
        parts = time_min_sec.split('m')
        min = int(parts[0])
        sec = float(parts[1].split('s')[0])
        sec = 60*min + sec
        return sec
def parseOptions(args):
    """Parse command-line options for the scaling tester.

    --url, --object_file and --user_file are required; when any is missing
    a usage message is printed and the process exits with a non-zero status.
    (Previously this exited with status 0, which shells read as success.)
    """
    parser = optparse.OptionParser()
    parser.add_option("--url", help="URL of service to which to connect",
                      default=None)
    parser.add_option("--object_file",
                      help="File containing list of object IDs",
                      default=None)
    parser.add_option("--user_file",
                      help="JSON File containing list {cert, key} dicts",
                      default=None)
    parser.add_option("--method",
                      help="Name of method to invoke",
                      default='lookup_slices')
    parser.add_option("--match_field", help="Name of object field to match",
                      default="SLICE_UID")
    parser.add_option("--filter_fields", help="List of object fields to select",
                      default=None)
    parser.add_option("--client_location", help="Location of client.py",
                      default="client.py")
    parser.add_option("--num_concurrent",
                      help="Number concurrent calls", default=1)
    parser.add_option("--frequency", help='Time to wait between invocations',
                      default=5)
    parser.add_option("--num_iterations", help="Total iterations to run",
                      default=10)
    [opts, args] = parser.parse_args(args)
    if not opts.url or not opts.object_file or not opts.user_file:
        # print(...) with a single argument works in both Python 2 and 3.
        print("--url and --object_file and --user_file are required")
        sys.exit(1)  # BUG FIX: was sys.exit(0) — a usage error is not success
    return opts
def main(args=sys.argv):
    """Entry point: parse options, then run the scaling test repeatedly."""
    options = parseOptions(args)
    tester = ScalingTester(options.url, options.object_file, options.user_file,
                           options.match_field, options.filter_fields,
                           options.method, options.client_location)
    total_iterations = int(options.num_iterations)
    concurrency = int(options.num_concurrent)
    delay = int(options.frequency)
    for iteration in range(total_iterations):
        tester.invoke(concurrency)
        # Pause between rounds, but not after the final one.
        if iteration < total_iterations - 1:
            time.sleep(delay)


if __name__ == "__main__":
    sys.exit(main())
| {
"content_hash": "65e2483b2a75e7803a65840ceba92f69",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 80,
"avg_line_length": 37.986111111111114,
"alnum_prop": 0.5661791590493601,
"repo_name": "ahelsing/geni-ch",
"id": "93f3221fff000a090aea165e737a8a4e03e1ded5",
"size": "6687",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tools/chapi_scaling.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "999"
},
{
"name": "Makefile",
"bytes": "1539"
},
{
"name": "Python",
"bytes": "627979"
},
{
"name": "Shell",
"bytes": "17153"
}
],
"symlink_target": ""
} |
from google.appengine.ext import db
class Config(db.Model):
    """Configuration for a sharded counter: its name and shard count."""
    # Counter name this configuration applies to.
    name = db.StringProperty(required=True)
    # Number of shards the counter is split across (more shards = more
    # write throughput at the cost of a wider read).
    num_shards = db.IntegerProperty(required=True, default=20)
class Shard(db.Model):
    """One shard of a sharded counter; the total is the sum over shards."""
    # Name of the counter this shard belongs to.
    name = db.StringProperty(required=True)
    # This shard's partial count.
    count = db.IntegerProperty(required=True, default=0)
| {
"content_hash": "2d507dc807f8f135bbab983f14f4cb52",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 62,
"avg_line_length": 26.818181818181817,
"alnum_prop": 0.7389830508474576,
"repo_name": "jcrocholl/nxdom",
"id": "935b9cd856e29290ae467e79fd0132ed57e29c1d",
"size": "295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "counters/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "30328"
},
{
"name": "Python",
"bytes": "1440928"
}
],
"symlink_target": ""
} |
"""Example of using Python 3 function annotations to define
request arguments and output schemas.
Run the app:
$ python examples/annotations_example.py
Try the following with httpie (a cURL-like utility, http://httpie.org):
$ pip install httpie
$ http GET :5001/
$ http GET :5001/ name==Ada
$ http POST :5001/add x=40 y=2
$ http GET :5001/users/42
"""
import random
import functools
from flask import Flask, request
from marshmallow import Schema
from webargs import fields
from webargs.flaskparser import parser
# Single module-level Flask application used by the route() helper below.
app = Flask(__name__)
##### Routing wrapper ####
def route(*args, **kwargs):
    """Combines `Flask.route` and webargs parsing. Allows arguments to be specified
    as function annotations. An output schema can optionally be specified by a
    return annotation.
    """

    def decorator(func):
        @app.route(*args, **kwargs)
        @functools.wraps(func)
        def wrapped_view(*a, **kw):
            annotations = getattr(func, "__annotations__", {})
            # Request arguments are the annotated parameters that are
            # webargs fields; the "return" annotation is handled separately.
            reqargs = {
                name: value
                for name, value in annotations.items()
                if isinstance(value, fields.Field) and name != "return"
            }
            response_schema = annotations.get("return")
            schema_cls = Schema.from_dict(reqargs)
            # Non-POST requests may omit required fields (partial parsing).
            partial = request.method != "POST"
            parsed = parser.parse(schema_cls(partial=partial), request)
            kw.update(parsed)
            response_data = func(*a, **kw)
            if response_schema:
                return response_schema.dump(response_data)
            # BUG FIX: return the result already computed above instead of
            # invoking the view a second time, which duplicated side effects
            # (e.g. a POST handler would have run twice).
            return response_data

        return wrapped_view

    return decorator
##### Fake database and model #####
class Model:
    """Minimal attribute-bag model persisted in an in-memory ``db`` dict."""

    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)

    def update(self, **kwargs):
        """Overwrite any subset of this record's attributes."""
        for key, value in kwargs.items():
            setattr(self, key, value)

    @classmethod
    def insert(cls, db, **kwargs):
        """Create a record in ``db`` under this class's collection; return it."""
        collection = db[cls.collection]
        if "id" in kwargs:  # fixtures may supply an explicit id
            new_id = kwargs.pop("id")
        else:
            # Draw random ids until one that is not yet taken turns up.
            while True:
                new_id = random.randint(1, 9999)
                if new_id not in collection:
                    break
        record = cls(id=new_id, **kwargs)
        collection[new_id] = record
        return record
class User(Model):
    """User record; stored under the "users" collection of the fake db."""
    collection = "users"
# In-memory "database": maps collection name -> {id: record}.
db = {"users": {}}
##### Views #####
@route("/", methods=["GET"])
def index(name: fields.Str(missing="Friend")):  # noqa: F821
    """Greet the caller by the optional ``name`` request argument."""
    return {"message": f"Hello, {name}!"}
@route("/add", methods=["POST"])
def add(x: fields.Float(required=True), y: fields.Float(required=True)):
    """Return the sum of the required ``x`` and ``y`` request arguments."""
    return {"result": x + y}
class UserSchema(Schema):
    """Serialization schema for User records."""
    id = fields.Int(dump_only=True)  # output-only, never accepted as input
    username = fields.Str(required=True)
    first_name = fields.Str()
    last_name = fields.Str()
@route("/users/<int:user_id>", methods=["GET", "PATCH"])
def user_detail(user_id, username: fields.Str(required=True) = None) -> UserSchema():
    """Fetch (GET) or rename (PATCH) a user; 404 JSON payload when absent."""
    user = db["users"].get(user_id)
    if not user:
        return {"message": "User not found"}, 404
    if request.method == "PATCH":
        # NOTE(review): parsing is partial for non-POST requests, so a PATCH
        # without "username" leaves the default None and would set
        # user.username = None here — confirm this is intended.
        user.update(username=username)
    return user
# Return validation errors as JSON
@app.errorhandler(422)
@app.errorhandler(400)
def handle_error(err):
    """Render 400/422 validation failures as a JSON error body."""
    messages = err.data.get("messages", ["Invalid request."])
    extra_headers = err.data.get("headers", None)
    if extra_headers:
        return {"errors": messages}, err.code, extra_headers
    return {"errors": messages}, err.code
if __name__ == "__main__":
    # Seed one known user so /users/42 works straight away.
    User.insert(
        db=db, id=42, username="fred", first_name="Freddie", last_name="Mercury"
    )
    app.run(port=5001, debug=True)
| {
"content_hash": "6edebed401c3fe97e5c8bce7c01b727f",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 85,
"avg_line_length": 26.95035460992908,
"alnum_prop": 0.5955263157894737,
"repo_name": "sloria/webargs",
"id": "134b29e15121a543586c01bdbdc2a26f9f2c4459",
"size": "3800",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "examples/annotations_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "182724"
}
],
"symlink_target": ""
} |
from multiverse.mars.plugins import GroupPlugin
##
# Group Configuration File
##
# Register stats for group object to track and send to each client.
# NOTE(review): stat names presumably must match those published elsewhere
# in the stat system — verify before renaming.
GroupPlugin.RegisterStat("health")
GroupPlugin.RegisterStat("health-max")
# Set maximum group size
GroupPlugin.SetMaxGroupSize(8)
| {
"content_hash": "9d939d2e9a28a9cc0410947e85147f6d",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 66,
"avg_line_length": 23.416666666666668,
"alnum_prop": 0.797153024911032,
"repo_name": "longde123/MultiversePlatform",
"id": "f424dacd58a971950c8ccfcee370b4837a2cfce4",
"size": "1499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/config/sampleworld/group.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "1148"
},
{
"name": "Batchfile",
"bytes": "56002"
},
{
"name": "C",
"bytes": "2958956"
},
{
"name": "C#",
"bytes": "11292123"
},
{
"name": "C++",
"bytes": "428039"
},
{
"name": "CSS",
"bytes": "107446"
},
{
"name": "Groff",
"bytes": "3653"
},
{
"name": "HTML",
"bytes": "767415"
},
{
"name": "Inno Setup",
"bytes": "2093"
},
{
"name": "Java",
"bytes": "4444010"
},
{
"name": "JavaScript",
"bytes": "115349"
},
{
"name": "Makefile",
"bytes": "35639"
},
{
"name": "Matlab",
"bytes": "2076"
},
{
"name": "Objective-C",
"bytes": "44581"
},
{
"name": "Perl",
"bytes": "6299"
},
{
"name": "Python",
"bytes": "4648545"
},
{
"name": "Scheme",
"bytes": "48864"
},
{
"name": "Shell",
"bytes": "880494"
},
{
"name": "XSLT",
"bytes": "1834"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
from hdfs_zkfc import zkfc
from hdfs import hdfs
class ZkfcSlave(Script):
    """Lifecycle script for the HDFS ZKFC (ZooKeeper Failover Controller)
    component: install, configure, start, stop and status commands."""

    def install(self, env):
        """Install packages and lay down the initial ZKFC configuration."""
        import params
        self.install_packages(env, params.exclude_packages)
        env.set_params(params)
        zkfc(action="configure")

    def start(self, env):
        """(Re)write HDFS configuration, then start the ZKFC daemon."""
        import params
        env.set_params(params)
        self.configure(env)
        zkfc(action="start")

    def stop(self, env):
        """Stop the ZKFC daemon."""
        import params
        env.set_params(params)
        zkfc(action="stop")

    def configure(self, env):
        """Write out HDFS configuration files.

        NOTE(review): unlike the other actions this neither imports params
        nor calls env.set_params(params), and the trailing `pass` is dead
        code — confirm whether that is intentional.
        """
        hdfs()
        pass

    def status(self, env):
        """Report whether the ZKFC daemon is running."""
        import status_params
        env.set_params(status_params)
        zkfc(action="status")
if __name__ == "__main__":
    # Script entry point: execute the command requested on the command line.
    ZkfcSlave().execute()
| {
"content_hash": "97d8ac74b9df28fffc3d5d8b4d82c3c5",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 72,
"avg_line_length": 25.189655172413794,
"alnum_prop": 0.728952772073922,
"repo_name": "keedio/keedio-stacks",
"id": "165d9cd19311b2dcc73760737f9cf6238a129d97",
"size": "1461",
"binary": false,
"copies": "2",
"ref": "refs/heads/development",
"path": "KEEDIO/1.0/services/HDFS/package/scripts/zkfc_slave.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "386"
},
{
"name": "Python",
"bytes": "1080418"
},
{
"name": "Shell",
"bytes": "50473"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import copy
import shlex
import unittest
from typing import Any
from unittest import mock
from unittest.mock import MagicMock
from uuid import UUID
import pytest
from parameterized import parameterized
from airflow.exceptions import AirflowException
from airflow.providers.apache.beam.hooks.beam import BeamCommandRunner, BeamHook
from airflow.providers.google.cloud.hooks.dataflow import (
DEFAULT_DATAFLOW_LOCATION,
DataflowHook,
DataflowJobStatus,
DataflowJobType,
_DataflowJobsController,
_fallback_to_project_id_from_variables,
process_line_and_extract_dataflow_job_id_callback,
)
DEFAULT_RUNNER = "DirectRunner"
BEAM_STRING = 'airflow.providers.apache.beam.hooks.beam.{}'
TASK_ID = 'test-dataflow-operator'
JOB_NAME = 'test-dataflow-pipeline'
MOCK_UUID = UUID('cf4a56d2-8101-4217-b027-2af6216feb48')
MOCK_UUID_PREFIX = str(MOCK_UUID)[:8]
UNIQUE_JOB_NAME = f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}'
TEST_TEMPLATE = 'gs://dataflow-templates/wordcount/template_file'
PARAMETERS = {
'inputFile': 'gs://dataflow-samples/shakespeare/kinglear.txt',
'output': 'gs://test/output/my_output',
}
PY_FILE = 'apache_beam.examples.wordcount'
JAR_FILE = 'unitest.jar'
JOB_CLASS = 'com.example.UnitTest'
PY_OPTIONS = ['-m']
DATAFLOW_VARIABLES_PY = {'project': 'test', 'staging_location': 'gs://test/staging', 'labels': {'foo': 'bar'}}
DATAFLOW_VARIABLES_JAVA = {
'project': 'test',
'stagingLocation': 'gs://test/staging',
'labels': {'foo': 'bar'},
}
RUNTIME_ENV = {
'additionalExperiments': ['exp_flag1', 'exp_flag2'],
'additionalUserLabels': {'name': 'wrench', 'mass': '1.3kg', 'count': '3'},
'bypassTempDirValidation': {},
'ipConfiguration': 'WORKER_IP_PRIVATE',
'kmsKeyName': (
'projects/TEST_PROJECT_ID/locations/TEST_LOCATIONS/keyRings/TEST_KEYRING/cryptoKeys/TEST_CRYPTOKEYS'
),
'maxWorkers': 10,
'network': 'default',
'numWorkers': 2,
'serviceAccountEmail': 'test@apache.airflow',
'subnetwork': 'regions/REGION/subnetworks/SUBNETWORK',
'tempLocation': 'gs://test/temp',
'workerRegion': "test-region",
'workerZone': 'test-zone',
'zone': 'us-central1-f',
'machineType': 'n1-standard-1',
}
BASE_STRING = 'airflow.providers.google.common.hooks.base_google.{}'
DATAFLOW_STRING = 'airflow.providers.google.cloud.hooks.dataflow.{}'
TEST_PROJECT = 'test-project'
TEST_JOB_ID = 'test-job-id'
TEST_LOCATION = 'custom-location'
DEFAULT_PY_INTERPRETER = 'python3'
TEST_FLEX_PARAMETERS = {
"containerSpecGcsPath": "gs://test-bucket/test-file",
"jobName": 'test-job-name',
"parameters": {
"inputSubscription": 'test-subscription',
"outputTable": "test-project:test-dataset.streaming_beam_sql",
},
}
TEST_PROJECT_ID = 'test-project-id'
TEST_SQL_JOB_NAME = 'test-sql-job-name'
TEST_DATASET = 'test-dataset'
TEST_SQL_OPTIONS = {
"bigquery-project": TEST_PROJECT,
"bigquery-dataset": TEST_DATASET,
"bigquery-table": "beam_output",
'bigquery-write-disposition': "write-truncate",
}
TEST_SQL_QUERY = """
SELECT
sales_region as sales_region,
count(state_id) as count_state
FROM
bigquery.table.test-project.beam_samples.beam_table
GROUP BY sales_region;
"""
TEST_SQL_JOB_ID = 'test-job-id'
DEFAULT_CANCEL_TIMEOUT = 5 * 60
class TestFallbackToVariables(unittest.TestCase):
    """Tests for the _fallback_to_project_id_from_variables decorator."""

    def test_support_project_id_parameter(self):
        """An explicit project_id kwarg is passed straight through."""
        mock_instance = mock.MagicMock()

        class FixtureFallback:
            @_fallback_to_project_id_from_variables
            def test_fn(self, *args, **kwargs):
                mock_instance(*args, **kwargs)

        FixtureFallback().test_fn(project_id="TEST")
        mock_instance.assert_called_once_with(project_id="TEST")

    def test_support_project_id_from_variable_parameter(self):
        """A 'project' key inside variables is lifted into project_id."""
        mock_instance = mock.MagicMock()

        class FixtureFallback:
            @_fallback_to_project_id_from_variables
            def test_fn(self, *args, **kwargs):
                mock_instance(*args, **kwargs)

        FixtureFallback().test_fn(variables={'project': "TEST"})
        mock_instance.assert_called_once_with(project_id='TEST', variables={})

    def test_raise_exception_on_conflict(self):
        """Supplying both project_id and variables['project'] must raise."""
        mock_instance = mock.MagicMock()

        class FixtureFallback:
            @_fallback_to_project_id_from_variables
            def test_fn(self, *args, **kwargs):
                mock_instance(*args, **kwargs)

        with pytest.raises(
            AirflowException,
            match="The mutually exclusive parameter `project_id` and `project` key in `variables` parameter "
            "are both present\\. Please remove one\\.",
        ):
            FixtureFallback().test_fn(variables={'project': "TEST"}, project_id="TEST2")

    def test_raise_exception_on_positional_argument(self):
        """Positional arguments are rejected by the decorator."""
        mock_instance = mock.MagicMock()

        class FixtureFallback:
            @_fallback_to_project_id_from_variables
            def test_fn(self, *args, **kwargs):
                mock_instance(*args, **kwargs)

        with pytest.raises(
            AirflowException, match="You must use keyword arguments in this methods rather than positional"
        ):
            FixtureFallback().test_fn({'project': "TEST"}, "TEST2")
class TestDataflowHook(unittest.TestCase):
    """Tests for DataflowHook: API client creation and the deprecated
    start_python_dataflow / start_java_dataflow delegation to BeamHook."""
    def setUp(self):
        """Create a DataflowHook whose BeamHook is replaced by a MagicMock."""
        self.dataflow_hook = DataflowHook(gcp_conn_id='google_cloud_default')
        self.dataflow_hook.beam_hook = MagicMock()
    @mock.patch("airflow.providers.google.cloud.hooks.dataflow.DataflowHook._authorize")
    @mock.patch("airflow.providers.google.cloud.hooks.dataflow.build")
    def test_dataflow_client_creation(self, mock_build, mock_authorize):
        """get_conn must build an authorized 'dataflow' v1b3 API client."""
        result = self.dataflow_hook.get_conn()
        mock_build.assert_called_once_with(
            'dataflow', 'v1b3', http=mock_authorize.return_value, cache_discovery=False
        )
        assert mock_build.return_value == result
    @mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
    @mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
    def test_start_python_dataflow(self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid):
        """start_python_dataflow delegates to BeamHook.start_python_pipeline
        with a uuid-suffixed job name and the default region, waits for the
        job, and emits a DeprecationWarning."""
        mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
        mock_uuid.return_value = MOCK_UUID
        on_new_job_id_callback = MagicMock()
        py_requirements = ["pandas", "numpy"]
        job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
        with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
            self.dataflow_hook.start_python_dataflow(
                job_name=JOB_NAME,
                variables=DATAFLOW_VARIABLES_PY,
                dataflow=PY_FILE,
                py_options=PY_OPTIONS,
                py_interpreter=DEFAULT_PY_INTERPRETER,
                py_requirements=py_requirements,
                on_new_job_id_callback=on_new_job_id_callback,
            )
        expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
        expected_variables["job_name"] = job_name
        expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
        mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
        mock_beam_start_python_pipeline.assert_called_once_with(
            variables=expected_variables,
            py_file=PY_FILE,
            py_interpreter=DEFAULT_PY_INTERPRETER,
            py_options=PY_OPTIONS,
            py_requirements=py_requirements,
            py_system_site_packages=False,
            process_line_callback=mock_callback_on_job_id.return_value,
        )
        mock_dataflow_wait_for_done.assert_called_once_with(
            job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION
        )
    @mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
    @mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
    def test_start_python_dataflow_with_custom_region_as_variable(
        self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
    ):
        """A 'region' key in the variables dict overrides the default region."""
        mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
        mock_uuid.return_value = MOCK_UUID
        on_new_job_id_callback = MagicMock()
        py_requirements = ["pandas", "numpy"]
        job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
        passed_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
        passed_variables["region"] = TEST_LOCATION
        with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
            self.dataflow_hook.start_python_dataflow(
                job_name=JOB_NAME,
                variables=passed_variables,
                dataflow=PY_FILE,
                py_options=PY_OPTIONS,
                py_interpreter=DEFAULT_PY_INTERPRETER,
                py_requirements=py_requirements,
                on_new_job_id_callback=on_new_job_id_callback,
            )
        expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
        expected_variables["job_name"] = job_name
        expected_variables["region"] = TEST_LOCATION
        mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
        mock_beam_start_python_pipeline.assert_called_once_with(
            variables=expected_variables,
            py_file=PY_FILE,
            py_interpreter=DEFAULT_PY_INTERPRETER,
            py_options=PY_OPTIONS,
            py_requirements=py_requirements,
            py_system_site_packages=False,
            process_line_callback=mock_callback_on_job_id.return_value,
        )
        mock_dataflow_wait_for_done.assert_called_once_with(
            job_id=mock.ANY, job_name=job_name, location=TEST_LOCATION
        )
    @mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
    @mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
    def test_start_python_dataflow_with_custom_region_as_parameter(
        self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
    ):
        """An explicit ``location`` argument overrides the default region."""
        mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
        mock_uuid.return_value = MOCK_UUID
        on_new_job_id_callback = MagicMock()
        py_requirements = ["pandas", "numpy"]
        job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
        passed_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
        with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
            self.dataflow_hook.start_python_dataflow(
                job_name=JOB_NAME,
                variables=passed_variables,
                dataflow=PY_FILE,
                py_options=PY_OPTIONS,
                py_interpreter=DEFAULT_PY_INTERPRETER,
                py_requirements=py_requirements,
                on_new_job_id_callback=on_new_job_id_callback,
                location=TEST_LOCATION,
            )
        expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
        expected_variables["job_name"] = job_name
        expected_variables["region"] = TEST_LOCATION
        mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
        mock_beam_start_python_pipeline.assert_called_once_with(
            variables=expected_variables,
            py_file=PY_FILE,
            py_interpreter=DEFAULT_PY_INTERPRETER,
            py_options=PY_OPTIONS,
            py_requirements=py_requirements,
            py_system_site_packages=False,
            process_line_callback=mock_callback_on_job_id.return_value,
        )
        mock_dataflow_wait_for_done.assert_called_once_with(
            job_id=mock.ANY, job_name=job_name, location=TEST_LOCATION
        )
    @mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
    @mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
    def test_start_python_dataflow_with_multiple_extra_packages(
        self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
    ):
        """A list-valued 'extra-package' variable is forwarded unchanged."""
        mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
        mock_uuid.return_value = MOCK_UUID
        on_new_job_id_callback = MagicMock()
        py_requirements = ["pandas", "numpy"]
        job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
        passed_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
        passed_variables['extra-package'] = ['a.whl', 'b.whl']
        with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
            self.dataflow_hook.start_python_dataflow(
                job_name=JOB_NAME,
                variables=passed_variables,
                dataflow=PY_FILE,
                py_options=PY_OPTIONS,
                py_interpreter=DEFAULT_PY_INTERPRETER,
                py_requirements=py_requirements,
                on_new_job_id_callback=on_new_job_id_callback,
            )
        expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
        expected_variables["job_name"] = job_name
        expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
        expected_variables['extra-package'] = ['a.whl', 'b.whl']
        mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
        mock_beam_start_python_pipeline.assert_called_once_with(
            variables=expected_variables,
            py_file=PY_FILE,
            py_interpreter=DEFAULT_PY_INTERPRETER,
            py_options=PY_OPTIONS,
            py_requirements=py_requirements,
            py_system_site_packages=False,
            process_line_callback=mock_callback_on_job_id.return_value,
        )
        mock_dataflow_wait_for_done.assert_called_once_with(
            job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION
        )
    @parameterized.expand(
        [
            ('python3',),
            ('python2',),
            ('python3',),
            ('python3.6',),
        ]
    )
    # NOTE(review): ('python3',) appears twice in the parameter list above —
    # possibly a typo for another interpreter version; confirm before changing.
    @mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
    @mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
    def test_start_python_dataflow_with_custom_interpreter(
        self, py_interpreter, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
    ):
        """The chosen py_interpreter is forwarded verbatim to BeamHook."""
        mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
        mock_uuid.return_value = MOCK_UUID
        on_new_job_id_callback = MagicMock()
        job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
        with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
            self.dataflow_hook.start_python_dataflow(
                job_name=JOB_NAME,
                variables=DATAFLOW_VARIABLES_PY,
                dataflow=PY_FILE,
                py_options=PY_OPTIONS,
                py_interpreter=py_interpreter,
                py_requirements=None,
                on_new_job_id_callback=on_new_job_id_callback,
            )
        expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
        expected_variables["job_name"] = job_name
        expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
        mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
        mock_beam_start_python_pipeline.assert_called_once_with(
            variables=expected_variables,
            py_file=PY_FILE,
            py_interpreter=py_interpreter,
            py_options=PY_OPTIONS,
            py_requirements=None,
            py_system_site_packages=False,
            process_line_callback=mock_callback_on_job_id.return_value,
        )
        mock_dataflow_wait_for_done.assert_called_once_with(
            job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION
        )
    @parameterized.expand(
        [
            (['foo-bar'], False),
            (['foo-bar'], True),
            ([], True),
        ]
    )
    @mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
    @mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
    def test_start_python_dataflow_with_non_empty_py_requirements_and_without_system_packages(
        self,
        current_py_requirements,
        current_py_system_site_packages,
        mock_callback_on_job_id,
        mock_dataflow_wait_for_done,
        mock_uuid,
    ):
        """Each valid py_requirements / py_system_site_packages combination is
        forwarded unchanged to BeamHook.start_python_pipeline."""
        mock_beam_start_python_pipeline = self.dataflow_hook.beam_hook.start_python_pipeline
        mock_uuid.return_value = MOCK_UUID
        on_new_job_id_callback = MagicMock()
        job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
        with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
            self.dataflow_hook.start_python_dataflow(
                job_name=JOB_NAME,
                variables=DATAFLOW_VARIABLES_PY,
                dataflow=PY_FILE,
                py_options=PY_OPTIONS,
                py_interpreter=DEFAULT_PY_INTERPRETER,
                py_requirements=current_py_requirements,
                py_system_site_packages=current_py_system_site_packages,
                on_new_job_id_callback=on_new_job_id_callback,
            )
        expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_PY)
        expected_variables["job_name"] = job_name
        expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
        mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
        mock_beam_start_python_pipeline.assert_called_once_with(
            variables=expected_variables,
            py_file=PY_FILE,
            py_interpreter=DEFAULT_PY_INTERPRETER,
            py_options=PY_OPTIONS,
            py_requirements=current_py_requirements,
            py_system_site_packages=current_py_system_site_packages,
            process_line_callback=mock_callback_on_job_id.return_value,
        )
        mock_dataflow_wait_for_done.assert_called_once_with(
            job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION
        )
    @mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
    def test_start_python_dataflow_with_empty_py_requirements_and_without_system_packages(
        self, mock_dataflow_wait_for_done, mock_uuid
    ):
        """Empty py_requirements without system site-packages is invalid and
        must raise before any job is started."""
        # Use a real BeamHook here so its argument validation actually runs.
        self.dataflow_hook.beam_hook = BeamHook(runner="DataflowRunner")
        mock_uuid.return_value = MOCK_UUID
        on_new_job_id_callback = MagicMock()
        with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"), self.assertRaisesRegex(
            AirflowException, "Invalid method invocation."
        ):
            self.dataflow_hook.start_python_dataflow(
                job_name=JOB_NAME,
                variables=DATAFLOW_VARIABLES_PY,
                dataflow=PY_FILE,
                py_options=PY_OPTIONS,
                py_interpreter=DEFAULT_PY_INTERPRETER,
                py_requirements=[],
                on_new_job_id_callback=on_new_job_id_callback,
            )
        mock_dataflow_wait_for_done.assert_not_called()
    @mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
    @mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
    def test_start_java_dataflow(self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid):
        """start_java_dataflow delegates to BeamHook.start_java_pipeline with a
        uuid-suffixed jobName, default region and JSON-encoded labels, then waits."""
        mock_beam_start_java_pipeline = self.dataflow_hook.beam_hook.start_java_pipeline
        mock_uuid.return_value = MOCK_UUID
        on_new_job_id_callback = MagicMock()
        job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
        with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
            self.dataflow_hook.start_java_dataflow(
                job_name=JOB_NAME,
                variables=DATAFLOW_VARIABLES_JAVA,
                jar=JAR_FILE,
                job_class=JOB_CLASS,
                on_new_job_id_callback=on_new_job_id_callback,
            )
        expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_JAVA)
        expected_variables["jobName"] = job_name
        expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
        expected_variables["labels"] = '{"foo":"bar"}'
        mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
        mock_beam_start_java_pipeline.assert_called_once_with(
            variables=expected_variables,
            jar=JAR_FILE,
            job_class=JOB_CLASS,
            process_line_callback=mock_callback_on_job_id.return_value,
        )
        mock_dataflow_wait_for_done.assert_called_once_with(
            job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION, multiple_jobs=False
        )
    @mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
    @mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
    def test_start_java_dataflow_with_multiple_values_in_variables(
        self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
    ):
        """A list-valued variable entry is forwarded unchanged to BeamHook."""
        mock_beam_start_java_pipeline = self.dataflow_hook.beam_hook.start_java_pipeline
        mock_uuid.return_value = MOCK_UUID
        on_new_job_id_callback = MagicMock()
        job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
        passed_variables: dict[str, Any] = copy.deepcopy(DATAFLOW_VARIABLES_JAVA)
        passed_variables['mock-option'] = ['a.whl', 'b.whl']
        with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
            self.dataflow_hook.start_java_dataflow(
                job_name=JOB_NAME,
                variables=passed_variables,
                jar=JAR_FILE,
                job_class=JOB_CLASS,
                on_new_job_id_callback=on_new_job_id_callback,
            )
        expected_variables = copy.deepcopy(passed_variables)
        expected_variables["jobName"] = job_name
        expected_variables["region"] = DEFAULT_DATAFLOW_LOCATION
        expected_variables["labels"] = '{"foo":"bar"}'
        mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
        mock_beam_start_java_pipeline.assert_called_once_with(
            variables=expected_variables,
            jar=JAR_FILE,
            job_class=JOB_CLASS,
            process_line_callback=mock_callback_on_job_id.return_value,
        )
        mock_dataflow_wait_for_done.assert_called_once_with(
            job_id=mock.ANY, job_name=job_name, location=DEFAULT_DATAFLOW_LOCATION, multiple_jobs=False
        )
    @mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
    @mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
    def test_start_java_dataflow_with_custom_region_as_variable(
        self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
    ):
        """A 'region' key inside the variables dict overrides the default location."""
        mock_beam_start_java_pipeline = self.dataflow_hook.beam_hook.start_java_pipeline
        mock_uuid.return_value = MOCK_UUID
        on_new_job_id_callback = MagicMock()
        job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
        passed_variables: dict[str, Any] = copy.deepcopy(DATAFLOW_VARIABLES_JAVA)
        passed_variables['region'] = TEST_LOCATION
        with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
            self.dataflow_hook.start_java_dataflow(
                job_name=JOB_NAME,
                variables=passed_variables,
                jar=JAR_FILE,
                job_class=JOB_CLASS,
                on_new_job_id_callback=on_new_job_id_callback,
            )
        expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_JAVA)
        expected_variables["jobName"] = job_name
        expected_variables["region"] = TEST_LOCATION
        expected_variables["labels"] = '{"foo":"bar"}'
        mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
        mock_beam_start_java_pipeline.assert_called_once_with(
            variables=expected_variables,
            jar=JAR_FILE,
            job_class=JOB_CLASS,
            process_line_callback=mock_callback_on_job_id.return_value,
        )
        # wait_for_done must also target the custom region, not the default one.
        mock_dataflow_wait_for_done.assert_called_once_with(
            job_id=mock.ANY, job_name=job_name, location=TEST_LOCATION, multiple_jobs=False
        )
    @mock.patch(DATAFLOW_STRING.format('uuid.uuid4'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.wait_for_done'))
    @mock.patch(DATAFLOW_STRING.format('process_line_and_extract_dataflow_job_id_callback'))
    def test_start_java_dataflow_with_custom_region_as_parameter(
        self, mock_callback_on_job_id, mock_dataflow_wait_for_done, mock_uuid
    ):
        """The explicit location= parameter sets the region for both launch and wait."""
        mock_beam_start_java_pipeline = self.dataflow_hook.beam_hook.start_java_pipeline
        mock_uuid.return_value = MOCK_UUID
        on_new_job_id_callback = MagicMock()
        job_name = f"{JOB_NAME}-{MOCK_UUID_PREFIX}"
        with self.assertWarnsRegex(DeprecationWarning, "This method is deprecated"):
            self.dataflow_hook.start_java_dataflow(
                job_name=JOB_NAME,
                variables=DATAFLOW_VARIABLES_JAVA,
                jar=JAR_FILE,
                job_class=JOB_CLASS,
                on_new_job_id_callback=on_new_job_id_callback,
                location=TEST_LOCATION,
            )
        expected_variables = copy.deepcopy(DATAFLOW_VARIABLES_JAVA)
        expected_variables["jobName"] = job_name
        expected_variables["region"] = TEST_LOCATION
        expected_variables["labels"] = '{"foo":"bar"}'
        mock_callback_on_job_id.assert_called_once_with(on_new_job_id_callback)
        mock_beam_start_java_pipeline.assert_called_once_with(
            variables=expected_variables,
            jar=JAR_FILE,
            job_class=JOB_CLASS,
            process_line_callback=mock_callback_on_job_id.return_value,
        )
        mock_dataflow_wait_for_done.assert_called_once_with(
            job_id=mock.ANY, job_name=job_name, location=TEST_LOCATION, multiple_jobs=False
        )
    @parameterized.expand(
        [
            # (expected_result, input_job_name, append_job_name)
            (JOB_NAME, JOB_NAME, False),
            ('test-example', 'test_example', False),
            (f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}', JOB_NAME, True),
            (f'test-example-{MOCK_UUID_PREFIX}', 'test_example', True),
            ('df-job-1', 'df-job-1', False),
            ('df-job', 'df-job', False),
            ('dfjob', 'dfjob', False),
            ('dfjob1', 'dfjob1', False),
        ]
    )
    @mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
    def test_valid_dataflow_job_name(self, expected_result, job_name, append_job_name, mock_uuid4):
        """build_dataflow_job_name replaces underscores and optionally appends a uuid-prefix suffix."""
        job_name = self.dataflow_hook.build_dataflow_job_name(
            job_name=job_name, append_job_name=append_job_name
        )
        self.assertEqual(expected_result, job_name)
#
@parameterized.expand([("1dfjob@",), ("dfjob@",), ("df^jo",)])
def test_build_dataflow_job_name_with_invalid_value(self, job_name):
self.assertRaises(
ValueError, self.dataflow_hook.build_dataflow_job_name, job_name=job_name, append_job_name=False
)
#
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_get_job(self, mock_conn, mock_dataflowjob):
method_fetch_job_by_id = mock_dataflowjob.return_value.fetch_job_by_id
self.dataflow_hook.get_job(job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION)
mock_conn.assert_called_once()
mock_dataflowjob.assert_called_once_with(
dataflow=mock_conn.return_value,
project_number=TEST_PROJECT_ID,
location=TEST_LOCATION,
)
method_fetch_job_by_id.assert_called_once_with(TEST_JOB_ID)
#
    @mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
    def test_fetch_job_metrics_by_id(self, mock_conn, mock_dataflowjob):
        """fetch_job_metrics_by_id delegates to the controller's fetch_job_metrics_by_id."""
        method_fetch_job_metrics_by_id = mock_dataflowjob.return_value.fetch_job_metrics_by_id
        self.dataflow_hook.fetch_job_metrics_by_id(
            job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION
        )
        mock_conn.assert_called_once()
        mock_dataflowjob.assert_called_once_with(
            dataflow=mock_conn.return_value,
            project_number=TEST_PROJECT_ID,
            location=TEST_LOCATION,
        )
        method_fetch_job_metrics_by_id.assert_called_once_with(TEST_JOB_ID)
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
    def test_fetch_job_metrics_by_id_controller(self, mock_conn):
        """Without the controller patched, the getMetrics API call is issued with the right ids."""
        method_get_metrics = (
            mock_conn.return_value.projects.return_value.locations.return_value.jobs.return_value.getMetrics
        )
        self.dataflow_hook.fetch_job_metrics_by_id(
            job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION
        )
        mock_conn.assert_called_once()
        # num_retries=0 here because the hook was built with default retry settings in setUp.
        method_get_metrics.return_value.execute.assert_called_once_with(num_retries=0)
        method_get_metrics.assert_called_once_with(
            jobId=TEST_JOB_ID, projectId=TEST_PROJECT_ID, location=TEST_LOCATION
        )
    @mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
    def test_fetch_job_messages_by_id(self, mock_conn, mock_dataflowjob):
        """fetch_job_messages_by_id delegates to the controller's fetch_job_messages_by_id."""
        method_fetch_job_messages_by_id = mock_dataflowjob.return_value.fetch_job_messages_by_id
        self.dataflow_hook.fetch_job_messages_by_id(
            job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION
        )
        mock_conn.assert_called_once()
        mock_dataflowjob.assert_called_once_with(
            dataflow=mock_conn.return_value,
            project_number=TEST_PROJECT_ID,
            location=TEST_LOCATION,
        )
        method_fetch_job_messages_by_id.assert_called_once_with(TEST_JOB_ID)
    @mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
    def test_fetch_job_autoscaling_events_by_id(self, mock_conn, mock_dataflowjob):
        """fetch_job_autoscaling_events_by_id delegates to the controller's same-named method."""
        method_fetch_job_autoscaling_events_by_id = (
            mock_dataflowjob.return_value.fetch_job_autoscaling_events_by_id
        )
        self.dataflow_hook.fetch_job_autoscaling_events_by_id(
            job_id=TEST_JOB_ID, project_id=TEST_PROJECT_ID, location=TEST_LOCATION
        )
        mock_conn.assert_called_once()
        mock_dataflowjob.assert_called_once_with(
            dataflow=mock_conn.return_value,
            project_number=TEST_PROJECT_ID,
            location=TEST_LOCATION,
        )
        method_fetch_job_autoscaling_events_by_id.assert_called_once_with(TEST_JOB_ID)
    @mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
    def test_wait_for_done(self, mock_conn, mock_dataflowjob):
        """wait_for_done forwards the hook's own config to the controller and blocks on it."""
        method_wait_for_done = mock_dataflowjob.return_value.wait_for_done
        self.dataflow_hook.wait_for_done(
            job_name="JOB_NAME",
            project_id=TEST_PROJECT_ID,
            job_id=TEST_JOB_ID,
            location=TEST_LOCATION,
            multiple_jobs=False,
        )
        mock_conn.assert_called_once()
        # The controller inherits timing/retry/drain configuration from the hook instance.
        mock_dataflowjob.assert_called_once_with(
            dataflow=mock_conn.return_value,
            project_number=TEST_PROJECT_ID,
            name="JOB_NAME",
            location=TEST_LOCATION,
            poll_sleep=self.dataflow_hook.poll_sleep,
            job_id=TEST_JOB_ID,
            num_retries=self.dataflow_hook.num_retries,
            multiple_jobs=False,
            drain_pipeline=self.dataflow_hook.drain_pipeline,
            cancel_timeout=self.dataflow_hook.cancel_timeout,
            wait_until_finished=self.dataflow_hook.wait_until_finished,
        )
        method_wait_for_done.assert_called_once_with()
class TestDataflowTemplateHook(unittest.TestCase):
    """Tests for DataflowHook methods that launch classic/flex templates and SQL jobs."""

    def setUp(self):
        """Create a fresh hook with the default GCP connection for each test."""
        self.dataflow_hook = DataflowHook(gcp_conn_id='google_cloud_default')
    @mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
    @mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
    def test_start_template_dataflow(self, mock_conn, mock_controller, mock_uuid):
        """Launching a classic template sends variables as the environment and waits for done."""
        launch_method = (
            mock_conn.return_value.projects.return_value.locations.return_value.templates.return_value.launch
        )
        launch_method.return_value.execute.return_value = {"job": {"id": TEST_JOB_ID}}
        variables = {'zone': 'us-central1-f', 'tempLocation': 'gs://test/temp'}
        self.dataflow_hook.start_template_dataflow(
            job_name=JOB_NAME,
            variables=copy.deepcopy(variables),
            parameters=PARAMETERS,
            dataflow_template=TEST_TEMPLATE,
            project_id=TEST_PROJECT,
        )
        # jobName gets the uuid-prefix suffix; variables become the launch environment.
        launch_method.assert_called_once_with(
            body={
                'jobName': f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}',
                'parameters': PARAMETERS,
                'environment': variables,
            },
            gcsPath='gs://dataflow-templates/wordcount/template_file',
            projectId=TEST_PROJECT,
            location=DEFAULT_DATAFLOW_LOCATION,
        )
        mock_controller.assert_called_once_with(
            dataflow=mock_conn.return_value,
            job_id='test-job-id',
            name=f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}',
            num_retries=5,
            poll_sleep=10,
            project_number=TEST_PROJECT,
            location=DEFAULT_DATAFLOW_LOCATION,
            drain_pipeline=False,
            cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
            wait_until_finished=None,
        )
        mock_controller.return_value.wait_for_done.assert_called_once()
    @mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
    @mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
    def test_start_template_dataflow_with_custom_region_as_variable(
        self, mock_conn, mock_controller, mock_uuid
    ):
        """A 'region' key in variables routes the template launch to that location."""
        launch_method = (
            mock_conn.return_value.projects.return_value.locations.return_value.templates.return_value.launch
        )
        launch_method.return_value.execute.return_value = {"job": {"id": TEST_JOB_ID}}
        self.dataflow_hook.start_template_dataflow(
            job_name=JOB_NAME,
            variables={'region': TEST_LOCATION},
            parameters=PARAMETERS,
            dataflow_template=TEST_TEMPLATE,
            project_id=TEST_PROJECT,
        )
        launch_method.assert_called_once_with(
            projectId=TEST_PROJECT,
            location=TEST_LOCATION,
            gcsPath=TEST_TEMPLATE,
            body=mock.ANY,
        )
        mock_controller.assert_called_once_with(
            dataflow=mock_conn.return_value,
            job_id=TEST_JOB_ID,
            name=UNIQUE_JOB_NAME,
            num_retries=5,
            poll_sleep=10,
            project_number=TEST_PROJECT,
            location=TEST_LOCATION,
            drain_pipeline=False,
            cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
            wait_until_finished=None,
        )
        mock_controller.return_value.wait_for_done.assert_called_once()
    @mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
    @mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
    def test_start_template_dataflow_with_custom_region_as_parameter(
        self, mock_conn, mock_controller, mock_uuid
    ):
        """The explicit location= parameter routes the template launch to that location."""
        launch_method = (
            mock_conn.return_value.projects.return_value.locations.return_value.templates.return_value.launch
        )
        launch_method.return_value.execute.return_value = {"job": {"id": TEST_JOB_ID}}
        self.dataflow_hook.start_template_dataflow(
            job_name=JOB_NAME,
            variables={},
            parameters=PARAMETERS,
            dataflow_template=TEST_TEMPLATE,
            location=TEST_LOCATION,
            project_id=TEST_PROJECT,
        )
        launch_method.assert_called_once_with(
            body={'jobName': UNIQUE_JOB_NAME, 'parameters': PARAMETERS, 'environment': {}},
            gcsPath='gs://dataflow-templates/wordcount/template_file',
            projectId=TEST_PROJECT,
            location=TEST_LOCATION,
        )
        mock_controller.assert_called_once_with(
            dataflow=mock_conn.return_value,
            job_id=TEST_JOB_ID,
            name=UNIQUE_JOB_NAME,
            num_retries=5,
            poll_sleep=10,
            project_number=TEST_PROJECT,
            location=TEST_LOCATION,
            drain_pipeline=False,
            cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
            wait_until_finished=None,
        )
        mock_controller.return_value.wait_for_done.assert_called_once()
    @mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
    @mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
    def test_start_template_dataflow_with_runtime_env(self, mock_conn, mock_dataflowjob, mock_uuid):
        """When a key exists in both variables and the environment= kwarg, the variables win.

        The expected launch body equals RUNTIME_ENV even though environment={"numWorkers": 17}
        is passed — i.e. the kwarg does not override an already-present variable.
        NOTE(review): this presumes RUNTIME_ENV contains a "numWorkers" key; verify against
        the constant's definition earlier in the file.
        """
        options_with_runtime_env = copy.deepcopy(RUNTIME_ENV)
        dataflowjob_instance = mock_dataflowjob.return_value
        dataflowjob_instance.wait_for_done.return_value = None
        # fmt: off
        method = (mock_conn.return_value
                  .projects.return_value
                  .locations.return_value
                  .templates.return_value
                  .launch)
        # fmt: on
        method.return_value.execute.return_value = {'job': {'id': TEST_JOB_ID}}
        self.dataflow_hook.start_template_dataflow(
            job_name=JOB_NAME,
            variables=options_with_runtime_env,
            parameters=PARAMETERS,
            dataflow_template=TEST_TEMPLATE,
            project_id=TEST_PROJECT,
            environment={"numWorkers": 17},
        )
        body = {"jobName": mock.ANY, "parameters": PARAMETERS, "environment": RUNTIME_ENV}
        method.assert_called_once_with(
            projectId=TEST_PROJECT,
            location=DEFAULT_DATAFLOW_LOCATION,
            gcsPath=TEST_TEMPLATE,
            body=body,
        )
        mock_dataflowjob.assert_called_once_with(
            dataflow=mock_conn.return_value,
            job_id=TEST_JOB_ID,
            location=DEFAULT_DATAFLOW_LOCATION,
            name=f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}',
            num_retries=5,
            poll_sleep=10,
            project_number=TEST_PROJECT,
            drain_pipeline=False,
            cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
            wait_until_finished=None,
        )
        mock_uuid.assert_called_once_with()
    @mock.patch(DATAFLOW_STRING.format('uuid.uuid4'), return_value=MOCK_UUID)
    @mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
    def test_start_template_dataflow_update_runtime_env(self, mock_conn, mock_dataflowjob, mock_uuid):
        """Keys given only in environment= are merged into the launch environment."""
        # Remove numWorkers from the variables so only the environment= kwarg supplies it.
        options_with_runtime_env = copy.deepcopy(RUNTIME_ENV)
        del options_with_runtime_env["numWorkers"]
        runtime_env = {"numWorkers": 17}
        expected_runtime_env = copy.deepcopy(RUNTIME_ENV)
        expected_runtime_env.update(runtime_env)
        dataflowjob_instance = mock_dataflowjob.return_value
        dataflowjob_instance.wait_for_done.return_value = None
        # fmt: off
        method = (mock_conn.return_value
                  .projects.return_value
                  .locations.return_value
                  .templates.return_value
                  .launch)
        # fmt: on
        method.return_value.execute.return_value = {'job': {'id': TEST_JOB_ID}}
        self.dataflow_hook.start_template_dataflow(
            job_name=JOB_NAME,
            variables=options_with_runtime_env,
            parameters=PARAMETERS,
            dataflow_template=TEST_TEMPLATE,
            project_id=TEST_PROJECT,
            environment=runtime_env,
        )
        body = {"jobName": mock.ANY, "parameters": PARAMETERS, "environment": expected_runtime_env}
        method.assert_called_once_with(
            projectId=TEST_PROJECT,
            location=DEFAULT_DATAFLOW_LOCATION,
            gcsPath=TEST_TEMPLATE,
            body=body,
        )
        mock_dataflowjob.assert_called_once_with(
            dataflow=mock_conn.return_value,
            job_id=TEST_JOB_ID,
            location=DEFAULT_DATAFLOW_LOCATION,
            name=f'test-dataflow-pipeline-{MOCK_UUID_PREFIX}',
            num_retries=5,
            poll_sleep=10,
            project_number=TEST_PROJECT,
            drain_pipeline=False,
            cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
            wait_until_finished=None,
        )
        mock_uuid.assert_called_once_with()
    @mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
    def test_start_flex_template(self, mock_conn, mock_controller):
        """start_flex_template launches the flex template, notifies the callback, returns the job."""
        expected_job = {"id": TEST_JOB_ID}
        mock_locations = mock_conn.return_value.projects.return_value.locations
        launch_method = mock_locations.return_value.flexTemplates.return_value.launch
        launch_method.return_value.execute.return_value = {"job": expected_job}
        mock_controller.return_value.get_jobs.return_value = [{"id": TEST_JOB_ID}]
        on_new_job_callback = mock.MagicMock()
        result = self.dataflow_hook.start_flex_template(
            body={"launchParameter": TEST_FLEX_PARAMETERS},
            location=TEST_LOCATION,
            project_id=TEST_PROJECT_ID,
            on_new_job_callback=on_new_job_callback,
        )
        # The callback must receive the freshly-launched job dict.
        on_new_job_callback.assert_called_once_with(expected_job)
        launch_method.assert_called_once_with(
            projectId='test-project-id',
            body={'launchParameter': TEST_FLEX_PARAMETERS},
            location=TEST_LOCATION,
        )
        mock_controller.assert_called_once_with(
            dataflow=mock_conn.return_value,
            project_number=TEST_PROJECT_ID,
            job_id=TEST_JOB_ID,
            location=TEST_LOCATION,
            poll_sleep=self.dataflow_hook.poll_sleep,
            num_retries=self.dataflow_hook.num_retries,
            cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
            wait_until_finished=self.dataflow_hook.wait_until_finished,
        )
        mock_controller.return_value.get_jobs.assert_called_once_with(refresh=True)
        assert result == {"id": TEST_JOB_ID}
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
@mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
def test_cancel_job(self, mock_get_conn, jobs_controller):
self.dataflow_hook.cancel_job(
job_name=UNIQUE_JOB_NAME, job_id=TEST_JOB_ID, project_id=TEST_PROJECT, location=TEST_LOCATION
)
jobs_controller.assert_called_once_with(
dataflow=mock_get_conn.return_value,
job_id=TEST_JOB_ID,
location=TEST_LOCATION,
name=UNIQUE_JOB_NAME,
poll_sleep=10,
project_number=TEST_PROJECT,
num_retries=5,
drain_pipeline=False,
cancel_timeout=DEFAULT_CANCEL_TIMEOUT,
)
jobs_controller.cancel()
    @mock.patch(DATAFLOW_STRING.format('_DataflowJobsController'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.provide_authorized_gcloud'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
    @mock.patch(DATAFLOW_STRING.format('subprocess.run'))
    def test_start_sql_job_failed_to_run(
        self, mock_run, mock_get_conn, mock_provide_authorized_gcloud, mock_controller
    ):
        """start_sql_job shells out to `gcloud dataflow sql query` and returns the job.

        NOTE(review): despite the name, this test exercises the *success* path
        (returncode=0, job returned), while the sibling ``test_start_sql_job``
        exercises the failure path — the two names look swapped; confirm and rename.
        """
        test_job = {'id': "TEST_JOB_ID"}
        mock_controller.return_value.get_jobs.return_value = [test_job]
        mock_run.return_value = mock.MagicMock(
            stdout=f"{TEST_JOB_ID}\n".encode(), stderr=f"{TEST_JOB_ID}\n".encode(), returncode=0
        )
        on_new_job_callback = mock.MagicMock()
        result = self.dataflow_hook.start_sql_job(
            job_name=TEST_SQL_JOB_NAME,
            query=TEST_SQL_QUERY,
            options=TEST_SQL_OPTIONS,
            location=TEST_LOCATION,
            project_id=TEST_PROJECT,
            on_new_job_callback=on_new_job_callback,
        )
        # The exact gcloud invocation, including BigQuery output options, is pinned.
        mock_run.assert_called_once_with(
            [
                'gcloud',
                'dataflow',
                'sql',
                'query',
                TEST_SQL_QUERY,
                '--project=test-project',
                '--format=value(job.id)',
                '--job-name=test-sql-job-name',
                '--region=custom-location',
                '--bigquery-project=test-project',
                '--bigquery-dataset=test-dataset',
                '--bigquery-table=beam_output',
                '--bigquery-write-disposition=write-truncate',
            ],
            capture_output=True,
        )
        mock_controller.assert_called_once_with(
            dataflow=mock_get_conn.return_value,
            job_id=TEST_JOB_ID,
            location=TEST_LOCATION,
            poll_sleep=10,
            project_number=TEST_PROJECT,
            num_retries=5,
            drain_pipeline=False,
            wait_until_finished=None,
        )
        mock_controller.return_value.wait_for_done.assert_called_once()
        assert result == test_job
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.get_conn'))
    @mock.patch(DATAFLOW_STRING.format('DataflowHook.provide_authorized_gcloud'))
    @mock.patch(DATAFLOW_STRING.format('subprocess.run'))
    def test_start_sql_job(self, mock_run, mock_provide_authorized_gcloud, mock_get_conn):
        """A non-zero gcloud exit code must surface as AirflowException.

        NOTE(review): this is the *failure* path (returncode=1) even though the
        sibling test carries the "failed_to_run" suffix — names look swapped; confirm.
        """
        mock_run.return_value = mock.MagicMock(
            stdout=f"{TEST_JOB_ID}\n".encode(), stderr=f"{TEST_JOB_ID}\n".encode(), returncode=1
        )
        with pytest.raises(AirflowException):
            self.dataflow_hook.start_sql_job(
                job_name=TEST_SQL_JOB_NAME,
                query=TEST_SQL_QUERY,
                options=TEST_SQL_OPTIONS,
                location=TEST_LOCATION,
                project_id=TEST_PROJECT,
                on_new_job_callback=mock.MagicMock(),
            )
class TestDataflowJob(unittest.TestCase):
    """Tests for the _DataflowJobsController polling, state-checking and cancel logic."""

    def setUp(self):
        """Provide a fresh MagicMock standing in for the Dataflow API client resource."""
        self.mock_dataflow = MagicMock()
def test_dataflow_job_init_with_job_id(self):
mock_jobs = MagicMock()
self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value = mock_jobs
_DataflowJobsController(
self.mock_dataflow, TEST_PROJECT, TEST_LOCATION, 10, UNIQUE_JOB_NAME, TEST_JOB_ID
).get_jobs()
mock_jobs.get.assert_called_once_with(
projectId=TEST_PROJECT, location=TEST_LOCATION, jobId=TEST_JOB_ID
)
    def test_dataflow_job_init_without_job_id(self):
        """Without a job id, get_jobs lists jobs in the project/location by name."""
        job = {"id": TEST_JOB_ID, "name": UNIQUE_JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_DONE}
        mock_list = self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value.list
        (mock_list.return_value.execute.return_value) = {'jobs': [job]}
        # list_next -> None terminates result pagination after the first page.
        # fmt: off
        (
            self.mock_dataflow.projects.return_value.
            locations.return_value.
            jobs.return_value.
            list_next.return_value
        ) = None
        # fmt: on
        _DataflowJobsController(
            self.mock_dataflow, TEST_PROJECT, TEST_LOCATION, 10, UNIQUE_JOB_NAME
        ).get_jobs()
        mock_list.assert_called_once_with(projectId=TEST_PROJECT, location=TEST_LOCATION)
    def test_dataflow_job_wait_for_multiple_jobs(self):
        """With multiple_jobs=True, wait_for_done lists jobs once and returns when all are DONE."""
        job = {
            "id": TEST_JOB_ID,
            "name": UNIQUE_JOB_NAME,
            "type": DataflowJobType.JOB_TYPE_BATCH,
            "currentState": DataflowJobStatus.JOB_STATE_DONE,
        }
        # fmt: off
        (
            self.mock_dataflow.projects.return_value.
            locations.return_value.
            jobs.return_value.
            list.return_value.
            execute.return_value
        ) = {
            "jobs": [job, job]
        }
        (
            self.mock_dataflow.projects.return_value.
            locations.return_value.
            jobs.return_value.
            list_next.return_value
        ) = None
        # fmt: on
        dataflow_job = _DataflowJobsController(
            dataflow=self.mock_dataflow,
            project_number=TEST_PROJECT,
            name=UNIQUE_JOB_NAME,
            location=TEST_LOCATION,
            poll_sleep=10,
            job_id=TEST_JOB_ID,
            num_retries=20,
            multiple_jobs=True,
        )
        dataflow_job.wait_for_done()
        # Exactly one list call and one execute (with the configured retries) is expected.
        # fmt: off
        self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value.\
            list.assert_called_once_with(location=TEST_LOCATION, projectId=TEST_PROJECT)
        self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value.list\
            .return_value.execute.assert_called_once_with(num_retries=20)
        # fmt: on
        assert dataflow_job.get_jobs() == [job, job]
    @parameterized.expand(
        [
            (DataflowJobStatus.JOB_STATE_FAILED, "Google Cloud Dataflow job name-2 has failed\\."),
            (DataflowJobStatus.JOB_STATE_CANCELLED, "Google Cloud Dataflow job name-2 was cancelled\\."),
            (DataflowJobStatus.JOB_STATE_DRAINED, "Google Cloud Dataflow job name-2 was drained\\."),
            (DataflowJobStatus.JOB_STATE_UPDATED, "Google Cloud Dataflow job name-2 was updated\\."),
            (
                DataflowJobStatus.JOB_STATE_UNKNOWN,
                "Google Cloud Dataflow job name-2 was unknown state: JOB_STATE_UNKNOWN",
            ),
        ]
    )
    def test_dataflow_job_wait_for_multiple_jobs_and_one_in_terminal_state(self, state, exception_regex):
        """If any tracked job lands in a failed/terminal state, wait_for_done raises with a per-state message."""
        # fmt: off
        (
            self.mock_dataflow.projects.return_value.
            locations.return_value.
            jobs.return_value.
            list.return_value.
            execute.return_value
        ) = {
            "jobs": [
                {
                    "id": "id-1", "name": "name-1",
                    "type": DataflowJobType.JOB_TYPE_BATCH,
                    "currentState": DataflowJobStatus.JOB_STATE_DONE
                },
                {
                    "id": "id-2", "name": "name-2",
                    "type": DataflowJobType.JOB_TYPE_BATCH,
                    "currentState": state
                }
            ]
        }
        (
            self.mock_dataflow.projects.return_value.
            locations.return_value.
            jobs.return_value.
            list_next.return_value
        ) = None
        # fmt: on
        dataflow_job = _DataflowJobsController(
            dataflow=self.mock_dataflow,
            project_number=TEST_PROJECT,
            name="name-",
            location=TEST_LOCATION,
            poll_sleep=0,
            job_id=None,
            num_retries=20,
            multiple_jobs=True,
        )
        with pytest.raises(Exception, match=exception_regex):
            dataflow_job.wait_for_done()
    def test_dataflow_job_wait_for_multiple_jobs_and_streaming_jobs(self):
        """A RUNNING streaming job counts as finished — wait_for_done lists only once."""
        # fmt: off
        mock_jobs_list = (
            self.mock_dataflow.projects.return_value.
            locations.return_value.
            jobs.return_value.
            list
        )
        mock_jobs_list.return_value.execute.return_value = {
            "jobs": [
                {
                    "id": "id-2",
                    "name": "name-2",
                    "currentState": DataflowJobStatus.JOB_STATE_RUNNING,
                    "type": DataflowJobType.JOB_TYPE_STREAMING
                }
            ]
        }
        (
            self.mock_dataflow.projects.return_value.
            locations.return_value.
            jobs.return_value.
            list_next.return_value
        ) = None
        # fmt: on
        dataflow_job = _DataflowJobsController(
            dataflow=self.mock_dataflow,
            project_number=TEST_PROJECT,
            name="name-",
            location=TEST_LOCATION,
            poll_sleep=0,
            job_id=None,
            num_retries=20,
            multiple_jobs=True,
        )
        dataflow_job.wait_for_done()
        # No polling loop: a single list call proves the streaming job was treated as terminal.
        assert 1 == mock_jobs_list.call_count
    def test_dataflow_job_wait_for_single_jobs(self):
        """With multiple_jobs=False and a job id, wait_for_done polls via jobs.get."""
        job = {
            "id": TEST_JOB_ID,
            "name": UNIQUE_JOB_NAME,
            "type": DataflowJobType.JOB_TYPE_BATCH,
            "currentState": DataflowJobStatus.JOB_STATE_DONE,
        }
        # fmt: off
        self.mock_dataflow.projects.return_value.locations.return_value. \
            jobs.return_value.get.return_value.execute.return_value = job
        (
            self.mock_dataflow.projects.return_value.
            locations.return_value.
            jobs.return_value.
            list_next.return_value
        ) = None
        # fmt: on
        dataflow_job = _DataflowJobsController(
            dataflow=self.mock_dataflow,
            project_number=TEST_PROJECT,
            name=UNIQUE_JOB_NAME,
            location=TEST_LOCATION,
            poll_sleep=10,
            job_id=TEST_JOB_ID,
            num_retries=20,
            multiple_jobs=False,
        )
        dataflow_job.wait_for_done()
        # fmt: off
        self.mock_dataflow.projects.return_value.locations.return_value. \
            jobs.return_value.get.assert_called_once_with(
                jobId=TEST_JOB_ID,
                location=TEST_LOCATION,
                projectId=TEST_PROJECT
            )
        self.mock_dataflow.projects.return_value.locations.return_value. \
            jobs.return_value.get.return_value.execute.assert_called_once_with(num_retries=20)
        # fmt: on
        assert dataflow_job.get_jobs() == [job]
    def test_dataflow_job_is_job_running_with_no_job(self):
        """is_job_running returns False when the job listing is empty."""
        # fmt: off
        mock_jobs_list = (
            self.mock_dataflow.projects.return_value.
            locations.return_value.
            jobs.return_value.
            list
        )
        mock_jobs_list.return_value.execute.return_value = {
            "jobs": []
        }
        (
            self.mock_dataflow.projects.return_value.
            locations.return_value.
            jobs.return_value.
            list_next.return_value
        ) = None
        # fmt: on
        dataflow_job = _DataflowJobsController(
            dataflow=self.mock_dataflow,
            project_number=TEST_PROJECT,
            name="name-",
            location=TEST_LOCATION,
            poll_sleep=0,
            job_id=None,
            num_retries=20,
            multiple_jobs=True,
        )
        result = dataflow_job.is_job_running()
        assert result is False
    # fmt: off
    @parameterized.expand([
        # (job_type, job_state, wait_until_finished, expected "is finished" result)
        # RUNNING
        (DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_RUNNING, None, False),
        (DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_RUNNING, None, True),
        (DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_RUNNING, True, False),
        (DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_RUNNING, True, False),
        (DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_RUNNING, False, True),
        (DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_RUNNING, False, True),
        # AWAITING STATE
        (DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_PENDING, None, False),
        (DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_PENDING, None, False),
        (DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_PENDING, True, False),
        (DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_PENDING, True, False),
        (DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_PENDING, False, True),
        (DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_PENDING, False, True),
    ])
    # fmt: on
    def test_check_dataflow_job_state_wait_until_finished(
        self, job_type, job_state, wait_until_finished, expected_result
    ):
        """wait_until_finished tri-state (None/True/False) drives whether a state counts as done."""
        job = {"id": "id-2", "name": "name-2", "type": job_type, "currentState": job_state}
        dataflow_job = _DataflowJobsController(
            dataflow=self.mock_dataflow,
            project_number=TEST_PROJECT,
            name="name-",
            location=TEST_LOCATION,
            poll_sleep=0,
            job_id=None,
            num_retries=20,
            multiple_jobs=True,
            wait_until_finished=wait_until_finished,
        )
        result = dataflow_job._check_dataflow_job_state(job)
        assert result == expected_result
    # fmt: off
    @parameterized.expand([
        # (job_state, wait_until_finished, expected result) — no "type" key in the job dict
        # RUNNING
        (DataflowJobStatus.JOB_STATE_RUNNING, None, False),
        (DataflowJobStatus.JOB_STATE_RUNNING, True, False),
        (DataflowJobStatus.JOB_STATE_RUNNING, False, True),
        # AWAITING STATE
        (DataflowJobStatus.JOB_STATE_PENDING, None, False),
        (DataflowJobStatus.JOB_STATE_PENDING, True, False),
        (DataflowJobStatus.JOB_STATE_PENDING, False, True),
    ])
    # fmt: on
    def test_check_dataflow_job_state_without_job_type(self, job_state, wait_until_finished, expected_result):
        """A job dict lacking the 'type' key is still evaluated by _check_dataflow_job_state."""
        job = {"id": "id-2", "name": "name-2", "currentState": job_state}
        dataflow_job = _DataflowJobsController(
            dataflow=self.mock_dataflow,
            project_number=TEST_PROJECT,
            name="name-",
            location=TEST_LOCATION,
            poll_sleep=0,
            job_id=None,
            num_retries=20,
            multiple_jobs=True,
            wait_until_finished=wait_until_finished,
        )
        result = dataflow_job._check_dataflow_job_state(job)
        assert result == expected_result
    # fmt: off
    @parameterized.expand([
        (DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_FAILED,
         "Google Cloud Dataflow job name-2 has failed\\."),
        (DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_FAILED,
         "Google Cloud Dataflow job name-2 has failed\\."),
        (DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_UNKNOWN,
         "Google Cloud Dataflow job name-2 was unknown state: JOB_STATE_UNKNOWN"),
        (DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_UNKNOWN,
         "Google Cloud Dataflow job name-2 was unknown state: JOB_STATE_UNKNOWN"),
        (DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_CANCELLED,
         "Google Cloud Dataflow job name-2 was cancelled\\."),
        (DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_CANCELLED,
         "Google Cloud Dataflow job name-2 was cancelled\\."),
        (DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_DRAINED,
         "Google Cloud Dataflow job name-2 was drained\\."),
        (DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_DRAINED,
         "Google Cloud Dataflow job name-2 was drained\\."),
        (DataflowJobType.JOB_TYPE_BATCH, DataflowJobStatus.JOB_STATE_UPDATED,
         "Google Cloud Dataflow job name-2 was updated\\."),
        (DataflowJobType.JOB_TYPE_STREAMING, DataflowJobStatus.JOB_STATE_UPDATED,
         "Google Cloud Dataflow job name-2 was updated\\."),
    ])
    # fmt: on
    def test_check_dataflow_job_state_terminal_state(self, job_type, job_state, exception_regex):
        """Every unexpected terminal state raises with a state-specific message for both job types."""
        job = {"id": "id-2", "name": "name-2", "type": job_type, "currentState": job_state}
        dataflow_job = _DataflowJobsController(
            dataflow=self.mock_dataflow,
            project_number=TEST_PROJECT,
            name="name-",
            location=TEST_LOCATION,
            poll_sleep=0,
            job_id=None,
            num_retries=20,
            multiple_jobs=True,
        )
        with pytest.raises(Exception, match=exception_regex):
            dataflow_job._check_dataflow_job_state(job)
    def test_dataflow_job_cancel_job(self):
        """cancel() requests JOB_STATE_CANCELLED via a batch update and polls until terminal."""
        mock_jobs = self.mock_dataflow.projects.return_value.locations.return_value.jobs
        get_method = mock_jobs.return_value.get
        # Successive polls walk the job through non-terminal states to CANCELLED.
        get_method.return_value.execute.side_effect = [
            {"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_RUNNING},
            {"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_PENDING},
            {"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_QUEUED},
            {"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLING},
            {"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_DRAINING},
            {"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_STOPPED},
            {"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLED},
        ]
        mock_jobs.return_value.list_next.return_value = None
        dataflow_job = _DataflowJobsController(
            dataflow=self.mock_dataflow,
            project_number=TEST_PROJECT,
            name=UNIQUE_JOB_NAME,
            location=TEST_LOCATION,
            poll_sleep=0,
            job_id=TEST_JOB_ID,
            num_retries=20,
            multiple_jobs=False,
        )
        dataflow_job.cancel()
        get_method.assert_called_with(jobId=TEST_JOB_ID, location=TEST_LOCATION, projectId=TEST_PROJECT)
        get_method.return_value.execute.assert_called_with(num_retries=20)
        # The cancellation is issued through a single batched update request.
        self.mock_dataflow.new_batch_http_request.assert_called_once_with()
        mock_batch = self.mock_dataflow.new_batch_http_request.return_value
        mock_update = mock_jobs.return_value.update
        mock_update.assert_called_once_with(
            body={'requestedState': 'JOB_STATE_CANCELLED'},
            jobId='test-job-id',
            location=TEST_LOCATION,
            projectId='test-project',
        )
        mock_batch.add.assert_called_once_with(mock_update.return_value)
    @mock.patch("airflow.providers.google.cloud.hooks.dataflow.timeout")
    @mock.patch("time.sleep")
    def test_dataflow_job_cancel_job_cancel_timeout(self, mock_sleep, mock_timeout):
        """cancel() wraps its post-cancel polling in a timeout and sleeps poll_sleep between polls."""
        mock_jobs = self.mock_dataflow.projects.return_value.locations.return_value.jobs
        get_method = mock_jobs.return_value.get
        get_method.return_value.execute.side_effect = [
            {"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLING},
            {"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLING},
            {"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLING},
            {"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLING},
            {"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLED},
        ]
        mock_jobs.return_value.list_next.return_value = None
        dataflow_job = _DataflowJobsController(
            dataflow=self.mock_dataflow,
            project_number=TEST_PROJECT,
            name=UNIQUE_JOB_NAME,
            location=TEST_LOCATION,
            poll_sleep=4,
            job_id=TEST_JOB_ID,
            num_retries=20,
            multiple_jobs=False,
            cancel_timeout=10,
        )
        dataflow_job.cancel()
        get_method.assert_called_with(jobId=TEST_JOB_ID, location=TEST_LOCATION, projectId=TEST_PROJECT)
        get_method.return_value.execute.assert_called_with(num_retries=20)
        self.mock_dataflow.new_batch_http_request.assert_called_once_with()
        mock_batch = self.mock_dataflow.new_batch_http_request.return_value
        mock_update = mock_jobs.return_value.update
        mock_update.assert_called_once_with(
            body={'requestedState': 'JOB_STATE_CANCELLED'},
            jobId='test-job-id',
            location=TEST_LOCATION,
            projectId='test-project',
        )
        mock_batch.add.assert_called_once_with(mock_update.return_value)
        # Three sleeps of poll_sleep=4 between the four CANCELLING polls.
        mock_sleep.assert_has_calls([mock.call(4), mock.call(4), mock.call(4)])
        mock_timeout.assert_called_once_with(
            seconds=10, error_message='Canceling jobs failed due to timeout (10s): test-job-id'
        )
    @parameterized.expand(
        [
            # (drain_pipeline, job type, state requested when stopping)
            (False, "JOB_TYPE_BATCH", "JOB_STATE_CANCELLED"),
            (False, "JOB_TYPE_STREAMING", "JOB_STATE_CANCELLED"),
            (True, "JOB_TYPE_BATCH", "JOB_STATE_CANCELLED"),
            # Only streaming jobs can be drained; batch jobs are always cancelled.
            (True, "JOB_TYPE_STREAMING", "JOB_STATE_DRAINED"),
        ]
    )
    def test_dataflow_job_cancel_or_drain_job(self, drain_pipeline, job_type, requested_state):
        """drain_pipeline only changes the requested state for streaming jobs."""
        job = {
            "id": TEST_JOB_ID,
            "name": UNIQUE_JOB_NAME,
            "currentState": DataflowJobStatus.JOB_STATE_RUNNING,
            "type": job_type,
        }
        get_method = self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value.get
        get_method.return_value.execute.return_value = job
        # fmt: off
        job_list_nest_method = (self.mock_dataflow
                                .projects.return_value.
                                locations.return_value.
                                jobs.return_value.list_next)
        job_list_nest_method.return_value = None
        # fmt: on
        dataflow_job = _DataflowJobsController(
            dataflow=self.mock_dataflow,
            project_number=TEST_PROJECT,
            name=UNIQUE_JOB_NAME,
            location=TEST_LOCATION,
            poll_sleep=10,
            job_id=TEST_JOB_ID,
            num_retries=20,
            multiple_jobs=False,
            drain_pipeline=drain_pipeline,
            cancel_timeout=None,
        )
        dataflow_job.cancel()
        get_method.assert_called_once_with(jobId=TEST_JOB_ID, location=TEST_LOCATION, projectId=TEST_PROJECT)
        get_method.return_value.execute.assert_called_once_with(num_retries=20)
        self.mock_dataflow.new_batch_http_request.assert_called_once_with()
        mock_batch = self.mock_dataflow.new_batch_http_request.return_value
        mock_update = self.mock_dataflow.projects.return_value.locations.return_value.jobs.return_value.update
        mock_update.assert_called_once_with(
            body={'requestedState': requested_state},
            jobId='test-job-id',
            location=TEST_LOCATION,
            projectId='test-project',
        )
        mock_batch.add.assert_called_once_with(mock_update.return_value)
        mock_batch.execute.assert_called_once()
    def test_dataflow_job_cancel_job_no_running_jobs(self):
        """cancel() is a no-op when every job is already in a terminal state."""
        mock_jobs = self.mock_dataflow.projects.return_value.locations.return_value.jobs
        get_method = mock_jobs.return_value.get
        # All returned states are terminal, so there is nothing to cancel.
        get_method.return_value.execute.side_effect = [
            {"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_DONE},
            {"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_UPDATED},
            {"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_DRAINED},
            {"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_FAILED},
            {"id": TEST_JOB_ID, "name": JOB_NAME, "currentState": DataflowJobStatus.JOB_STATE_CANCELLED},
        ]
        mock_jobs.return_value.list_next.return_value = None
        dataflow_job = _DataflowJobsController(
            dataflow=self.mock_dataflow,
            project_number=TEST_PROJECT,
            name=UNIQUE_JOB_NAME,
            location=TEST_LOCATION,
            poll_sleep=0,
            job_id=TEST_JOB_ID,
            num_retries=20,
            multiple_jobs=False,
        )
        dataflow_job.cancel()
        get_method.assert_called_with(jobId=TEST_JOB_ID, location=TEST_LOCATION, projectId=TEST_PROJECT)
        get_method.return_value.execute.assert_called_with(num_retries=20)
        # No batch request and no state update may be issued.
        self.mock_dataflow.new_batch_http_request.assert_not_called()
        mock_jobs.return_value.update.assert_not_called()
def test_fetch_list_job_messages_responses(self):
# fmt: off
mock_list = (
self.mock_dataflow
.projects.return_value
.locations.return_value
.jobs.return_value
.messages.return_value
.list
)
mock_list_next = (
self.mock_dataflow.
projects.return_value.
locations.return_value.
jobs.return_value
.messages.return_value
.list_next
)
# fmt: on
mock_list.return_value.execute.return_value = "response_1"
mock_list_next.return_value = None
jobs_controller = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
location=TEST_LOCATION,
job_id=TEST_JOB_ID,
)
result = list(jobs_controller._fetch_list_job_messages_responses(TEST_JOB_ID))
mock_list.assert_called_once_with(projectId=TEST_PROJECT, location=TEST_LOCATION, jobId=TEST_JOB_ID)
mock_list_next.assert_called_once_with(
previous_request=mock_list.return_value, previous_response="response_1"
)
assert result == ["response_1"]
def test_fetch_all_jobs_when_no_jobs_returned(self):
# fmt: off
(
self.mock_dataflow
.projects.return_value
.locations.return_value
.jobs.return_value
.list.return_value
.execute.return_value
) = {}
# fmt: on
jobs_controller = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
location=TEST_LOCATION,
job_id=TEST_JOB_ID,
)
result = jobs_controller._fetch_all_jobs()
assert result == []
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController._fetch_list_job_messages_responses'))
def test_fetch_job_messages_by_id(self, mock_fetch_responses):
mock_fetch_responses.return_value = iter(
[
{"jobMessages": ["message_1"]},
{"jobMessages": ["message_2"]},
]
)
jobs_controller = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
location=TEST_LOCATION,
job_id=TEST_JOB_ID,
)
result = jobs_controller.fetch_job_messages_by_id(TEST_JOB_ID)
mock_fetch_responses.assert_called_once_with(job_id=TEST_JOB_ID)
assert result == ['message_1', 'message_2']
@mock.patch(DATAFLOW_STRING.format('_DataflowJobsController._fetch_list_job_messages_responses'))
def test_fetch_job_autoscaling_events_by_id(self, mock_fetch_responses):
mock_fetch_responses.return_value = iter(
[
{"autoscalingEvents": ["event_1"]},
{"autoscalingEvents": ["event_2"]},
]
)
jobs_controller = _DataflowJobsController(
dataflow=self.mock_dataflow,
project_number=TEST_PROJECT,
location=TEST_LOCATION,
job_id=TEST_JOB_ID,
)
result = jobs_controller.fetch_job_autoscaling_events_by_id(TEST_JOB_ID)
mock_fetch_responses.assert_called_once_with(job_id=TEST_JOB_ID)
assert result == ['event_1', 'event_2']
# Sample runner output used to verify Dataflow job-id extraction across SDK
# versions and languages. Trailing backslashes continue long log lines inside
# the f-strings.
# NOTE: each fixture previously opened with f"""" — the fourth quote was a
# stray literal '"' leaked into the first log line; fixed to f""" so the
# fixtures match real SDK output. Job-id extraction is unaffected either way.
APACHE_BEAM_V_2_14_0_JAVA_SDK_LOG = f"""\
Dataflow SDK version: 2.14.0
Jun 15, 2020 2:57:28 PM org.apache.beam.runners.dataflow.DataflowRunner run
INFO: To access the Dataflow monitoring console, please navigate to https://console.cloud.google.com/dataflow\
/jobsDetail/locations/europe-west3/jobs/{TEST_JOB_ID}?project=XXX
Submitted job: {TEST_JOB_ID}
Jun 15, 2020 2:57:28 PM org.apache.beam.runners.dataflow.DataflowRunner run
INFO: To cancel the job using the 'gcloud' tool, run:
> gcloud dataflow jobs --project=XXX cancel --region=europe-west3 {TEST_JOB_ID}
"""
APACHE_BEAM_V_2_22_0_JAVA_SDK_LOG = f"""\
INFO: Dataflow SDK version: 2.22.0
Jun 15, 2020 3:09:03 PM org.apache.beam.runners.dataflow.DataflowRunner run
INFO: To access the Dataflow monitoring console, please navigate to https://console.cloud.google.com/dataflow\
/jobs/europe-west3/{TEST_JOB_ID}?project=XXXX
Jun 15, 2020 3:09:03 PM org.apache.beam.runners.dataflow.DataflowRunner run
INFO: Submitted job: {TEST_JOB_ID}
Jun 15, 2020 3:09:03 PM org.apache.beam.runners.dataflow.DataflowRunner run
INFO: To cancel the job using the 'gcloud' tool, run:
> gcloud dataflow jobs --project=XXX cancel --region=europe-west3 {TEST_JOB_ID}
"""
APACHE_BEAM_V_2_14_0_PYTHON_SDK_LOG = f"""\
INFO:root:Completed GCS upload to gs://test-dataflow-example/staging/start-python-job-local-5bcf3d71.\
1592286375.000962/apache_beam-2.14.0-cp37-cp37m-manylinux1_x86_64.whl in 0 seconds.
INFO:root:Create job: <Job
 createTime: '2020-06-16T05:46:20.911857Z'
 currentStateTime: '1970-01-01T00:00:00Z'
 id: '{TEST_JOB_ID}'
 location: 'us-central1'
 name: 'start-python-job-local-5bcf3d71'
 projectId: 'XXX'
 stageStates: []
 startTime: '2020-06-16T05:46:20.911857Z'
 steps: []
 tempFiles: []
 type: TypeValueValuesEnum(JOB_TYPE_BATCH, 1)>
INFO:root:Created job with id: [{TEST_JOB_ID}]
INFO:root:To access the Dataflow monitoring console, please navigate to https://console.cloud.google.com/\
dataflow/jobsDetail/locations/us-central1/jobs/{TEST_JOB_ID}?project=XXX
"""
APACHE_BEAM_V_2_22_0_PYTHON_SDK_LOG = f"""\
INFO:apache_beam.runners.dataflow.internal.apiclient:Completed GCS upload to gs://test-dataflow-example/\
staging/start-python-job-local-5bcf3d71.1592286719.303624/apache_beam-2.22.0-cp37-cp37m-manylinux1_x86_64.whl\
 in 1 seconds.
INFO:apache_beam.runners.dataflow.internal.apiclient:Create job: <Job
 createTime: '2020-06-16T05:52:04.095216Z'
 currentStateTime: '1970-01-01T00:00:00Z'
 id: '{TEST_JOB_ID}'
 location: 'us-central1'
 name: 'start-python-job-local-5bcf3d71'
 projectId: 'XXX'
 stageStates: []
 startTime: '2020-06-16T05:52:04.095216Z'
 steps: []
 tempFiles: []
 type: TypeValueValuesEnum(JOB_TYPE_BATCH, 1)>
INFO:apache_beam.runners.dataflow.internal.apiclient:Created job with id: [{TEST_JOB_ID}]
INFO:apache_beam.runners.dataflow.internal.apiclient:Submitted job: {TEST_JOB_ID}
INFO:apache_beam.runners.dataflow.internal.apiclient:To access the Dataflow monitoring console, please \
navigate to https://console.cloud.google.com/dataflow/jobs/us-central1/{TEST_JOB_ID}?project=XXX
"""
class TestDataflow(unittest.TestCase):
    """Tests for extracting the Dataflow job id from Beam runner log output."""

    @parameterized.expand(
        [
            (APACHE_BEAM_V_2_14_0_JAVA_SDK_LOG,),
            (APACHE_BEAM_V_2_22_0_JAVA_SDK_LOG,),
            (APACHE_BEAM_V_2_14_0_PYTHON_SDK_LOG,),
            (APACHE_BEAM_V_2_22_0_PYTHON_SDK_LOG,),
        ],
        name_func=lambda func, num, p: f"{func.__name__}_{num}",
    )
    def test_data_flow_valid_job_id(self, log):
        """Each SDK's log format must surface TEST_JOB_ID through the line callback."""
        # Re-emit the sample log line by line through a real subprocess so the
        # runner's output scanning is exercised end to end.
        echos = ";".join(f"echo {shlex.quote(line)}" for line in log.split("\n"))
        cmd = ["bash", "-c", echos]
        found_job_id = None

        def callback(job_id):
            nonlocal found_job_id
            found_job_id = job_id

        BeamCommandRunner(
            cmd, process_line_callback=process_line_and_extract_dataflow_job_id_callback(callback)
        ).wait_for_done()
        self.assertEqual(found_job_id, TEST_JOB_ID)

    def test_data_flow_missing_job_id(self):
        """Output with no Dataflow job line must leave the callback unfired."""
        cmd = ['echo', 'unit testing']
        found_job_id = None

        def callback(job_id):
            nonlocal found_job_id
            found_job_id = job_id

        BeamCommandRunner(
            cmd, process_line_callback=process_line_and_extract_dataflow_job_id_callback(callback)
        ).wait_for_done()
        self.assertEqual(found_job_id, None)

    @mock.patch('airflow.providers.apache.beam.hooks.beam.BeamCommandRunner.log')
    @mock.patch('subprocess.Popen')
    @mock.patch('select.select')
    def test_dataflow_wait_for_done_logging(self, mock_select, mock_popen, mock_logging):
        """A non-zero process exit makes wait_for_done raise after logging output."""
        mock_logging.info = MagicMock()
        mock_logging.warning = MagicMock()
        mock_proc = MagicMock()
        mock_proc.stderr = MagicMock()
        mock_proc.stderr.readlines = MagicMock(return_value=['test\n', 'error\n'])
        mock_stderr_fd = MagicMock()
        mock_proc.stderr.fileno = MagicMock(return_value=mock_stderr_fd)
        mock_proc_poll = MagicMock()
        # select reports stderr readable so the runner drains it.
        mock_select.return_value = [[mock_stderr_fd]]

        def poll_resp_error():
            mock_proc.return_code = 1
            return True

        # First poll: still running (None); second poll yields a truthy value
        # (the function object itself) so the wait loop exits.
        # NOTE(review): the function is returned, not called — the loop only
        # needs truthiness here, but the side effect inside never runs; confirm
        # against the runner's poll handling if this test is extended.
        mock_proc_poll.side_effect = [None, poll_resp_error]
        mock_proc.poll = mock_proc_poll
        mock_popen.return_value = mock_proc
        dataflow = BeamCommandRunner(['test', 'cmd'])
        mock_logging.info.assert_called_once_with('Running command: %s', 'test cmd')
        self.assertRaises(Exception, dataflow.wait_for_done)
| {
"content_hash": "89a956409fefe88192b15a6144a0c56d",
"timestamp": "",
"source": "github",
"line_count": 1853,
"max_line_length": 110,
"avg_line_length": 41.99082568807339,
"alnum_prop": 0.6220745672094488,
"repo_name": "cfei18/incubator-airflow",
"id": "0076c3f879d56d5e79badf18b035ad9edffbba42",
"size": "78596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/providers/google/cloud/hooks/test_dataflow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "72003"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173434"
},
{
"name": "JavaScript",
"bytes": "143068"
},
{
"name": "Jinja",
"bytes": "38808"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "22660683"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "312715"
},
{
"name": "TypeScript",
"bytes": "472379"
}
],
"symlink_target": ""
} |
"""Library containing special bots."""
#
# (C) Rob W.W. Hooft, Andre Engels 2003-2004
# (C) Pywikibot team, 2003-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import os
import tempfile
import time
import pywikibot
import pywikibot.data.api
from pywikibot import config
from pywikibot.bot import (
BaseBot, ExistingPageBot, NoRedirectPageBot, AutomaticTWSummaryBot,
InteractiveReplace, ChoiceException, UnhandledAnswer, AlwaysChoice,
QuitKeyboardInterrupt,
)
from pywikibot.editor import TextEditor
from pywikibot.textlib import replace_links
from pywikibot.tools import PY2, deprecated, deprecated_args
from pywikibot.tools.formatter import color_format
if not PY2:
from urllib.parse import urlparse
from urllib.request import URLopener
basestring = (str,)
else:
from urllib import URLopener
from urlparse import urlparse
class UploadRobot(BaseBot):
"""Upload bot."""
@deprecated_args(uploadByUrl=None)
def __init__(self, url, urlEncoding=None, description=u'',
useFilename=None, keepFilename=False,
verifyDescription=True, ignoreWarning=False,
targetSite=None, aborts=[], chunk_size=0,
summary=None, **kwargs):
"""
Constructor.
@param url: path to url or local file (deprecated), or list of urls or
paths to local files.
@type url: string (deprecated) or list
@param description: Description of file for its page. If multiple files
are uploading the same description is used for every file.
@type description: string
@param useFilename: Specify title of the file's page. If multiple
files are uploading it asks to change the name for second, third,
etc. files, otherwise the last file will overwrite the other.
@type useFilename: string
@param keepFilename: Set to True to keep original names of urls and
files, otherwise it will ask to enter a name for each file.
@type keepFilename: bool
@param summary: Summary of the upload
@type summary: string
@param verifyDescription: Set to True to proofread the description.
@type verifyDescription: bool
@param ignoreWarning: Set this to True to upload even if another file
would be overwritten or another mistake would be risked. Set it to
an array of warning codes to selectively ignore specific warnings.
@type ignoreWarning: bool or list
@param targetSite: Set the site to upload to. If target site is not
given it's taken from user-config.py.
@type targetSite: object
@param aborts: List of the warning types to abort upload on. Set to
True to abort on any warning.
@type aborts: bool or list
@param chunk_size: Upload the file in chunks (more overhead, but
restartable) specified in bytes. If no value is specified the file
will be uploaded as whole.
@type chunk_size: integer
@param always: Disables any input, requires that either ignoreWarning
or aborts are set to True and that the description is also set. It
overwrites verifyDescription to False and keepFilename to True.
@type always: bool
@deprecated: Using upload_image() is deprecated, use upload_file() with
file_url param instead
"""
super(UploadRobot, self).__init__(**kwargs)
always = self.getOption('always')
if (always and ignoreWarning is not True and aborts is not True):
raise ValueError('When always is set to True, either '
'ignoreWarning or aborts must be set to True.')
if always and not description:
raise ValueError('When always is set to True, the description '
'must be set.')
self.url = url
if isinstance(self.url, basestring):
pywikibot.warning("url as string is deprecated. "
"Use an iterable instead.")
self.urlEncoding = urlEncoding
self.description = description
self.useFilename = useFilename
self.keepFilename = keepFilename or always
self.verifyDescription = verifyDescription and not always
self.ignoreWarning = ignoreWarning
self.aborts = aborts
self.chunk_size = chunk_size
self.summary = summary
if config.upload_to_commons:
self.targetSite = targetSite or pywikibot.Site('commons',
'commons')
else:
self.targetSite = targetSite or pywikibot.Site()
self.targetSite.login()
@deprecated()
def urlOK(self):
"""Return True if self.url is a URL or an existing local file."""
return "://" in self.url or os.path.exists(self.url)
    def read_file_content(self, file_url=None):
        """Return name of temp file in which remote file is saved.

        Downloads *file_url* (default: self.url), resuming via HTTP Range
        requests when the connection drops mid-transfer, and writes the bytes
        to a temporary file whose path is returned. Returns None if the server
        responds with an HTML page instead of the file.
        """
        if not file_url:
            file_url = self.url
            pywikibot.warning("file_url is not given. "
                              "Set to self.url by default.")
        pywikibot.output(u'Reading file %s' % file_url)
        resume = False          # whether the next request resumes a partial download
        rlen = 0                # bytes received so far
        _contents = None        # accumulated payload
        dt = 15                 # back-off delay in seconds, grows on each retry
        uo = URLopener()
        retrieved = False
        while not retrieved:
            if resume:
                pywikibot.output(u"Resume download...")
                # Ask the server for the remaining byte range only.
                uo.addheader('Range', 'bytes=%s-' % rlen)
            infile = uo.open(file_url)
            info = infile.info()
            # Header access API differs between Python 2 and 3.
            if PY2:
                content_type = info.getheader('Content-Type')
                content_len = info.getheader('Content-Length')
                accept_ranges = info.getheader('Accept-Ranges')
            else:
                content_type = info.get('Content-Type')
                content_len = info.get('Content-Length')
                accept_ranges = info.get('Accept-Ranges')
            # An HTML response means an error page, not the requested file.
            # NOTE(review): assumes content_type is never None — a response
            # without Content-Type would raise TypeError here; confirm.
            if 'text/html' in content_type:
                pywikibot.output(u"Couldn't download the image: "
                                 "the requested URL was not found on server.")
                return
            valid_ranges = accept_ranges == 'bytes'
            if resume:
                _contents += infile.read()
            else:
                _contents = infile.read()
            infile.close()
            retrieved = True
            if content_len:
                rlen = len(_contents)
                content_len = int(content_len)
                if rlen < content_len:
                    # Short read: retry, resuming only if the server supports
                    # byte ranges and we actually received something.
                    retrieved = False
                    pywikibot.output(
                        u"Connection closed at byte %s (%s left)"
                        % (rlen, content_len))
                    if valid_ranges and rlen > 0:
                        resume = True
                    pywikibot.output(u"Sleeping for %d seconds..." % dt)
                    time.sleep(dt)
                    # Grow the back-off: +15s up to 60s, then +60s, capped
                    # once dt reaches 360s.
                    if dt <= 60:
                        dt += 15
                    elif dt < 360:
                        dt += 60
            else:
                # Without Content-Length we cannot verify completeness.
                pywikibot.log(
                    u"WARNING: length check of retrieved data not possible.")
        # Persist the payload to a temp file and hand back its path.
        handle, tempname = tempfile.mkstemp()
        with os.fdopen(handle, "wb") as t:
            t.write(_contents)
        return tempname
def _handle_warning(self, warning):
"""
Return whether the warning cause an abort or be ignored.
@param warning: The warning name
@type warning: str
@return: False if this warning should cause an abort, True if it should
be ignored or None if this warning has no default handler.
@rtype: bool or None
"""
if self.aborts is not True:
if warning in self.aborts:
return False
if self.ignoreWarning is True or (self.ignoreWarning is not False and
warning in self.ignoreWarning):
return True
return None if self.aborts is not True else False
def _handle_warnings(self, warnings):
messages = '\n'.join('{0.code}: {0.info}'.format(warning)
for warning in sorted(warnings,
key=lambda w: w.code))
if len(warnings) > 1:
messages = '\n' + messages
pywikibot.output('We got the following warning(s): ' + messages)
answer = True
for warning in warnings:
this_answer = self._handle_warning(warning.code)
if this_answer is False:
answer = False
break
elif this_answer is None:
answer = None
if answer is None:
answer = pywikibot.input_yn(u"Do you want to ignore?",
default=False, automatic_quit=False)
return answer
    def process_filename(self, file_url=None):
        """Return base filename portion of file_url.

        Derives a target wiki filename from *file_url* (default: self.url),
        then interactively validates it (forbidden characters, allowed
        extensions, existing-file collisions) and ensures a non-empty
        description. Returns the accepted filename, or None when the file
        should be skipped.
        """
        if not file_url:
            file_url = self.url
            pywikibot.warning("file_url is not given. "
                              "Set to self.url by default.")
        always = self.getOption('always')
        # Isolate the pure name
        filename = file_url
        # Filename may be either a URL or a local file path
        if "://" in filename:
            # extract the path portion of the URL
            filename = urlparse(filename).path
        filename = os.path.basename(filename)
        if self.useFilename:
            filename = self.useFilename
        if not self.keepFilename:
            pywikibot.output(
                u"The filename on the target wiki will default to: %s"
                % filename)
            # 'always' implies keepFilename (enforced in __init__), so this
            # interactive branch cannot be reached in non-interactive mode.
            assert not always
            newfn = pywikibot.input(
                u'Enter a better name, or press enter to accept:')
            if newfn != "":
                filename = newfn
        # FIXME: these 2 belong somewhere else, presumably in family
        # forbidden characters are handled by pywikibot/page.py
        forbidden = ':*?/\\'  # to be extended
        try:
            allowed_formats = self.targetSite.siteinfo.get(
                'fileextensions', get_default=False)
        except KeyError:
            allowed_formats = []
        else:
            allowed_formats = [item['ext'] for item in allowed_formats]
        # ask until it's valid
        first_check = True
        while True:
            if not first_check:
                # A previous candidate was rejected: ask for a replacement,
                # or skip the file outright in non-interactive mode.
                if always:
                    filename = None
                else:
                    filename = pywikibot.input('Enter a better name, or press '
                                               'enter to skip the file:')
                if not filename:
                    return None
            first_check = False
            ext = os.path.splitext(filename)[1].lower().strip('.')
            # are any chars in forbidden also in filename?
            invalid = set(forbidden) & set(filename)
            if invalid:
                c = "".join(invalid)
                pywikibot.output(
                    'Invalid character(s): %s. Please try again' % c)
                continue
            # Reject extensions the target site does not accept.
            if allowed_formats and ext not in allowed_formats:
                if always:
                    pywikibot.output('File format is not one of '
                                     '[{0}]'.format(' '.join(allowed_formats)))
                    continue
                elif not pywikibot.input_yn(
                        u"File format is not one of [%s], but %s. Continue?"
                        % (u' '.join(allowed_formats), ext),
                        default=False, automatic_quit=False):
                    continue
            # Collision handling: the name may already exist locally or in a
            # shared repository (e.g. Commons).
            potential_file_page = pywikibot.FilePage(self.targetSite, filename)
            if potential_file_page.exists():
                overwrite = self._handle_warning('exists')
                if overwrite is False:
                    pywikibot.output(
                        'File exists and you asked to abort. Skipping.')
                    return None
                if potential_file_page.canBeEdited():
                    if overwrite is None:
                        overwrite = not pywikibot.input_yn(
                            "File with name %s already exists. "
                            "Would you like to change the name? "
                            "(Otherwise file will be overwritten.)"
                            % filename, default=True,
                            automatic_quit=False)
                    if not overwrite:
                        continue
                    else:
                        break
                else:
                    pywikibot.output(u"File with name %s already exists and "
                                     "cannot be overwritten." % filename)
                    continue
            else:
                try:
                    if potential_file_page.fileIsShared():
                        pywikibot.output(
                            'File with name %s already exists in shared '
                            'repository and cannot be overwritten.' % filename)
                        continue
                    else:
                        break
                except pywikibot.NoPage:
                    break
        # A proper description for the submission.
        # Empty descriptions are not accepted.
        if self.description:
            pywikibot.output('The suggested description is:\n%s'
                             % self.description)
        while not self.description or self.verifyDescription:
            if not self.description:
                pywikibot.output(color_format(
                    '{lightred}It is not possible to upload a file '
                    'without a description.{default}'))
                # An empty description with always=True is rejected in
                # __init__, so prompting here is safe.
                assert not always
            # if no description, ask if user want to add one or quit,
            # and loop until one is filled.
            # if self.verifyDescription, ask if user want to change it
            # or continue.
            if self.description:
                question = 'Do you want to change this description?'
            else:
                question = 'No description was given. Add one?'
            if pywikibot.input_yn(question, default=not self.description,
                                  automatic_quit=self.description):
                from pywikibot import editor as editarticle
                editor = editarticle.TextEditor()
                try:
                    newDescription = editor.edit(self.description)
                except ImportError:
                    raise
                except Exception as e:
                    pywikibot.error(e)
                    continue
                # if user saved / didn't press Cancel
                if newDescription:
                    self.description = newDescription
            elif not self.description:
                raise QuitKeyboardInterrupt
            self.verifyDescription = False
        return filename
def abort_on_warn(self, warn_code):
"""Determine if the warning message should cause an abort."""
if self.aborts is True:
return True
else:
return warn_code in self.aborts
def ignore_on_warn(self, warn_code):
"""Determine if the warning message should be ignored."""
if self.ignoreWarning is True:
return True
else:
return warn_code in self.ignoreWarning
@deprecated('UploadRobot.upload_file()')
@deprecated_args(debug=None)
def upload_image(self):
"""Upload image."""
return self.upload_file(self.url)
    @deprecated_args(debug=None)
    def upload_file(self, file_url, _file_key=None, _offset=0):
        """Upload the image at file_url to the target wiki.

        Return the filename that was used to upload the image.
        If the upload fails, ask the user whether to try again or not.
        If the user chooses not to retry, return None.
        """
        filename = self.process_filename(file_url)
        if not filename:
            return None
        site = self.targetSite
        imagepage = pywikibot.FilePage(site, filename)  # normalizes filename
        imagepage.text = self.description
        pywikibot.output('Uploading file to {0}...'.format(site))
        success = False
        # Either ignore all warnings outright, or pass the interactive handler
        # as a callback so the API can ask per-warning.
        ignore_warnings = self.ignoreWarning is True or self._handle_warnings
        # Without the 'upload_by_url' right the file must be fetched locally
        # first and uploaded as data.
        if ('://' in file_url and
                'upload_by_url' not in site.userinfo['rights']):
            file_url = self.read_file_content(file_url)
        try:
            success = imagepage.upload(file_url,
                                       ignore_warnings=ignore_warnings,
                                       chunk_size=self.chunk_size,
                                       _file_key=_file_key, _offset=_offset,
                                       comment=self.summary)
        except pywikibot.data.api.APIError as error:
            if error.code == u'uploaddisabled':
                pywikibot.error(
                    'Upload error: Local file uploads are disabled on %s.'
                    % site)
            else:
                pywikibot.error("Upload error: ", exc_info=True)
            return None
        except Exception:
            # Best-effort: log the traceback and report failure for this file.
            pywikibot.error("Upload error: ", exc_info=True)
            return None
        else:
            if success:
                # No warning, upload complete.
                pywikibot.output(u"Upload of %s successful." % filename)
                self._save_counter += 1
                return filename  # data['filename']
            else:
                pywikibot.output(u"Upload aborted.")
                return None
    def run(self):
        """Run bot.

        Verifies upload is possible on the target site, then uploads every
        configured URL/path, handling user-initiated quits gracefully.
        """
        # early check that upload is enabled
        if self.targetSite.is_uploaddisabled():
            pywikibot.error(
                "Upload error: Local file uploads are disabled on %s."
                % self.targetSite)
            return
        # early check that user has proper rights to upload
        if "upload" not in self.targetSite.userinfo["rights"]:
            pywikibot.error(
                "User '%s' does not have upload rights on site %s."
                % (self.targetSite.user(), self.targetSite))
            return
        try:
            if isinstance(self.url, basestring):
                # Deprecated single-string mode: upload one file and return
                # the filename used.
                self._treat_counter = 1
                return self.upload_file(self.url)
            for file_url in self.url:
                self.upload_file(file_url)
                self._treat_counter += 1
        except QuitKeyboardInterrupt:
            pywikibot.output('\nUser quit %s bot run...' %
                             self.__class__.__name__)
        except KeyboardInterrupt:
            if config.verbose_output:
                raise
            else:
                pywikibot.output('\nKeyboardInterrupt during %s bot run...' %
                                 self.__class__.__name__)
        finally:
            # Always let the base bot report statistics / clean up.
            self.exit()
class EditReplacement(ChoiceException, UnhandledAnswer):
    """The text should be edited and replacement should be restarted."""

    def __init__(self):
        """Constructor: register the choice as 'edit' with shortcut 'e'."""
        super(EditReplacement, self).__init__('edit', 'e')
        # Tell the caller to stop the current replacement pass so the edited
        # text can be re-processed from the start.
        self.stop = True
class InteractiveUnlink(InteractiveReplace):
    """An implementation which just allows unlinking."""

    def __init__(self, bot):
        """Create default settings.

        @param bot: the bot whose pageToUnlink and options drive this choice
        """
        # new_link=False means links are removed, never replaced;
        # default 'u' pre-selects "unlink" at each prompt.
        super(InteractiveUnlink, self).__init__(
            old_link=bot.pageToUnlink, new_link=False, default='u')
        self._always = AlwaysChoice(self, 'unlink all pages', 'a')
        self._always.always = bot.getOption('always')
        self.additional_choices = [
            AlwaysChoice(self, 'unlink all on page', 'p'),
            self._always, EditReplacement()]
        self._bot = bot
        # Number of characters of context shown around each match.
        self.context = 100
        self.context_change = 100

    def handle_answer(self, choice):
        """Handle choice and store in bot's options."""
        answer = super(InteractiveUnlink, self).handle_answer(choice)
        # Persist "always" back to the bot so it applies to later pages too.
        self._bot.options['always'] = self._always.always
        return answer
class BaseUnlinkBot(ExistingPageBot, NoRedirectPageBot, AutomaticTWSummaryBot):
    """A basic bot unlinking a given link from the current page."""

    def __init__(self, **kwargs):
        """Redirect all parameters and add namespace as an available option."""
        self.availableOptions.update({
            'namespaces': [],
            # Which namespaces should be processed?
            # default to [] which means all namespaces will be processed
        })
        super(BaseUnlinkBot, self).__init__(**kwargs)

    def _create_callback(self):
        """Create a new callback instance for replace_links."""
        return InteractiveUnlink(self)

    def unlink(self, target_page):
        """Unlink all links linking to the target page.

        Repeats the interactive replacement until it completes without the
        user requesting an external edit, then saves the resulting text.
        """
        text = self.current_page.text
        while True:
            unlink_callback = self._create_callback()
            try:
                text = replace_links(text, unlink_callback, target_page.site)
            except EditReplacement:
                # User chose 'edit': open the partially processed text in an
                # external editor, then restart replacement on the result.
                new_text = TextEditor().edit(
                    unlink_callback.current_text,
                    jumpIndex=unlink_callback.current_range[0])
                # if user didn't press Cancel
                if new_text:
                    text = new_text
                else:
                    text = unlink_callback.current_text
            else:
                break
        self.put_current(text)
| {
"content_hash": "c7f57cb67d74ad81682e269a2a60b804",
"timestamp": "",
"source": "github",
"line_count": 552,
"max_line_length": 79,
"avg_line_length": 39.643115942028984,
"alnum_prop": 0.5439382168806837,
"repo_name": "magul/pywikibot-core",
"id": "0818e72799607e912e6afb681a927d4612fed533",
"size": "21925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pywikibot/specialbots.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "HTML",
"bytes": "1365"
},
{
"name": "Python",
"bytes": "4538707"
}
],
"symlink_target": ""
} |
import logging
from django.db import migrations
logger = logging.getLogger()
def update_bearing_manufacturer(apps, schema_editor):
    """Link each Bearing to the BearingManufacturer whose name matches its cipher.

    Bearings whose cipher has no matching manufacturer are logged and skipped
    (best-effort data migration).
    """
    # Use historical models so the migration works at this schema version.
    Bearing = apps.get_model('bearing', 'Bearing')
    BearingManufacturer = apps.get_model('bearing', 'BearingManufacturer')
    for item in Bearing.objects.all():
        try:
            item.manufacturer = BearingManufacturer.objects.get(name=item.cipher)
        except BearingManufacturer.DoesNotExist:
            # Fixed message typo ("does not exists") and switched to lazy
            # %-style logger arguments instead of eager % formatting.
            logger.error('Cipher %s does not exist', item.cipher)
        else:
            item.save()
class Migration(migrations.Migration):
    """Data migration populating Bearing.manufacturer from the cipher field."""

    # Must run after the manufacturer rows have been created.
    dependencies = [
        ('bearing', '0012_create_manufacturers'),
    ]

    operations = [
        # Forward-only data migration (no reverse function supplied).
        migrations.RunPython(update_bearing_manufacturer),
    ]
| {
"content_hash": "cb2d7eedc0ca612df3674c4d5ef9dd72",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 81,
"avg_line_length": 27.40740740740741,
"alnum_prop": 0.6756756756756757,
"repo_name": "manti-by/POD",
"id": "2f6ee589de45c59de2e161fd5c0b8af5b63a80c5",
"size": "740",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/bearing/migrations/0013_update_bearing_manufacturer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "5309"
},
{
"name": "HTML",
"bytes": "40177"
},
{
"name": "JavaScript",
"bytes": "5512"
},
{
"name": "Python",
"bytes": "128218"
},
{
"name": "Shell",
"bytes": "4923"
}
],
"symlink_target": ""
} |
from ..utils import Scraper
from .utsg import UTSGExams
from .utm import UTMExams
from .utsc import UTSCExams
from bs4 import BeautifulSoup
from collections import OrderedDict
import json
import os
import requests
class Exams:
    """Facade that runs the exam scrapers for all three UofT campuses."""

    # NOTE(review): this base URL appears unused within the visible code —
    # confirm whether any campus scraper still relies on it.
    host = 'http://www.artsandscience.utoronto.ca/ofr/calendar/'

    @staticmethod
    def scrape(location='.'):
        """Scrape exam data for UTSG, UTM and UTSC into *location*."""
        Scraper.logger.info('Exams initialized.')
        UTSGExams.scrape(location)
        UTMExams.scrape(location)
        UTSCExams.scrape(location)
        Scraper.logger.info('Exams completed.')
| {
"content_hash": "6a525f701506dd24c6775fab1273cd74",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 64,
"avg_line_length": 24.818181818181817,
"alnum_prop": 0.7252747252747253,
"repo_name": "g3wanghc/uoft-scrapers",
"id": "0efce98d1119feea351e08cfd205d04039b6b514",
"size": "546",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "uoftscrapers/scrapers/exams/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "85021"
}
],
"symlink_target": ""
} |
import gettext
# Install the _() translation function as a builtin for the 'heat' domain.
# NOTE(review): ``unicode=1`` is a Python 2-only keyword; Python 3's
# gettext.install() rejects it — confirm the targeted interpreter.
gettext.install('heat', unicode=1)
from heat.api.middleware.version_negotiation import VersionNegotiationFilter
from heat.api.cfn import versions
def version_negotiation_filter(app, conf, **local_conf):
    """Build a VersionNegotiationFilter wrapping *app*.

    The signature matches the paste.deploy filter-factory convention;
    versioned CFN API requests are dispatched to ``versions.Controller``.
    """
    controller = versions.Controller
    return VersionNegotiationFilter(controller, app, conf, **local_conf)
| {
"content_hash": "2c7ecbd429bbc07c04b87164ab415cdd",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 76,
"avg_line_length": 30.90909090909091,
"alnum_prop": 0.7147058823529412,
"repo_name": "Triv90/Heat",
"id": "b39691f4a0836d7a57caf765f15f904f85f6f844",
"size": "960",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/grizzly",
"path": "heat/api/cfn/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "1733741"
},
{
"name": "Shell",
"bytes": "19255"
}
],
"symlink_target": ""
} |
"""
Tests for unknown directives.
"""
from __init__ import DocutilsTestSupport
def suite():
    """Build the parser test suite from the module-level ``totest`` table."""
    parser_tests = DocutilsTestSupport.ParserTestSuite()
    parser_tests.generateTests(totest)
    return parser_tests
totest = {}
totest['unknown'] = [
["""\
.. reStructuredText-unknown-directive::
.. reStructuredText-unknown-directive:: argument
.. reStructuredText-unknown-directive::
block
""",
"""\
<document source="test data">
<system_message level="1" line="1" source="test data" type="INFO">
<paragraph>
No directive entry for "reStructuredText-unknown-directive" in module "docutils.parsers.rst.languages.en".
Trying "reStructuredText-unknown-directive" as canonical directive name.
<system_message level="3" line="1" source="test data" type="ERROR">
<paragraph>
Unknown directive type "reStructuredText-unknown-directive".
<literal_block xml:space="preserve">
.. reStructuredText-unknown-directive::
<system_message level="1" line="3" source="test data" type="INFO">
<paragraph>
No directive entry for "reStructuredText-unknown-directive" in module "docutils.parsers.rst.languages.en".
Trying "reStructuredText-unknown-directive" as canonical directive name.
<system_message level="3" line="3" source="test data" type="ERROR">
<paragraph>
Unknown directive type "reStructuredText-unknown-directive".
<literal_block xml:space="preserve">
.. reStructuredText-unknown-directive:: argument
<system_message level="1" line="5" source="test data" type="INFO">
<paragraph>
No directive entry for "reStructuredText-unknown-directive" in module "docutils.parsers.rst.languages.en".
Trying "reStructuredText-unknown-directive" as canonical directive name.
<system_message level="3" line="5" source="test data" type="ERROR">
<paragraph>
Unknown directive type "reStructuredText-unknown-directive".
<literal_block xml:space="preserve">
.. reStructuredText-unknown-directive::
block
"""],
]
if __name__ == '__main__':
    # Allow running this module directly; 'suite' assembles all test cases.
    import unittest
    unittest.main(defaultTest='suite')
| {
"content_hash": "0b2902c984450eb121f9ec078c5a9a62",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 118,
"avg_line_length": 37.28813559322034,
"alnum_prop": 0.6645454545454546,
"repo_name": "cloudera/hue",
"id": "69e2c01c98ef885ce8bcfeb52734693609cdc157",
"size": "2392",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/docutils-0.14/test/test_parsers/test_rst/test_directives/test_unknown.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
} |
import datetime
import webapp2
from testing_utils import testing
from handlers import list_analyses
from model.wf_analysis import WfAnalysis
from model import wf_analysis_status
from model import wf_analysis_result_status
from waterfall import identify_culprit_pipeline
class ListAnalysesTest(testing.AppengineTestCase):
app_module = webapp2.WSGIApplication(
[('/list-analyses', list_analyses.ListAnalyses),], debug=True)
  def setUp(self):
    """Seeds the test datastore with dummy analyses before each test."""
    super(ListAnalysesTest, self).setUp()
    # Maps analysis index -> formatted build start time, used by the
    # expected-result dictionaries in the test methods below.
    self.stored_dates = self._AddAnalysisResults()
  def testListAnalysesHandler(self):
    """The /list-analyses page renders successfully (HTTP 200)."""
    response = self.test_app.get('/list-analyses')
    self.assertEqual(200, response.status_int)
def _AddAnalysisResult(self, master_name, builder_name, build_number):
analysis = WfAnalysis.Create(master_name, builder_name, build_number)
analysis.status = wf_analysis_status.ANALYZING
analysis.put()
return analysis
def _AddAnalysisResults(self):
"""Create and store dummy data."""
analyses = []
stored_dates = {}
def StoreTestBuildDate(analysis_number, start_time):
if datetime: # pragma: no cover
stored_dates[analysis_number] = start_time.strftime(
'%Y-%m-%d %H:%M:%S UTC')
for i in range(0, 10):
analyses.append(self._AddAnalysisResult('m', 'b', i))
self._AddAnalysisResult('chromium.linux', 'Linux GN', 26120)
analyses.append(WfAnalysis.Get('chromium.linux', 'Linux GN', 26120))
analyses[1].status = wf_analysis_status.ANALYZED
analyses[2].status = wf_analysis_status.ANALYZED
analyses[3].status = wf_analysis_status.ANALYZED
analyses[4].status = wf_analysis_status.ERROR
analyses[7].status = wf_analysis_status.ANALYZED
analyses[9].status = wf_analysis_status.ANALYZED
analyses[10].status = wf_analysis_status.ANALYZED
analyses[2].build_start_time = datetime.datetime.utcnow()
StoreTestBuildDate(2, analyses[2].build_start_time)
analyses[7].build_start_time = (datetime.datetime.utcnow()
- datetime.timedelta(6))
StoreTestBuildDate(7, analyses[7].build_start_time)
analyses[10].build_start_time = (datetime.datetime.utcnow()
- datetime.timedelta(4))
StoreTestBuildDate(10, analyses[10].build_start_time)
analyses[1].result = {
'failures': [
{
'step_name': 'b',
'first_failure': 1,
'last_pass': None,
'suspected_cls': [
{
'build_number': 1,
'repo_name': 'chromium',
'revision': 'r99_1',
'commit_position': None,
'url': None,
'score': 5,
'hints': {
'added x/y/f99_1.cc (and it was in log)': 5,
},
}
],
}
]
}
analyses[2].result = {
'failures': [
{
'step_name': 'a',
'first_failure': 2,
'last_pass': None,
'suspected_cls': [],
},
{
'step_name': 'b',
'first_failure': 1,
'last_pass': None,
'suspected_cls': [],
}
]
}
analyses[3].result = {
'failures': [
{
'step_name': 'a',
'first_failure': 3,
'last_pass': None,
'suspected_cls': [],
},
{
'step_name': 'b',
'first_failure': 2,
'last_pass': None,
'suspected_cls': [],
}
]
}
analyses[7].result = {
'failures': [
{
'step_name': 'a',
'first_failure': 7,
'last_pass': None,
'suspected_cls': [
{
'build_number': 7,
'repo_name': 'chromium',
'revision': 'r99_2',
'commit_position': None,
'url': None,
'score': 1,
'hints': {
'modified f99_2.cc (and it was in log)': 1,
},
},
{
'build_number': 7,
'repo_name': 'chromium',
'revision': 'r99_6',
'commit_position': None,
'url': None,
'score': 5,
'hints': {
'added x/y/f99_7.cc (and it was in log)': 5,
},
}
],
},
{
'step_name': 'b',
'first_failure': 7,
'last_pass': None,
'suspected_cls': [
{
'build_number': 7,
'repo_name': 'chromium',
'revision': 'r99_1',
'commit_position': None,
'url': 'https://chromium.googlesource.com/chromium/'
'src/r99_1',
'score': 5,
'hints': {
'added x/y/f99_1.cc (and it was in log)': 5,
},
}
],
}
]
}
analyses[9].result = {
'failures': [
{
'step_name': 'a',
'first_failure': 9,
'last_pass': None,
'suspected_cls': [],
},
{
'step_name': 'b',
'first_failure': 9,
'last_pass': None,
'suspected_cls': [
{
'build_number': 9,
'repo_name': 'chromium',
'revision': 'r99_9',
'commit_position': None,
'url': None,
'score': 1,
'hints': {
'modified f99_9.cc (and it was in log)': 1,
},
}
],
}
]
}
analyses[10].result = {
'failures': [
{
'step_name': 'a',
'first_failure': 10,
'last_pass': None,
'suspected_cls': [
{
'build_number': 10,
'repo_name': 'chromium',
'revision': 'r99_10',
'commit_position': None,
'url': None,
'score': 5,
'hints': {
'added x/f99_10.cc (and it was in log)': 5,
},
}
],
},
{
'step_name': 'b',
'first_failure': 10,
'last_pass': None,
'suspected_cls': [ {
'build_number': 10,
'repo_name': 'chromium',
'revision': 'r99_10',
'commit_position': None,
'url': None,
'score': 1,
'hints': {
'modified x/f99_9.cc (and it was in log)': 1,
},
}
],
}
]
}
for analysis in analyses:
analysis.suspected_cls = identify_culprit_pipeline._GetSuspectedCLs(
analysis.result)
analysis.result_status = (identify_culprit_pipeline.
_GetResultAnalysisStatus(analysis.result))
analysis.put()
analyses[1].result_status = wf_analysis_result_status.FOUND_INCORRECT
analyses[1].put()
analyses[3].result_status = wf_analysis_result_status.NOT_FOUND_INCORRECT
analyses[3].put()
analyses[10].result_status = wf_analysis_result_status.FOUND_CORRECT
analyses[10].put()
return stored_dates
def testDisplayAggregatedBuildAnalysisResults(self):
"""Basic test case, no parameters."""
expected_result = {
'analyses': [
{
'master_name': 'chromium.linux',
'builder_name': 'Linux GN',
'build_number': 26120,
'build_start_time': self.stored_dates.get(10),
'status': 70,
'status_description': 'Analyzed',
'suspected_cls': [
{
'repo_name': 'chromium',
'revision': 'r99_10',
'commit_position': None,
'url': None
}
],
'result_status': 'Correct - Found'
},
{
'master_name': 'm',
'builder_name': 'b',
'build_number': 1,
'build_start_time': None,
'status': 70,
'status_description': 'Analyzed',
'suspected_cls':[
{
'repo_name': 'chromium',
'revision': 'r99_1',
'commit_position': None,
'url': None
}
],
'result_status': 'Incorrect - Found'
},
{
'master_name': 'm',
'builder_name': 'b',
'build_number': 3,
'build_start_time': None,
'status': 70,
'status_description': 'Analyzed',
'suspected_cls':[],
'result_status': 'Incorrect - Not Found'
}
],
'triage': '-1',
'days': '-1',
'count': '-1',
'result_status': '-1'
}
response_json = self.test_app.get('/list-analyses?format=json')
self.assertEqual(200, response_json.status_int)
self.assertEqual(expected_result, response_json.json_body)
def testDisplayAggregatedBuildAnalysisResultsTriage(self):
"""Test for parameter triage."""
expected_result = {
'analyses': [
{
'master_name': 'm',
'builder_name': 'b',
'build_number': 1,
'build_start_time': None,
'status': 70,
'status_description': 'Analyzed',
'suspected_cls':[
{
'repo_name': 'chromium',
'revision': 'r99_1',
'commit_position': None,
'url': None
}
],
'result_status': 'Incorrect - Found'
},
{
'master_name': 'm',
'builder_name': 'b',
'build_number': 3,
'build_start_time': None,
'status': 70,
'status_description': 'Analyzed',
'suspected_cls':[],
'result_status': 'Incorrect - Not Found'
},
{
'master_name': 'm',
'builder_name': 'b',
'build_number': 7,
'build_start_time': self.stored_dates.get(7),
'status': 70,
'status_description': 'Analyzed',
'suspected_cls': [
{
'repo_name': 'chromium',
'revision': 'r99_2',
'commit_position': None,
'url': None
},
{
'repo_name': 'chromium',
'revision': 'r99_6',
'commit_position': None,
'url': None
},
{
'repo_name': 'chromium',
'revision': 'r99_1',
'commit_position': None,
'url': 'https://chromium.googlesource.com'
'/chromium/src/r99_1'
}
],
'result_status': 'Untriaged - Found'
},
{
'master_name': 'm',
'builder_name': 'b',
'build_number': 9,
'build_start_time': None,
'status': 70,
'status_description': 'Analyzed',
'suspected_cls': [
{
'repo_name': 'chromium',
'revision': 'r99_9',
'commit_position': None,
'url': None
}
],
'result_status': 'Untriaged - Found'
},
{
'master_name': 'm',
'builder_name': 'b',
'build_number': 2,
'build_start_time': self.stored_dates.get(2),
'status': 70,
'status_description': 'Analyzed',
'suspected_cls': [],
'result_status': 'Untriaged - Not Found'
}
],
'triage': '1',
'days': '-1',
'count': '-1',
'result_status': '-1'
}
response_json = self.test_app.get('/list-analyses?format=json&triage=1')
self.assertEqual(200, response_json.status_int)
self.assertEqual(expected_result, response_json.json_body)
def testDisplayAggregatedBuildAnalysisResultsCount(self):
"""Test for parameter count."""
expected_result = {
'analyses': [
{
'master_name': 'chromium.linux',
'builder_name': 'Linux GN',
'build_number': 26120,
'build_start_time': self.stored_dates.get(10),
'status': 70,
'status_description': 'Analyzed',
'suspected_cls': [
{
'repo_name': 'chromium',
'revision': 'r99_10',
'commit_position': None,
'url': None
}
],
'result_status': 'Correct - Found'
},
{
'master_name': 'm',
'builder_name': 'b',
'build_number': 1,
'build_start_time': None,
'status': 70,
'status_description': 'Analyzed',
'suspected_cls':[
{
'repo_name': 'chromium',
'revision': 'r99_1',
'commit_position': None,
'url': None
}
],
'result_status': 'Incorrect - Found'
}
],
'triage': '-1',
'days': '-1',
'count': '2',
'result_status': '-1'
}
response_json = self.test_app.get('/list-analyses?format=json&count=2')
self.assertEqual(200, response_json.status_int)
self.assertEqual(expected_result, response_json.json_body)
def testDisplayAggregatedBuildAnalysisResultsResultStatus(self):
"""Test for parameter result_status."""
expected_result = {
'analyses': [
{
'master_name': 'm',
'builder_name': 'b',
'build_number': 1,
'build_start_time': None,
'status': 70,
'status_description': 'Analyzed',
'suspected_cls':[
{
'repo_name': 'chromium',
'revision': 'r99_1',
'commit_position': None,
'url': None
}
],
'result_status': 'Incorrect - Found'
}
],
'triage': '-1',
'days': '-1',
'count': '-1',
'result_status': '10'
}
response_json = self.test_app.get(
'/list-analyses?format=json&result_status=10')
self.assertEqual(200, response_json.status_int)
self.assertEqual(expected_result, response_json.json_body)
def DisplayAggregatedBuildAnalysisResultsDays(self): # pragma: no cover
"""Test for parameter days. Parameter triage will be turned off.
This test case will only run locally, because it may cause flaky failure.
"""
expected_result = {
'analyses': [
{
'master_name': 'm',
'builder_name': 'b',
'build_number': 2,
'build_start_time': self.stored_dates.get(2),
'status': 70,
'status_description': 'Analyzed',
'suspected_cls': [],
'result_status': 'Untriaged - Not Found'
},
{
'master_name': 'chromium.linux',
'builder_name': 'Linux GN',
'build_number': 26120,
'build_start_time': self.stored_dates.get(10),
'status': 70,
'status_description': 'Analyzed',
'suspected_cls': [
{
'repo_name': 'chromium',
'revision': 'r99_10',
'commit_position': None,
'url': None
}
],
'result_status': 'Correct - Found'
}
],
'triage': '1',
'days': '5',
'count': '-1',
'result_status': '-1'
}
response_json = self.test_app.get(
'/list-analyses?format=json&triage=1&days=5')
self.assertEqual(200, response_json.status_int)
self.assertEqual(expected_result, response_json.json_body)
def DisplayAggregatedBuildAnalysisResultsStatusDays(self): # pragma: no cover
"""Test for parameter combination dyas and result status.
This test case will only run locally, because it may cause flaky failure.
"""
expected_result = {
'analyses': [
{
'master_name': 'chromium.linux',
'builder_name': 'Linux GN',
'build_number': 26120,
'build_start_time': self.stored_dates.get(10),
'status': 70,
'status_description': 'Analyzed',
'suspected_cls': [
{
'repo_name': 'chromium',
'revision': 'r99_10',
'commit_position': None,
'url': None
}
],
'result_status': 'Correct - Found'
}
],
'triage': '-1',
'days': '6',
'count': '-1',
'result_status': '0'
}
response_json = self.test_app.get(
'/list-analyses?format=json&result_status=0&days=6')
self.assertEqual(200, response_json.status_int)
self.assertEqual(expected_result, response_json.json_body)
| {
"content_hash": "9b0af4413037cb292c6dc49d10d3c151",
"timestamp": "",
"source": "github",
"line_count": 580,
"max_line_length": 80,
"avg_line_length": 33.296551724137935,
"alnum_prop": 0.40902029826014913,
"repo_name": "nicko96/Chrome-Infra",
"id": "875d9040e94240b8e132de3800d61dcd0a85e58c",
"size": "19474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "appengine/findit/handlers/test/list_analyses_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "100398"
},
{
"name": "Go",
"bytes": "648467"
},
{
"name": "HTML",
"bytes": "7323317"
},
{
"name": "JavaScript",
"bytes": "913960"
},
{
"name": "Makefile",
"bytes": "11281"
},
{
"name": "Protocol Buffer",
"bytes": "2730"
},
{
"name": "Python",
"bytes": "4034630"
},
{
"name": "Shell",
"bytes": "21687"
}
],
"symlink_target": ""
} |
import tempfile
try:
from unittest.runner import TextTestResult
except ImportError:
# Support Python 2.6.
from unittest import _TextTestResult as TextTestResult
from tap.directive import Directive
from tap.line import Bail, Plan, Result
class Factory(object):
    """Build the objects that the TAP test suite repeatedly needs."""

    def make_ok(self, directive_text=''):
        """Return a passing ``Result`` with a canned description."""
        directive = Directive(directive_text)
        return Result(True, 1, 'This is a description.', directive)

    def make_not_ok(self, directive_text=''):
        """Return a failing ``Result`` with a canned description."""
        directive = Directive(directive_text)
        return Result(False, 1, 'This is a description.', directive)

    def make_bail(self, reason='Because it is busted.'):
        """Return a ``Bail`` carrying *reason*."""
        return Bail(reason)

    def make_plan(self, expected_tests=99, directive_text=''):
        """Return a ``Plan`` expecting *expected_tests* tests."""
        return Plan(expected_tests, Directive(directive_text))

    def make_test_result(self):
        """Return a ``TextTestResult`` writing into a throwaway temp file."""
        throwaway_stream = tempfile.TemporaryFile(mode='w')
        return TextTestResult(throwaway_stream, None, 1)
| {
"content_hash": "2ecd7e622096bc70dbd536bd7e063c9f",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 74,
"avg_line_length": 30.903225806451612,
"alnum_prop": 0.6774530271398748,
"repo_name": "Mark-E-Hamilton/tappy",
"id": "b8597c9c9cc3a0b6bd0dba7d7a7fbc90aa6bb452",
"size": "993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tap/tests/factory.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "92379"
}
],
"symlink_target": ""
} |
from ..factory.config import ConfigFactory
from ..exceptions import DuplicateDocument
from urllib.parse import quote_plus
import arrow
import pymongo
class PostSaver:
    """Persists post objects into the configured MongoDB-API collection."""

    def save(self, post=None):
        """Save *post*, stamping it with the current UTC insert time.

        Raises:
            AssertionError: if ``post`` is None.
            DuplicateDocument: if a document with the same permalink
                already exists in the collection.
        """
        assert post is not None, "post is not defined."
        post._insert_time = arrow.utcnow().datetime
        config = ConfigFactory.get_config(ConfigFactory.DATABASE)
        config = config.get("azure-document-db")
        conn = pymongo.MongoClient(config["connectionString"])
        # Fix: the original leaked the client whenever DuplicateDocument
        # (or any DB error) was raised before conn.close() was reached.
        try:
            db = conn[config["database"]]
            # Index is created lazily; background=True avoids blocking writes.
            db[config["collection"]].create_index("permalink", background=True)
            if db[config["collection"]].count({"permalink": post.permalink}) > 0:
                raise DuplicateDocument("Ops! Duplicate document!")
            db[config["collection"]].insert(post.to_dict())
        finally:
            conn.close()
"content_hash": "b6c1e60c83d0d2e1e0f85addad89df4a",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 71,
"avg_line_length": 32.08,
"alnum_prop": 0.7231920199501247,
"repo_name": "arsystem/ardegra",
"id": "6598bd73ba5d81312585bc27d7083ec15075560f",
"size": "802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/saver/post.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24690"
}
],
"symlink_target": ""
} |
import sys
import threading
import typing as tp # NOQA
from chainer import types # NOQA
if types.TYPE_CHECKING:
import numpy # NOQA
from chainer.graph_optimizations import static_graph # NOQA
class GlobalConfig(object):
    # Known configuration entry names; ``None`` means "not assigned yet".
    # The trailing ``# type:`` comments document the expected value types.
    debug = None  # type: bool
    cudnn_deterministic = None  # type: bool
    warn_nondeterministic = None  # type: bool
    enable_backprop = None  # type: bool
    keep_graph_on_report = None  # type: bool
    train = None  # type: bool
    type_check = None  # type: bool
    use_cudnn = None  # type: str
    use_cudnn_tensor_core = None  # type: str
    autotune = None  # type: bool
    schedule_func = None  # type: tp.Optional[static_graph.StaticScheduleFunction] # NOQA
    use_ideep = None  # type: str
    lazy_grad_sum = None  # type: bool
    cudnn_fast_batch_normalization = None  # type: bool
    dtype = None  # type: numpy.dtype
    in_recomputing = None  # type: bool
    """The plain object that represents the global configuration of Chainer."""
    def show(self, file=sys.stdout):
        """show(file=sys.stdout)
        Prints the global config entries.
        The entries are sorted in the lexicographical order of the entry name.
        Args:
            file: Output file-like object.
        """
        # Only entries actually assigned on the instance live in __dict__,
        # so unset class-level entries are not printed.
        keys = sorted(self.__dict__)
        _print_attrs(self, keys, file)
class LocalConfig(object):
    """Thread-local configuration of Chainer.
    This class implements the local configuration. When a value is set to this
    object, the configuration is only updated in the current thread. When a
    user tries to access an attribute and there is no local value, it
    automatically retrieves a value from the global configuration.
    """
    def __init__(self, global_config):
        # Bypass our own __setattr__ (which writes into thread-local
        # storage) while installing the two backing attributes.
        super(LocalConfig, self).__setattr__('_global', global_config)
        super(LocalConfig, self).__setattr__('_local', threading.local())
    def __delattr__(self, name):
        # Remove only the thread-local override; any global value for the
        # same name becomes visible again.
        delattr(self._local, name)
    def __getattr__(self, name):
        # Invoked only when normal lookup fails (i.e. for anything other
        # than _global/_local): prefer the thread-local value, then fall
        # back to the global configuration.
        dic = self._local.__dict__
        if name in dic:
            return dic[name]
        return getattr(self._global, name)
    def __setattr__(self, name, value):
        # Assignments always go to the current thread's storage.
        setattr(self._local, name, value)
    def show(self, file=sys.stdout):
        """show(file=sys.stdout)
        Prints the config entries.
        The entries are sorted in the lexicographical order of the entry names.
        Args:
            file: Output file-like object.
        .. admonition:: Example
            You can easily print the list of configurations used in
            the current thread.
            >>> chainer.config.show() # doctest: +SKIP
            debug False
            enable_backprop True
            train True
            type_check True
        """
        # Union of global and thread-local entries that were assigned.
        keys = sorted(set(self._global.__dict__) | set(self._local.__dict__))
        _print_attrs(self, keys, file)
def _print_attrs(obj, keys, file):
max_len = max(len(key) for key in keys)
for key in keys:
spacer = ' ' * (max_len - len(key))
file.write(u'{} {}{}\n'.format(key, spacer, getattr(obj, key)))
# Singleton: the process-wide configuration instance.
global_config = GlobalConfig()
'''Global configuration of Chainer.
It is an instance of :class:`chainer.configuration.GlobalConfig`.
See :ref:`configuration` for details.
'''
# Singleton: per-thread view layered on top of ``global_config``.
config = LocalConfig(global_config)
'''Thread-local configuration of Chainer.
It is an instance of :class:`chainer.configuration.LocalConfig`, and is
referring to :data:`~chainer.global_config` as its default configuration.
See :ref:`configuration` for details.
'''
class _ConfigContext(object):
is_local = False
old_value = None
def __init__(self, config, name, value):
self.config = config
self.name = name
self.value = value
def __enter__(self):
name = self.name
value = self.value
config = self.config
is_local = hasattr(config._local, name)
if is_local:
self.old_value = getattr(config, name)
self.is_local = is_local
setattr(config, name, value)
def __exit__(self, typ, value, traceback):
if self.is_local:
setattr(self.config, self.name, self.old_value)
else:
delattr(self.config, self.name)
def using_config(name, value, config=config):
    """using_config(name, value, config=chainer.config)
    Context manager to temporarily change the thread-local configuration.
    Args:
        name (str): Name of the configuration to change.
        value: Temporary value of the configuration entry.
        config (~chainer.configuration.LocalConfig): Configuration object.
            Chainer's thread-local configuration is used by default.
    .. seealso::
        :ref:`configuration`
    """
    # The previous value (or its absence) is restored when the context exits.
    return _ConfigContext(config, name, value)
| {
"content_hash": "e40400cbaf89180a69abd14fa5129a49",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 89,
"avg_line_length": 28.53529411764706,
"alnum_prop": 0.6237889095031952,
"repo_name": "tkerola/chainer",
"id": "b5e160acfc4fbc23587cce119f6b99abd7f45d80",
"size": "4851",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chainer/configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "3471733"
}
],
"symlink_target": ""
} |
"""Line-like geometrical entities.
Contains
========
LinearEntity
Line
Ray
Segment
"""
from __future__ import print_function, division
from sympy.core import S, C, sympify, Dummy
from sympy.core.logic import fuzzy_and
from sympy.core.exprtools import factor_terms
from sympy.core.relational import Eq
from sympy.functions.elementary.trigonometric import _pi_coeff as pi_coeff, \
sqrt
from sympy.functions.elementary.piecewise import Piecewise
from sympy.logic.boolalg import And
from sympy.simplify.simplify import simplify
from sympy.solvers import solve
from sympy.geometry.exceptions import GeometryError
from .entity import GeometryEntity
from .point import Point
from .util import _symbol
from sympy.core.compatibility import is_sequence
# TODO: this should be placed elsewhere and reused in other modules
class Undecidable(ValueError):
    """Raised when a geometric query cannot be decided from the given data."""
    pass
class LinearEntity(GeometryEntity):
"""A base class for all linear entities (line, ray and segment)
in a 2-dimensional Euclidean space.
Attributes
==========
p1
p2
coefficients
slope
points
Notes
=====
This is an abstract class and is not meant to be instantiated.
See Also
========
sympy.geometry.entity.GeometryEntity
"""
def __new__(cls, p1, p2, **kwargs):
p1 = Point(p1)
p2 = Point(p2)
if p1 == p2:
# if it makes sense to return a Point, handle in subclass
raise ValueError(
"%s.__new__ requires two unique Points." % cls.__name__)
return GeometryEntity.__new__(cls, p1, p2, **kwargs)
@property
def p1(self):
"""The first defining point of a linear entity.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(0, 0), Point(5, 3)
>>> l = Line(p1, p2)
>>> l.p1
Point(0, 0)
"""
return self.args[0]
@property
def p2(self):
"""The second defining point of a linear entity.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(0, 0), Point(5, 3)
>>> l = Line(p1, p2)
>>> l.p2
Point(5, 3)
"""
return self.args[1]
@property
def coefficients(self):
"""The coefficients (`a`, `b`, `c`) for `ax + by + c = 0`.
See Also
========
sympy.geometry.line.Line.equation
Examples
========
>>> from sympy import Point, Line
>>> from sympy.abc import x, y
>>> p1, p2 = Point(0, 0), Point(5, 3)
>>> l = Line(p1, p2)
>>> l.coefficients
(-3, 5, 0)
>>> p3 = Point(x, y)
>>> l2 = Line(p1, p3)
>>> l2.coefficients
(-y, x, 0)
"""
p1, p2 = self.points
if p1.x == p2.x:
return (S.One, S.Zero, -p1.x)
elif p1.y == p2.y:
return (S.Zero, S.One, -p1.y)
return tuple([simplify(i) for i in
(self.p1.y - self.p2.y,
self.p2.x - self.p1.x,
self.p1.x*self.p2.y - self.p1.y*self.p2.x)])
@staticmethod
def are_concurrent(*lines):
"""Is a sequence of linear entities concurrent?
Two or more linear entities are concurrent if they all
intersect at a single point.
Parameters
==========
lines : a sequence of linear entities.
Returns
=======
True : if the set of linear entities are concurrent,
False : otherwise.
Notes
=====
Simply take the first two lines and find their intersection.
If there is no intersection, then the first two lines were
parallel and had no intersection so concurrency is impossible
amongst the whole set. Otherwise, check to see if the
intersection point of the first two lines is a member on
the rest of the lines. If so, the lines are concurrent.
See Also
========
sympy.geometry.util.intersection
Examples
========
>>> from sympy import Point, Line, Line3D
>>> p1, p2 = Point(0, 0), Point(3, 5)
>>> p3, p4 = Point(-2, -2), Point(0, 2)
>>> l1, l2, l3 = Line(p1, p2), Line(p1, p3), Line(p1, p4)
>>> Line.are_concurrent(l1, l2, l3)
True
>>> l4 = Line(p2, p3)
>>> Line.are_concurrent(l2, l3, l4)
False
"""
# Concurrency requires intersection at a single point; One linear
# entity cannot be concurrent.
if len(lines) <= 1:
return False
try:
# Get the intersection (if parallel)
p = lines[0].intersection(lines[1])
if len(p) == 0:
return False
# Make sure the intersection is on every linear entity
for line in lines[2:]:
if p[0] not in line:
return False
return True
except AttributeError:
return False
def is_parallel(l1, l2):
"""Are two linear entities parallel?
Parameters
==========
l1 : LinearEntity
l2 : LinearEntity
Returns
=======
True : if l1 and l2 are parallel,
False : otherwise.
See Also
========
coefficients
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(0, 0), Point(1, 1)
>>> p3, p4 = Point(3, 4), Point(6, 7)
>>> l1, l2 = Line(p1, p2), Line(p3, p4)
>>> Line.is_parallel(l1, l2)
True
>>> p5 = Point(6, 6)
>>> l3 = Line(p3, p5)
>>> Line.is_parallel(l1, l3)
False
"""
try:
a1, b1, c1 = l1.coefficients
a2, b2, c2 = l2.coefficients
return bool(simplify(a1*b2 - b1*a2) == 0)
except AttributeError:
return False
def is_perpendicular(l1, l2):
    """Are two linear entities perpendicular?

    Parameters
    ==========
    l1 : LinearEntity
    l2 : LinearEntity

    Returns
    =======
    True : if l1 and l2 are perpendicular,
    False : otherwise.

    See Also
    ========
    coefficients

    Examples
    ========
    >>> from sympy import Point, Line
    >>> p1, p2, p3 = Point(0, 0), Point(1, 1), Point(-1, 1)
    >>> l1, l2 = Line(p1, p2), Line(p1, p3)
    >>> l1.is_perpendicular(l2)
    True
    >>> p4 = Point(5, 3)
    >>> l3 = Line(p1, p4)
    >>> l1.is_perpendicular(l3)
    False
    """
    try:
        first = l1.coefficients
        second = l2.coefficients
    except AttributeError:
        # Either argument is not a LinearEntity.
        return False
    a1, b1, _ = first
    a2, b2, _ = second
    # Perpendicular iff the dot product of the normal directions vanishes.
    return bool(simplify(a1*a2 + b1*b2) == 0)
def angle_between(l1, l2):
    """The angle formed between the two linear entities.

    Parameters
    ==========
    l1 : LinearEntity
    l2 : LinearEntity

    Returns
    =======
    angle : angle in radians

    Notes
    =====
    From the dot product of vectors v1 and v2 it is known that
    ``dot(v1, v2) = |v1|*|v2|*cos(A)`` where A is the angle formed
    between the two vectors; the direction vectors of the two lines
    are plugged into that identity and A is recovered with acos.

    See Also
    ========
    is_perpendicular

    Examples
    ========
    >>> from sympy import Point, Line
    >>> p1, p2, p3 = Point(0, 0), Point(0, 4), Point(2, 0)
    >>> l1, l2 = Line(p1, p2), Line(p1, p3)
    >>> l1.angle_between(l2)
    pi/2
    """
    dir1 = l1.p2 - l1.p1
    dir2 = l2.p2 - l2.p1
    cosine = dir1.dot(dir2)/(abs(dir1)*abs(dir2))
    return C.acos(cosine)
def parallel_line(self, p):
    """Create a new Line parallel to this linear entity which passes
    through the point `p`.

    Parameters
    ==========
    p : Point

    Returns
    =======
    line : Line

    See Also
    ========
    is_parallel

    Examples
    ========
    >>> from sympy import Point, Line
    >>> p1, p2, p3 = Point(0, 0), Point(2, 3), Point(-2, 2)
    >>> l1 = Line(p1, p2)
    >>> l2 = l1.parallel_line(p3)
    >>> p3 in l2
    True
    >>> l1.is_parallel(l2)
    True
    """
    # Translate this entity's direction vector to start at p.
    shift = self.p1 - self.p2
    return Line(p, p + shift)
def perpendicular_line(self, p):
    """Create a new Line perpendicular to this linear entity which passes
    through the point `p`.

    Parameters
    ==========
    p : Point

    Returns
    =======
    line : Line

    See Also
    ========
    is_perpendicular, perpendicular_segment

    Examples
    ========
    >>> from sympy import Point, Line
    >>> p1, p2, p3 = Point(0, 0), Point(2, 3), Point(-2, 2)
    >>> l1 = Line(p1, p2)
    >>> l2 = l1.perpendicular_line(p3)
    >>> p3 in l2
    True
    >>> l1.is_perpendicular(l2)
    True
    """
    dx, dy = (self.p1 - self.p2).args
    if dy != 0:
        # General case: rotate the direction (dx, dy) by 90 degrees.
        return Line(p, Point(p.x - dy, p.y + dx))
    # self is horizontal
    if p.y == self.p1.y:
        # p is on this linear entity; go straight up.
        return Line(p, p + Point(0, 1))
    return Line(p, Point(p.x, self.p1.y))
def perpendicular_segment(self, p):
    """Create a perpendicular line segment from `p` to this line.

    The endpoints of the segment are ``p`` and the closest point in
    the line containing self. (If self is not a line, the point might
    not be in self.)

    Parameters
    ==========
    p : Point

    Returns
    =======
    segment : Segment

    Notes
    =====
    Returns `p` itself if `p` is on this linear entity.

    See Also
    ========
    perpendicular_line

    Examples
    ========
    >>> from sympy import Point, Line
    >>> p1, p2, p3 = Point(0, 0), Point(1, 1), Point(0, 2)
    >>> l1 = Line(p1, p2)
    >>> s1 = l1.perpendicular_segment(p3)
    >>> l1.is_perpendicular(s1)
    True
    >>> p3 in s1
    True
    >>> l1.perpendicular_segment(Point(4, 0))
    Segment(Point(2, 2), Point(4, 0))
    """
    if p in self:
        # The foot of the perpendicular is p itself; no segment to build.
        return p
    a, b, c = self.coefficients
    if a == 0:  # horizontal
        p2 = Point(p.x, self.p1.y)
    elif b == 0:  # vertical
        p2 = Point(self.p1.x, p.y)
    else:
        # ax + by + c = 0
        # y-coordinate of the line at x = p.x
        y = (-c - a*p.x)/b
        m = self.slope
        d2 = 1 + m**2
        # H is the vertical gap between p and the line at p.x; the foot
        # of the perpendicular is (p.x + m*H/(1+m**2), y + m**2*H/(1+m**2)).
        H = p.y - y
        dx = m*H/d2
        dy = m*dx
        p2 = (p.x + dx, y + dy)
    return Segment(p, p2)
@property
def length(self):
    """The length of the line, which is always infinite.

    Examples
    ========
    >>> from sympy import Point, Line
    >>> p1, p2 = Point(0, 0), Point(3, 5)
    >>> l1 = Line(p1, p2)
    >>> l1.length
    oo
    """
    # A line extends without bound in both directions.
    return S.Infinity
@property
def slope(self):
    """The slope of this linear entity, or infinity if vertical.

    Returns
    =======
    slope : number or sympy expression

    See Also
    ========
    coefficients

    Examples
    ========
    >>> from sympy import Point, Line
    >>> p1, p2 = Point(0, 0), Point(3, 5)
    >>> l1 = Line(p1, p2)
    >>> l1.slope
    5/3
    >>> p3 = Point(0, 4)
    >>> l2 = Line(p1, p3)
    >>> l2.slope
    oo
    """
    run, rise = (self.p1 - self.p2).args
    if run == 0:
        # Vertical entity has undefined (infinite) slope.
        return S.Infinity
    return simplify(rise/run)
@property
def points(self):
    """The two points used to define this linear entity.

    Returns
    =======
    points : tuple of Points

    See Also
    ========
    sympy.geometry.point.Point

    Examples
    ========
    >>> from sympy import Point, Line
    >>> p1, p2 = Point(0, 0), Point(5, 11)
    >>> l1 = Line(p1, p2)
    >>> l1.points
    (Point(0, 0), Point(5, 11))
    """
    return self.p1, self.p2
def projection(self, o):
    """Project a point, line, ray, or segment onto this linear entity.

    Parameters
    ==========
    other : Point or LinearEntity (Line, Ray, Segment)

    Returns
    =======
    projection : Point or LinearEntity (Line, Ray, Segment)
        The return type matches the type of the parameter ``other``.

    Raises
    ======
    GeometryError
        When method is unable to perform projection.

    Notes
    =====
    A projection involves taking the two points that define
    the linear entity and projecting those points onto a
    Line and then reforming the linear entity using these
    projections.
    A point P is projected onto a line L by finding the point
    on L that is closest to P. This point is the intersection
    of L and the line perpendicular to L that passes through P.

    See Also
    ========
    sympy.geometry.point.Point, perpendicular_line

    Examples
    ========
    >>> from sympy import Point, Line, Segment, Rational
    >>> p1, p2, p3 = Point(0, 0), Point(1, 1), Point(Rational(1, 2), 0)
    >>> l1 = Line(p1, p2)
    >>> l1.projection(p3)
    Point(1/4, 1/4)
    >>> p4, p5 = Point(10, 0), Point(12, 1)
    >>> s1 = Segment(p4, p5)
    >>> l1.projection(s1)
    Segment(Point(5, 5), Point(13/2, 13/2))
    """
    # Work on the full line through self even if self is a Ray/Segment.
    tline = Line(self.p1, self.p2)

    def _project(p):
        """Project a point onto the line representing self."""
        if p in tline:
            return p
        l1 = tline.perpendicular_line(p)
        return tline.intersection(l1)[0]

    projected = None
    if isinstance(o, Point):
        return _project(o)
    elif isinstance(o, LinearEntity):
        n_p1 = _project(o.p1)
        n_p2 = _project(o.p2)
        if n_p1 == n_p2:
            # Both endpoints project to the same point, so the
            # projection degenerates to a Point.
            projected = n_p1
        else:
            projected = o.__class__(n_p1, n_p2)
    # Didn't know how to project so raise an error
    if projected is None:
        n1 = self.__class__.__name__
        n2 = o.__class__.__name__
        raise GeometryError(
            "Do not know how to project %s onto %s" % (n2, n1))
    # Clip the projection to self: for a Ray or Segment only the part
    # of the projected entity lying on self is returned.
    return self.intersection(projected)[0]
def intersection(self, o):
    """The intersection with another geometrical entity.

    Parameters
    ==========
    o : Point or LinearEntity

    Returns
    =======
    intersection : list of geometrical entities

    See Also
    ========
    sympy.geometry.point.Point

    Examples
    ========
    >>> from sympy import Point, Line, Segment
    >>> p1, p2, p3 = Point(0, 0), Point(1, 1), Point(7, 7)
    >>> l1 = Line(p1, p2)
    >>> l1.intersection(p3)
    [Point(7, 7)]
    >>> p4, p5 = Point(5, 0), Point(0, 3)
    >>> l2 = Line(p4, p5)
    >>> l1.intersection(l2)
    [Point(15/8, 15/8)]
    >>> p6, p7 = Point(0, 5), Point(2, 6)
    >>> s1 = Segment(p6, p7)
    >>> l1.intersection(s1)
    []
    """
    if isinstance(o, Point):
        if o in self:
            return [o]
        else:
            return []
    elif isinstance(o, LinearEntity):
        a1, b1, c1 = self.coefficients
        a2, b2, c2 = o.coefficients
        # t is the determinant of the direction normals; zero means
        # the entities are parallel (possibly coincident).
        t = simplify(a1*b2 - a2*b1)
        if t.equals(0) is not False:  # assume they are parallel
            # Parallel/collinear case: the intersection is an overlap
            # (possibly empty, a point, a segment, or a whole entity).
            if isinstance(self, Line):
                if o.p1 in self:
                    return [o]
                return []
            elif isinstance(o, Line):
                if self.p1 in o:
                    return [self]
                return []
            elif isinstance(self, Ray):
                if isinstance(o, Ray):
                    # case 1, rays in the same direction
                    if self.xdirection == o.xdirection and \
                            self.ydirection == o.ydirection:
                        return [self] if (self.source in o) else [o]
                    # case 2, rays in the opposite directions
                    else:
                        if o.source in self:
                            if self.source == o.source:
                                return [self.source]
                            return [Segment(o.source, self.source)]
                        return []
                elif isinstance(o, Segment):
                    if o.p1 in self:
                        if o.p2 in self:
                            return [o]
                        return [Segment(o.p1, self.source)]
                    elif o.p2 in self:
                        return [Segment(o.p2, self.source)]
                    return []
            elif isinstance(self, Segment):
                if isinstance(o, Ray):
                    # Delegate to the Ray/Segment logic above.
                    return o.intersection(self)
                elif isinstance(o, Segment):
                    # A reminder that the points of Segments are ordered
                    # in such a way that the following works. See
                    # Segment.__new__ for details on the ordering.
                    if self.p1 not in o:
                        if self.p2 not in o:
                            # Neither of the endpoints are in o so either
                            # o is contained in this segment or it isn't
                            if o in self:
                                return [self]
                            return []
                        else:
                            # p1 not in o but p2 is. Either there is a
                            # segment as an intersection, or they only
                            # intersect at an endpoint
                            if self.p2 == o.p1:
                                return [o.p1]
                            return [Segment(o.p1, self.p2)]
                    elif self.p2 not in o:
                        # p2 not in o but p1 is. Either there is a
                        # segment as an intersection, or they only
                        # intersect at an endpoint
                        if self.p1 == o.p2:
                            return [o.p2]
                        return [Segment(o.p2, self.p1)]
                    # Both points of self in o so the whole segment
                    # is in o
                    return [self]
            # Unknown linear entity
            return []
        # Not parallel, so find the point of intersection
        # (Cramer's rule on the two line equations).
        px = simplify((b1*c2 - c1*b2) / t)
        py = simplify((a2*c1 - a1*c2) / t)
        inter = Point(px, py)
        # we do not use a simplistic 'inter in self and inter in o'
        # because that requires an equality test that is fragile;
        # instead we employ some diagnostics to see if the intersection
        # is valid

        def inseg(self):
            # Is inter within the bounding box of segment `self`?
            # (inter is already known to be on the infinite line.)
            def _between(a, b, c):
                return c >= a and c <= b or c <= a and c >= b
            if _between(self.p1.x, self.p2.x, inter.x) and \
                    _between(self.p1.y, self.p2.y, inter.y):
                return True

        def inray(self):
            # Is inter on ray `self`? True if it is the source or lies
            # in the same direction as the ray from the source.
            if self.p1 == inter:
                return True
            sray = Ray(self.p1, inter)
            if sray.xdirection == self.xdirection and \
                    sray.ydirection == self.ydirection:
                return True

        # Order (self, o) as Line < Ray < Segment so the branch
        # structure below only needs the upper triangle of cases.
        prec = (Line, Ray, Segment)
        if prec.index(self.func) > prec.index(o.func):
            self, o = o, self
        rv = [inter]
        if isinstance(self, Line):
            if isinstance(o, Line):
                return rv
            elif isinstance(o, Ray) and inray(o):
                return rv
            elif isinstance(o, Segment) and inseg(o):
                return rv
        elif isinstance(self, Ray) and inray(self):
            if isinstance(o, Ray) and inray(o):
                return rv
            elif isinstance(o, Segment) and inseg(o):
                return rv
        elif isinstance(self, Segment) and inseg(self):
            if isinstance(o, Segment) and inseg(o):
                return rv
        return []
    # Not a Point or LinearEntity: let the other entity handle it.
    return o.intersection(self)
def arbitrary_point(self, parameter='t'):
    """A parameterized point on the Line.

    Parameters
    ==========
    parameter : str, optional
        The name of the parameter which will be used for the parametric
        point. The default value is 't'. When this parameter is 0, the
        first point used to define the line will be returned, and when
        it is 1 the second point will be returned.

    Returns
    =======
    point : Point

    Raises
    ======
    ValueError
        When ``parameter`` already appears in the Line's definition.

    See Also
    ========
    sympy.geometry.point.Point

    Examples
    ========
    >>> from sympy import Point, Line
    >>> p1, p2 = Point(1, 0), Point(5, 3)
    >>> l1 = Line(p1, p2)
    >>> l1.arbitrary_point()
    Point(4*t + 1, 3*t)
    """
    t = _symbol(parameter)
    existing = set(f.name for f in self.free_symbols)
    if t.name in existing:
        raise ValueError('Symbol %s already appears in object '
                         'and cannot be used as a parameter.' % t.name)
    # Linear interpolation: p1 at t == 0, p2 at t == 1.
    px = simplify(self.p1.x + t*(self.p2.x - self.p1.x))
    py = simplify(self.p1.y + t*(self.p2.y - self.p1.y))
    return Point(px, py)
def random_point(self):
    """A random point on a LinearEntity.

    Returns
    =======
    point : Point

    See Also
    ========
    sympy.geometry.point.Point

    Examples
    ========
    >>> from sympy import Point, Line
    >>> p1, p2 = Point(0, 0), Point(5, 3)
    >>> l1 = Line(p1, p2)
    >>> p3 = l1.random_point()
    >>> # random point - don't know its coords in advance
    >>> p3 # doctest: +ELLIPSIS
    Point(...)
    >>> # point should belong to the line
    >>> p3 in l1
    True
    """
    from random import randint
    # The lower and upper bounds for the random coordinate; these are
    # narrowed below when self is a Ray or Segment.
    # NOTE(review): randint requires integer bounds, so Ray/Segment
    # endpoints are presumably expected to have integer coordinates
    # here -- TODO confirm.
    lower, upper = -2**32 - 1, 2**32
    if self.slope is S.Infinity:
        # Vertical entity: x is fixed, choose a random y.
        if isinstance(self, Ray):
            if self.ydirection is S.Infinity:
                lower = self.p1.y
            else:
                upper = self.p1.y
        elif isinstance(self, Segment):
            lower = self.p1.y
            upper = self.p2.y
        x = self.p1.x
        y = randint(lower, upper)
    else:
        if isinstance(self, Ray):
            if self.xdirection is S.Infinity:
                lower = self.p1.x
            else:
                upper = self.p1.x
        elif isinstance(self, Segment):
            lower = self.p1.x
            upper = self.p2.x
        a, b, c = self.coefficients
        # Choose a random x and solve ax + by + c = 0 for y.
        x = randint(lower, upper)
        y = (-c - a*x) / b
    return Point(x, y)
def is_similar(self, other):
    """Return True if self and other are contained in the same line.

    Examples
    ========
    >>> from sympy import Point, Line
    >>> p1, p2, p3 = Point(0, 1), Point(3, 4), Point(2, 3)
    >>> l1 = Line(p1, p2)
    >>> l2 = Line(p1, p3)
    >>> l1.is_similar(l2)
    True
    """
    def normalized(a, b, c):
        # Scale (a, b, c) so the leading nonzero coefficient is 1;
        # two entities lie on the same line iff these agree.
        if a != 0:
            return 1, b/a, c/a
        if b != 0:
            return a/b, 1, c/b
        return c
    return normalized(*self.coefficients) == normalized(*other.coefficients)
def __contains__(self, other):
    """Return a definitive answer or else raise an error if it cannot
    be determined that other is on the boundaries of self."""
    verdict = self.contains(other)
    if verdict is None:
        raise Undecidable(
            "can't decide whether '%s' contains '%s'" % (self, other))
    return verdict
def contains(self, other):
    """Ternary containment test; subclasses must implement this.

    Return True if other is on the boundaries of self,
    False if it is not, and None if a determination cannot be made.
    """
    raise NotImplementedError()
class Line(LinearEntity):
    """An infinite line in space.

    A line is declared with two distinct points or a point and slope
    as defined using keyword `slope`.

    Notes
    =====
    At the moment only lines in a 2D space can be declared, because
    Points can be defined only for 2D spaces.

    Parameters
    ==========
    p1 : Point
    pt : Point
    slope : sympy expression

    See Also
    ========
    sympy.geometry.point.Point

    Examples
    ========
    >>> import sympy
    >>> from sympy import Point
    >>> from sympy.abc import L
    >>> from sympy.geometry import Line, Segment
    >>> L = Line(Point(2,3), Point(3,5))
    >>> L
    Line(Point(2, 3), Point(3, 5))
    >>> L.points
    (Point(2, 3), Point(3, 5))
    >>> L.equation()
    -2*x + y + 1
    >>> L.coefficients
    (-2, 1, 1)

    Instantiate with keyword ``slope``:

    >>> Line(Point(0, 0), slope=0)
    Line(Point(0, 0), Point(1, 0))

    Instantiate with another linear object

    >>> s = Segment((0, 0), (0, 1))
    >>> Line(s).equation()
    x
    """

    def __new__(cls, p1, pt=None, slope=None, **kwargs):
        if isinstance(p1, LinearEntity):
            # Copy-construct from another linear entity's defining points.
            p1, pt = p1.args
        else:
            p1 = Point(p1)
        if pt is not None and slope is None:
            try:
                p2 = Point(pt)
            except NotImplementedError:
                raise ValueError('The 2nd argument was not a valid Point. '
                'If it was a slope, enter it with keyword "slope".')
        elif slope is not None and pt is None:
            slope = sympify(slope)
            if slope.is_finite is False:
                # when infinite slope, don't change x
                dx = 0
                dy = 1
            else:
                # go over 1 up slope
                dx = 1
                dy = slope
            # XXX avoiding simplification by adding to coords directly
            p2 = Point(p1.x + dx, p1.y + dy)
        else:
            raise ValueError('A 2nd Point or keyword "slope" must be used.')
        return LinearEntity.__new__(cls, p1, p2, **kwargs)

    def plot_interval(self, parameter='t'):
        """The plot interval for the default geometric plot of line. Gives
        values that will produce a line that is +/- 5 units long (where a
        unit is the distance between the two points that define the line).

        Parameters
        ==========
        parameter : str, optional
            Default value is 't'.

        Returns
        =======
        plot_interval : list (plot interval)
            [parameter, lower_bound, upper_bound]

        Examples
        ========
        >>> from sympy import Point, Line
        >>> p1, p2 = Point(0, 0), Point(5, 3)
        >>> l1 = Line(p1, p2)
        >>> l1.plot_interval()
        [t, -5, 5]
        """
        t = _symbol(parameter)
        return [t, -5, 5]

    def equation(self, x='x', y='y'):
        """The equation of the line: ax + by + c.

        Parameters
        ==========
        x : str, optional
            The name to use for the x-axis, default value is 'x'.
        y : str, optional
            The name to use for the y-axis, default value is 'y'.

        Returns
        =======
        equation : sympy expression

        See Also
        ========
        LinearEntity.coefficients

        Examples
        ========
        >>> from sympy import Point, Line
        >>> p1, p2 = Point(1, 0), Point(5, 3)
        >>> l1 = Line(p1, p2)
        >>> l1.equation()
        -3*x + 4*y + 3
        """
        x, y = _symbol(x), _symbol(y)
        p1, p2 = self.points
        # Vertical and horizontal lines get the simple one-variable form.
        if p1.x == p2.x:
            return x - p1.x
        elif p1.y == p2.y:
            return y - p1.y
        a, b, c = self.coefficients
        return simplify(a*x + b*y + c)

    def contains(self, o):
        """
        Return True if o is on this Line, or False otherwise.

        Examples
        ========
        >>> from sympy import Line,Point
        >>> p1, p2 = Point(0, 1), Point(3, 4)
        >>> l = Line(p1, p2)
        >>> l.contains(p1)
        True
        >>> l.contains((0, 1))
        True
        >>> l.contains((0, 0))
        False
        """
        if is_sequence(o):
            o = Point(o)
        if isinstance(o, Point):
            o = o.func(*[simplify(i) for i in o.args])
            x, y = Dummy(), Dummy()
            eq = self.equation(x, y)
            # Substitute the point into the line equation; a line that is
            # horizontal/vertical only involves one of the variables.
            if not eq.has(y):
                return (solve(eq, x)[0] - o.x).equals(0)
            if not eq.has(x):
                return (solve(eq, y)[0] - o.y).equals(0)
            return (solve(eq.subs(x, o.x), y)[0] - o.y).equals(0)
        elif not isinstance(o, LinearEntity):
            return False
        elif isinstance(o, Line):
            return self.__eq__(o)
        elif not self.is_similar(o):
            return False
        else:
            # o is a Ray or Segment on the same underlying line.
            return o.p1 in self and o.p2 in self

    def distance(self, o):
        """
        Finds the shortest distance between a line and a point.

        Raises
        ======
        NotImplementedError is raised if o is not a Point

        Examples
        ========
        >>> from sympy import Point, Line
        >>> p1, p2 = Point(0, 0), Point(1, 1)
        >>> s = Line(p1, p2)
        >>> s.distance(Point(-1, 1))
        sqrt(2)
        >>> s.distance((-1, 2))
        3*sqrt(2)/2
        """
        if not isinstance(o, Point):
            if is_sequence(o):
                o = Point(o)
        a, b, c = self.coefficients
        if 0 in (a, b):
            # Horizontal/vertical line: the perpendicular segment is easy.
            return self.perpendicular_segment(o).length
        m = self.slope
        x = o.x
        # y-coordinate of the line at x = o.x (since y = m*x - c/b).
        y = m*x - c/b
        # Vertical gap scaled by the line's inclination gives the
        # perpendicular distance.
        return abs(factor_terms(o.y - y))/sqrt(1 + m**2)

    def equal(self, other):
        """Returns True if self and other are the same mathematical entities"""
        # NOTE(review): named `equal` while Ray uses `equals` -- an
        # inconsistency worth unifying in a future API pass.
        if not isinstance(other, Line):
            return False
        return Point.is_collinear(self.p1, other.p1, self.p2, other.p2)
class Ray(LinearEntity):
    """
    A Ray is a semi-line in the space with a source point and a direction.

    Parameters
    ==========
    p1 : Point
        The source of the Ray
    p2 : Point or radian value
        This point determines the direction in which the Ray propagates.
        If given as an angle it is interpreted in radians with the positive
        direction being ccw.

    Attributes
    ==========
    source
    xdirection
    ydirection

    See Also
    ========
    sympy.geometry.point.Point, Line

    Notes
    =====
    At the moment only rays in a 2D space can be declared, because
    Points can be defined only for 2D spaces.

    Examples
    ========
    >>> import sympy
    >>> from sympy import Point, pi
    >>> from sympy.abc import r
    >>> from sympy.geometry import Ray
    >>> r = Ray(Point(2, 3), Point(3, 5))
    >>> r = Ray(Point(2, 3), Point(3, 5))
    >>> r
    Ray(Point(2, 3), Point(3, 5))
    >>> r.points
    (Point(2, 3), Point(3, 5))
    >>> r.source
    Point(2, 3)
    >>> r.xdirection
    oo
    >>> r.ydirection
    oo
    >>> r.slope
    2
    >>> Ray(Point(0, 0), angle=pi/4).slope
    1
    """

    def __new__(cls, p1, pt=None, angle=None, **kwargs):
        p1 = Point(p1)
        if pt is not None and angle is None:
            try:
                p2 = Point(pt)
            except NotImplementedError:
                from sympy.utilities.misc import filldedent
                raise ValueError(filldedent('''
                    The 2nd argument was not a valid Point; if
                    it was meant to be an angle it should be
                    given with keyword "angle".'''))
            if p1 == p2:
                raise ValueError('A Ray requires two distinct points.')
        elif angle is not None and pt is None:
            # we need to know if the angle is an odd multiple of pi/2
            c = pi_coeff(sympify(angle))
            p2 = None
            if c is not None:
                # Axis-aligned angles get exact unit-step second points
                # rather than going through tan/Piecewise below.
                if c.is_Rational:
                    if c.q == 2:
                        if c.p == 1:
                            p2 = p1 + Point(0, 1)
                        elif c.p == 3:
                            p2 = p1 + Point(0, -1)
                    elif c.q == 1:
                        if c.p == 0:
                            p2 = p1 + Point(1, 0)
                        elif c.p == 1:
                            p2 = p1 + Point(-1, 0)
                if p2 is None:
                    c *= S.Pi
            else:
                c = angle % (2*S.Pi)
            if not p2:
                # General angle: build the direction with a Piecewise so
                # symbolic angles resolve to the correct quadrant.
                m = 2*c/S.Pi
                left = And(1 < m, m < 3)  # is it in quadrant 2 or 3?
                x = Piecewise((-1, left), (Piecewise((0, Eq(m % 1, 0)), (1, True)), True))
                y = Piecewise((-C.tan(c), left), (Piecewise((1, Eq(m, 1)), (-1, Eq(m, 3)), (C.tan(c), True)), True))
                p2 = p1 + Point(x, y)
        else:
            raise ValueError('A 2nd point or keyword "angle" must be used.')
        return LinearEntity.__new__(cls, p1, p2, **kwargs)

    @property
    def source(self):
        """The point from which the ray emanates.

        See Also
        ========
        sympy.geometry.point.Point

        Examples
        ========
        >>> from sympy import Point, Ray
        >>> p1, p2 = Point(0, 0), Point(4, 1)
        >>> r1 = Ray(p1, p2)
        >>> r1.source
        Point(0, 0)
        """
        return self.p1

    @property
    def xdirection(self):
        """The x direction of the ray.

        Positive infinity if the ray points in the positive x direction,
        negative infinity if the ray points in the negative x direction,
        or 0 if the ray is vertical.

        See Also
        ========
        ydirection

        Examples
        ========
        >>> from sympy import Point, Ray
        >>> p1, p2, p3 = Point(0, 0), Point(1, 1), Point(0, -1)
        >>> r1, r2 = Ray(p1, p2), Ray(p1, p3)
        >>> r1.xdirection
        oo
        >>> r2.xdirection
        0
        """
        if self.p1.x < self.p2.x:
            return S.Infinity
        elif self.p1.x == self.p2.x:
            return S.Zero
        else:
            return S.NegativeInfinity

    @property
    def ydirection(self):
        """The y direction of the ray.

        Positive infinity if the ray points in the positive y direction,
        negative infinity if the ray points in the negative y direction,
        or 0 if the ray is horizontal.

        See Also
        ========
        xdirection

        Examples
        ========
        >>> from sympy import Point, Ray
        >>> p1, p2, p3 = Point(0, 0), Point(-1, -1), Point(-1, 0)
        >>> r1, r2 = Ray(p1, p2), Ray(p1, p3)
        >>> r1.ydirection
        -oo
        >>> r2.ydirection
        0
        """
        if self.p1.y < self.p2.y:
            return S.Infinity
        elif self.p1.y == self.p2.y:
            return S.Zero
        else:
            return S.NegativeInfinity

    def distance(self, o):
        """
        Finds the shortest distance between the ray and a point.

        Raises
        ======
        NotImplementedError is raised if o is not a Point

        Examples
        ========
        >>> from sympy import Point, Ray
        >>> p1, p2 = Point(0, 0), Point(1, 1)
        >>> s = Ray(p1, p2)
        >>> s.distance(Point(-1, -1))
        sqrt(2)
        >>> s.distance((-1, 2))
        3*sqrt(2)/2
        """
        if not isinstance(o, Point):
            if is_sequence(o):
                o = Point(o)
        s = self.perpendicular_segment(o)
        if isinstance(s, Point):
            # o is on the line through self; distance is zero only if it
            # is actually on the ray.
            if self.contains(s):
                return S.Zero
        else:
            # since arg-order is arbitrary, find the non-o point
            non_o = s.p1 if s.p1 != o else s.p2
            if self.contains(non_o):
                return Line(self).distance(o)  # = s.length but simpler
        # the following applies when neither of the above apply
        return self.source.distance(o)

    def plot_interval(self, parameter='t'):
        """The plot interval for the default geometric plot of the Ray. Gives
        values that will produce a ray that is 10 units long (where a unit is
        the distance between the two points that define the ray).

        Parameters
        ==========
        parameter : str, optional
            Default value is 't'.

        Returns
        =======
        plot_interval : list
            [parameter, lower_bound, upper_bound]

        Examples
        ========
        >>> from sympy import Point, Ray, pi
        >>> r = Ray((0, 0), angle=pi/4)
        >>> r.plot_interval()
        [t, 0, 10]
        """
        t = _symbol(parameter)
        return [t, 0, 10]

    def equals(self, other):
        """Returns True if self and other are the same mathematical entities"""
        if not isinstance(other, Ray):
            return False
        # Same source and same direction (other.p2 on self) imply the
        # same ray.
        return self.source == other.source and other.p2 in self

    def contains(self, o):
        """
        Is other GeometryEntity contained in this Ray?

        Examples
        ========
        >>> from sympy import Ray,Point,Segment
        >>> p1, p2 = Point(0, 0), Point(4, 4)
        >>> r = Ray(p1, p2)
        >>> r.contains(p1)
        True
        >>> r.contains((1, 1))
        True
        >>> r.contains((1, 3))
        False
        >>> s = Segment((1, 1), (2, 2))
        >>> r.contains(s)
        True
        >>> s = Segment((1, 2), (2, 5))
        >>> r.contains(s)
        False
        >>> r1 = Ray((2, 2), (3, 3))
        >>> r.contains(r1)
        True
        >>> r1 = Ray((2, 2), (3, 5))
        >>> r.contains(r1)
        False
        """
        if isinstance(o, Ray):
            # A ray contains another ray iff they are collinear, point
            # the same way, and o starts on self (checked by collinearity
            # of all four defining points plus matching directions).
            return (Point.is_collinear(self.p1, self.p2, o.p1, o.p2) and
                    self.xdirection == o.xdirection and
                    self.ydirection == o.ydirection)
        elif isinstance(o, Segment):
            return o.p1 in self and o.p2 in self
        elif is_sequence(o):
            o = Point(o)
        if isinstance(o, Point):
            if Point.is_collinear(self.p1, self.p2, o):
                # On the underlying line; check it is on the correct side
                # of the source.
                if self.xdirection is S.Infinity:
                    rv = o.x >= self.source.x
                elif self.xdirection is S.NegativeInfinity:
                    rv = o.x <= self.source.x
                elif self.ydirection is S.Infinity:
                    rv = o.y >= self.source.y
                else:
                    rv = o.y <= self.source.y
                if rv == True or rv == False:
                    return bool(rv)
                raise Undecidable(
                    'Cannot determine if %s is in %s' % (o, self))
            else:
                # Points are not collinear, so the rays are not parallel
                # and hence it is impossible for self to contain o
                return False
        # No other known entity can be contained in a Ray
        return False
class Segment(LinearEntity):
    """An undirected line segment in space.

    Parameters
    ==========
    p1 : Point
    p2 : Point

    Attributes
    ==========
    length : number or sympy expression
    midpoint : Point

    See Also
    ========
    sympy.geometry.point.Point, Line

    Notes
    =====
    At the moment only segments in a 2D space can be declared, because
    Points can be defined only for 2D spaces.

    Examples
    ========
    >>> import sympy
    >>> from sympy import Point
    >>> from sympy.abc import s
    >>> from sympy.geometry import Segment
    >>> Segment((1, 0), (1, 1)) # tuples are interpreted as pts
    Segment(Point(1, 0), Point(1, 1))
    >>> s = Segment(Point(4, 3), Point(1, 1))
    >>> s
    Segment(Point(1, 1), Point(4, 3))
    >>> s.points
    (Point(1, 1), Point(4, 3))
    >>> s.slope
    2/3
    >>> s.length
    sqrt(13)
    >>> s.midpoint
    Point(5/2, 2)
    """

    def __new__(cls, p1, p2, **kwargs):
        # Reorder the two points under the following ordering:
        #   if p1.x != p2.x then p1.x < p2.x
        #   if p1.x == p2.x then p1.y < p2.y
        # Other methods (e.g. LinearEntity.intersection) rely on this
        # canonical ordering.
        p1 = Point(p1)
        p2 = Point(p2)
        if p1 == p2:
            # A degenerate segment collapses to a Point.
            return Point(p1)
        if (p1.x > p2.x) == True:
            p1, p2 = p2, p1
        elif (p1.x == p2.x) == True and (p1.y > p2.y) == True:
            p1, p2 = p2, p1
        return LinearEntity.__new__(cls, p1, p2, **kwargs)

    def plot_interval(self, parameter='t'):
        """The plot interval for the default geometric plot of the Segment gives
        values that will produce the full segment in a plot.

        Parameters
        ==========
        parameter : str, optional
            Default value is 't'.

        Returns
        =======
        plot_interval : list
            [parameter, lower_bound, upper_bound]

        Examples
        ========
        >>> from sympy import Point, Segment
        >>> p1, p2 = Point(0, 0), Point(5, 3)
        >>> s1 = Segment(p1, p2)
        >>> s1.plot_interval()
        [t, 0, 1]
        """
        t = _symbol(parameter)
        return [t, 0, 1]

    def perpendicular_bisector(self, p=None):
        """The perpendicular bisector of this segment.

        If no point is specified or the point specified is not on the
        bisector then the bisector is returned as a Line. Otherwise a
        Segment is returned that joins the point specified and the
        intersection of the bisector and the segment.

        Parameters
        ==========
        p : Point

        Returns
        =======
        bisector : Line or Segment

        See Also
        ========
        LinearEntity.perpendicular_segment

        Examples
        ========
        >>> from sympy import Point, Segment
        >>> p1, p2, p3 = Point(0, 0), Point(6, 6), Point(5, 1)
        >>> s1 = Segment(p1, p2)
        >>> s1.perpendicular_bisector()
        Line(Point(3, 3), Point(9, -3))
        >>> s1.perpendicular_bisector(p3)
        Segment(Point(3, 3), Point(5, 1))
        """
        l = LinearEntity.perpendicular_line(self, self.midpoint)
        if p is None or p not in l:
            return l
        else:
            return Segment(self.midpoint, p)

    @property
    def length(self):
        """The length of the line segment.

        See Also
        ========
        sympy.geometry.point.Point.distance

        Examples
        ========
        >>> from sympy import Point, Segment
        >>> p1, p2 = Point(0, 0), Point(4, 3)
        >>> s1 = Segment(p1, p2)
        >>> s1.length
        5
        """
        return Point.distance(self.p1, self.p2)

    @property
    def midpoint(self):
        """The midpoint of the line segment.

        See Also
        ========
        sympy.geometry.point.Point.midpoint

        Examples
        ========
        >>> from sympy import Point, Segment
        >>> p1, p2 = Point(0, 0), Point(4, 3)
        >>> s1 = Segment(p1, p2)
        >>> s1.midpoint
        Point(2, 3/2)
        """
        return Point.midpoint(self.p1, self.p2)

    def distance(self, o):
        """
        Finds the shortest distance between a line segment and a point.

        Raises
        ======
        NotImplementedError is raised if o is not a Point

        Examples
        ========
        >>> from sympy import Point, Segment
        >>> p1, p2 = Point(0, 1), Point(3, 4)
        >>> s = Segment(p1, p2)
        >>> s.distance(Point(10, 15))
        sqrt(170)
        >>> s.distance((0, 12))
        sqrt(73)
        """
        if is_sequence(o):
            o = Point(o)
        if isinstance(o, Point):
            seg_vector = self.p2 - self.p1
            pt_vector = o - self.p1
            # t is the normalized projection of o onto the segment:
            # t <= 0 means closest to p1, t >= 1 closest to p2,
            # otherwise closest to an interior point.
            t = seg_vector.dot(pt_vector)/self.length**2
            if t >= 1:
                distance = Point.distance(self.p2, o)
            elif t <= 0:
                distance = Point.distance(self.p1, o)
            else:
                distance = Point.distance(
                    self.p1 + Point(t*seg_vector.x, t*seg_vector.y), o)
            return distance
        raise NotImplementedError()

    def contains(self, other):
        """
        Is the other GeometryEntity contained within this Segment?

        Examples
        ========
        >>> from sympy import Point, Segment
        >>> p1, p2 = Point(0, 1), Point(3, 4)
        >>> s = Segment(p1, p2)
        >>> s2 = Segment(p2, p1)
        >>> s.contains(s2)
        True
        """
        if isinstance(other, Segment):
            return other.p1 in self and other.p2 in self
        elif isinstance(other, Point):
            if Point.is_collinear(self.p1, self.p2, other):
                # Solve for the parameter value that produces `other`;
                # it is inside the segment iff 0 <= t <= 1.
                t = Dummy('t')
                x, y = self.arbitrary_point(t).args
                if self.p1.x != self.p2.x:
                    ti = solve(x - other.x, t)[0]
                else:
                    ti = solve(y - other.y, t)[0]
                if ti.is_number:
                    return 0 <= ti <= 1
                return None
        # No other known entity can be contained in a Segment
        return False
| {
"content_hash": "50602985fcd2c0eb49b6981072f7db39",
"timestamp": "",
"source": "github",
"line_count": 1733,
"max_line_length": 116,
"avg_line_length": 26.8401615695326,
"alnum_prop": 0.4739648277937825,
"repo_name": "dqnykamp/sympy",
"id": "d6c643005534350b24a05d0a3d8958014c556179",
"size": "46514",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "sympy/geometry/line.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "15195033"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "4018"
},
{
"name": "TeX",
"bytes": "32356"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
import os
import re
import time
import urllib.request as urllib2
import yaml
from kubernetes import client
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from toscaparser import tosca_template
from tacker._i18n import _
from tacker.common.container import kubernetes_utils
from tacker.common import exceptions
from tacker.common import log
from tacker.common import utils
from tacker.extensions import common_services as cs
from tacker.extensions import vnfm
from tacker import objects
from tacker.objects.fields import ErrorPoint as EP
from tacker.objects import vnf_package as vnf_package_obj
from tacker.objects import vnf_package_vnfd as vnfd_obj
from tacker.objects import vnf_resources as vnf_resource_obj
from tacker.vnflcm import utils as vnflcm_utils
from tacker.vnfm.infra_drivers import abstract_driver
from tacker.vnfm.infra_drivers.kubernetes.helm import helm_client
from tacker.vnfm.infra_drivers.kubernetes.k8s import translate_outputs
from tacker.vnfm.infra_drivers.kubernetes import translate_template
from tacker.vnfm.infra_drivers import scale_driver
from urllib.parse import urlparse
# Key in the instantiation additional params that points at the Kubernetes
# resource definition files used for CNF deployment.
CNF_TARGET_FILES_KEY = 'lcm-kubernetes-def-files'
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Sentinel value reported when a VNFC's backing pod cannot be located.
VNFC_POD_NOT_FOUND = "POD_NOT_FOUND"
# Config options registered under the [kubernetes_vim] group; they bound
# the polling loops used while waiting for stack/resource state changes.
OPTS = [
    cfg.IntOpt('stack_retries',
               default=100,
               help=_("Number of attempts to retry for stack"
                      " creation/deletion")),
    cfg.IntOpt('stack_retry_wait',
               default=5,
               help=_("Wait time (in seconds) between consecutive stack"
                      " create/delete retries")),
]
CONF.register_opts(OPTS, group='kubernetes_vim')
def config_opts():
    """Return this module's config options, grouped by their option group."""
    return [('kubernetes_vim', OPTS)]
# TOSCA policy type that marks a scaling policy in a VNFD.
SCALING_POLICY = 'tosca.policies.tacker.Scaling'
COMMA_CHARACTER = ','
# Base directory where Helm charts are placed on the Tacker host.
HELM_CHART_DIR_BASE = "/var/tacker/helm"
def get_scaling_policy_name(action, policy_name):
    """Build the canonical name for a scaling policy action.

    :param action: scaling direction (e.g. 'out' or 'in')
    :param policy_name: name of the scaling policy as declared in the VNFD
    :returns: the combined name '<policy_name>_scale_<action>'
    """
    # f-string instead of %-formatting (the file is Python 3 only).
    return f'{policy_name}_scale_{action}'
class Kubernetes(abstract_driver.VnfAbstractDriver,
scale_driver.VnfScaleAbstractDriver):
"""Kubernetes infra driver for hosting containerized vnfs"""
def __init__(self):
    """Initialize retry settings from config and the Kubernetes API helper."""
    super(Kubernetes, self).__init__()
    # Polling budget for waiting on resource state transitions.
    self.STACK_RETRIES = cfg.CONF.kubernetes_vim.stack_retries
    self.STACK_RETRY_WAIT = cfg.CONF.kubernetes_vim.stack_retry_wait
    # Wrapper that builds authenticated Kubernetes API clients.
    self.kubernetes = kubernetes_utils.KubernetesHTTPAPI()
    # Kubernetes resource kinds this driver knows how to status-check.
    self.CHECK_DICT_KEY = [
        "Pod",
        "Service",
        "PersistentVolumeClaim",
        "Namespace",
        "Node",
        "PersistentVolume",
        "APIService",
        "DaemonSet",
        "Deployment",
        "ReplicaSet",
        "StatefulSet",
        "Job",
        "VolumeAttachment"
    ]
def get_type(self):
    """Return the driver type identifier used for VIM dispatch."""
    return 'kubernetes'
def get_name(self):
    """Return the human-readable driver name."""
    return 'kubernetes'
def get_description(self):
    """Return a short description of this infra driver."""
    return 'Kubernetes infra driver'
@log.log
def create(self, plugin, context, vnf, auth_attr):
    """Create function

    Create ConfigMap, Deployment, Service and Horizontal Pod Autoscaler
    objects. Return a string that contains all deployment namespace and
    names for tracking resources.
    """
    LOG.debug('vnf %s', vnf)
    # initialize Kubernetes APIs
    # _get_auth_creds returns the credentials plus a file descriptor for
    # any temporary auth material that must be cleaned up afterwards.
    auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
    try:
        core_v1_api_client = self.kubernetes.get_core_v1_api_client(
            auth=auth_cred)
        app_v1_api_client = self.kubernetes.get_app_v1_api_client(
            auth=auth_cred)
        scaling_api_client = self.kubernetes.get_scaling_api_client(
            auth=auth_cred)
        # Translate the TOSCA template into Kubernetes objects and
        # deploy them; the returned string identifies the created
        # deployments (namespace/name pairs) for later tracking.
        tosca_to_kubernetes = translate_template.TOSCAToKubernetes(
            vnf=vnf,
            core_v1_api_client=core_v1_api_client,
            app_v1_api_client=app_v1_api_client,
            scaling_api_client=scaling_api_client)
        deployment_names = tosca_to_kubernetes.deploy_kubernetes_objects()
    except Exception as e:
        LOG.error('Creating VNF got an error due to %s', e)
        raise
    finally:
        # Always release the temporary credential file, success or not.
        self.clean_authenticate_vim(auth_cred, file_descriptor)
    return deployment_names
    def create_wait(self, plugin, context, vnf_dict, vnf_id, auth_attr):
        """Create wait function

        Poll the Pods backing the created objects until none is Pending
        (or the retry budget runs out), then record management IPs taken
        from Services labelled with "management_connection".
        """
        # initialize Kubernetes APIs
        # NOTE(review): ids containing braces are skipped here — presumably
        # such ids are not "namespace,name" lists; confirm against callers.
        if '{' not in vnf_id and '}' not in vnf_id:
            auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
            try:
                core_v1_api_client = \
                    self.kubernetes.get_core_v1_api_client(auth=auth_cred)
                # vnf_id packs "namespace,name" pairs (see create()).
                deployment_info = vnf_id.split(COMMA_CHARACTER)
                mgmt_ips = dict()
                pods_information = self._get_pods_information(
                    core_v1_api_client=core_v1_api_client,
                    deployment_info=deployment_info)
                status = self._get_pod_status(pods_information)
                stack_retries = self.STACK_RETRIES
                error_reason = None
                while status == 'Pending' and stack_retries > 0:
                    time.sleep(self.STACK_RETRY_WAIT)
                    pods_information = \
                        self._get_pods_information(
                            core_v1_api_client=core_v1_api_client,
                            deployment_info=deployment_info)
                    status = self._get_pod_status(pods_information)
                    LOG.debug('status: %s', status)
                    stack_retries = stack_retries - 1
                LOG.debug('VNF initializing status: %(service_name)s '
                          '%(status)s',
                          {'service_name': str(deployment_info),
                           'status': status})
                if stack_retries == 0 and status != 'Running':
                    error_reason = _(
                        "Resource creation is not completed within"
                        " {wait} seconds as creation of stack {stack}"
                        " is not completed").format(
                        wait=(
                            self.STACK_RETRIES *
                            self.STACK_RETRY_WAIT),
                        stack=vnf_id)
                    LOG.warning("VNF Creation failed: %(reason)s",
                                {'reason': error_reason})
                    raise vnfm.VNFCreateWaitFailed(reason=error_reason)
                # NOTE(review): reached when polling stopped early with a
                # non-Running status (e.g. 'Unknown'); error_reason is still
                # None at this point.
                elif stack_retries != 0 and status != 'Running':
                    raise vnfm.VNFCreateWaitFailed(reason=error_reason)
                for i in range(0, len(deployment_info), 2):
                    namespace = deployment_info[i]
                    deployment_name = deployment_info[i + 1]
                    service_info = core_v1_api_client.read_namespaced_service(
                        name=deployment_name,
                        namespace=namespace)
                    # Services labelled as management connections expose the
                    # cluster IP of their VDU as the management address.
                    if service_info.metadata.labels.get(
                            "management_connection"):
                        vdu_name = service_info.metadata.labels.\
                            get("vdu_name").split("-")[1]
                        mgmt_ip = service_info.spec.cluster_ip
                        mgmt_ips.update({vdu_name: mgmt_ip})
                vnf_dict['mgmt_ip_address'] = jsonutils.dump_as_bytes(
                    mgmt_ips)
            except Exception as e:
                LOG.error('Creating wait VNF got an error due to %s', e)
                raise
            finally:
                self.clean_authenticate_vim(auth_cred, file_descriptor)
    def create_wait_k8s(self, k8s_objs, k8s_client_dict, vnf_instance):
        """Wait until every Kubernetes object reports Create_complete.

        Each entry of *k8s_objs* is a dict carrying the API object, its
        namespace and a 'status' field that the per-kind check methods
        update in place.  Raises CNFCreateWaitFailed when the retry budget
        is exhausted while objects are still 'Creating'.
        """
        try:
            time.sleep(self.STACK_RETRY_WAIT)
            keep_going = True
            stack_retries = self.STACK_RETRIES
            while keep_going and stack_retries > 0:
                for k8s_obj in k8s_objs:
                    kind = k8s_obj.get('object').kind
                    namespace = k8s_obj.get('namespace')
                    if hasattr(k8s_obj.get('object').metadata, 'name'):
                        name = k8s_obj.get('object').metadata.name
                    else:
                        name = ''
                    api_version = k8s_obj.get('object').api_version
                    if k8s_obj.get('status') == 'Creating':
                        # Only kinds listed in CHECK_DICT_KEY have a status
                        # check; other kinds are assumed complete once sent.
                        if kind in self.CHECK_DICT_KEY:
                            check_method = self.\
                                _select_check_status_by_kind(kind)
                            check_method(k8s_client_dict, k8s_obj,
                                         namespace, name, api_version)
                        else:
                            k8s_obj['status'] = 'Create_complete'
                keep_going = False
                for k8s_obj in k8s_objs:
                    if k8s_obj.get('status') != 'Create_complete':
                        keep_going = True
                    else:
                        if k8s_obj.get('object', '').metadata:
                            LOG.debug(
                                'Resource namespace: {namespace},'
                                'name:{name},kind: {kind} '
                                'is create complete'.format(
                                    namespace=k8s_obj.get('namespace'),
                                    name=k8s_obj.get('object').metadata.name,
                                    kind=k8s_obj.get('object').kind)
                            )
                        else:
                            LOG.debug(
                                'Resource namespace: {namespace},'
                                'name:{name},kind: {kind} '
                                'is create complete'.format(
                                    namespace=k8s_obj.get('namespace'),
                                    name='',
                                    kind=k8s_obj.get('object').kind)
                            )
                if keep_going:
                    time.sleep(self.STACK_RETRY_WAIT)
                    stack_retries -= 1
            if stack_retries == 0 and keep_going:
                LOG.error('It is time out, When instantiate cnf,'
                          'waiting for resource creation.')
                # Mark every object still in flight as failed before raising.
                for k8s_obj in k8s_objs:
                    if k8s_obj.get('status') == 'Creating':
                        k8s_obj['status'] = 'Wait_failed'
                        err_reason = _("The resources are creating time out."
                                       "namespace: {namespace}, name:{name}, "
                                       "kind: {kind}).Reason: {message}").\
                            format(namespace=k8s_obj.get('namespace'),
                                   name=k8s_obj.get('object').metadata.name,
                                   kind=k8s_obj.get('object').kind,
                                   message=k8s_obj['message'])
                        LOG.error(err_reason)
                error_reason = _(
                    "Resource creation is not completed within"
                    " {wait} seconds as creation of stack {stack}"
                    " is not completed").format(
                    wait=(self.STACK_RETRIES * self.STACK_RETRY_WAIT),
                    stack=vnf_instance.id
                )
                raise vnfm.CNFCreateWaitFailed(reason=error_reason)
            return k8s_objs
        except Exception as e:
            LOG.error('Creating wait CNF got an error due to %s', e)
            raise e
def _select_check_status_by_kind(self, kind):
check_dict = {
"Pod": self._check_status_pod,
"Service": self._check_status_service,
"PersistentVolumeClaim":
self._check_status_persistent_volume_claim,
"Namespace": self._check_status_namespace,
"Node": self._check_status_node,
"PersistentVolume": self._check_status_persistent_volume,
"APIService": self._check_status_api_service,
"DaemonSet": self._check_status_daemon_set,
"Deployment": self._check_status_deployment,
"ReplicaSet": self._check_status_replica_set,
"StatefulSet": self._check_status_stateful_set,
"Job": self._check_status_job,
"VolumeAttachment": self._check_status_volume_attachment
}
return check_dict[kind]
def _check_is_ip(self, ip_str):
if re.match(r'^\d{,3}.\d{,3}.\d{,3}.\d{,3}$', ip_str):
num_list = [int(x) for x in ip_str.split('.')]
for i in num_list:
if i > 255 or i < 0:
return False
return True
else:
return False
    def _check_status_stateful_set(self, k8s_client_dict, k8s_obj,
                                   namespace, name, api_version):
        """Check a StatefulSet: all replicas ready and every PVC Bound."""
        stateful_set = k8s_client_dict[api_version]. \
            read_namespaced_stateful_set(namespace=namespace, name=name)
        if stateful_set.status.replicas != \
                stateful_set.status.ready_replicas:
            k8s_obj['status'] = 'Creating'
            k8s_obj['message'] = "Pod in StatefulSet is still creating. " \
                "The pod is ready {value1}/{value2}".format(
                    value1=stateful_set.status.ready_replicas,
                    value2=stateful_set.status.replicas
                )
        else:
            # PVCs created from volumeClaimTemplates are named
            # "<template>-<statefulset>-<ordinal>"; each must be Bound.
            for i in range(0, stateful_set.spec.replicas):
                volume_claim_templates = stateful_set.spec.\
                    volume_claim_templates
                for volume_claim_template in volume_claim_templates:
                    pvc_name = "-".join(
                        [volume_claim_template.metadata.name, name, str(i)])
                    persistent_volume_claim = k8s_client_dict['v1']. \
                        read_namespaced_persistent_volume_claim(
                            namespace=namespace, name=pvc_name)
                    if persistent_volume_claim.status.phase != 'Bound':
                        k8s_obj['status'] = 'Creating'
                        k8s_obj['message'] = "PersistentVolumeClaim in " \
                            "StatefulSet is still " \
                            "creating." \
                            "The status is " \
                            "{status}".format(
                                status=persistent_volume_claim.status.phase)
                    else:
                        k8s_obj['status'] = 'Create_complete'
                        k8s_obj['message'] = 'StatefulSet is created'
def _check_status_pod(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
pod = k8s_client_dict[api_version].read_namespaced_pod(
namespace=namespace, name=name)
if pod.status.phase != 'Running':
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "Pod is still creating. The status is " \
"{status}".format(status=pod.
status.phase)
else:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = "Pod is created"
    def _check_status_service(self, k8s_client_dict, k8s_obj,
                              namespace, name, api_version):
        """Check a Service: clusterIP sane and its Endpoints readable."""
        service = k8s_client_dict[api_version].read_namespaced_service(
            namespace=namespace, name=name)
        status_flag = False
        # Headless services have an empty/None clusterIP; otherwise the
        # clusterIP must be a valid IPv4 address.
        if service.spec.cluster_ip in ['', None] or \
                self._check_is_ip(service.spec.cluster_ip):
            try:
                endpoint = k8s_client_dict['v1'].\
                    read_namespaced_endpoints(namespace=namespace, name=name)
                if endpoint:
                    status_flag = True
            except Exception as e:
                # NOTE(review): 'endpoinds' typo is part of the runtime
                # message and the exception class name; kept as-is.
                msg = _('read endpoinds failed.kind:{kind}.reason:{e}'.format(
                    kind=service.kind, e=e))
                LOG.error(msg)
                raise exceptions.ReadEndpoindsFalse(error=msg)
        if status_flag:
            k8s_obj['status'] = 'Create_complete'
            k8s_obj['message'] = "Service is created"
        else:
            k8s_obj['status'] = 'Creating'
            k8s_obj['message'] = "Service is still creating." \
                                 "The status is False"
def _check_status_persistent_volume_claim(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
claim = k8s_client_dict[api_version].\
read_namespaced_persistent_volume_claim(
namespace=namespace, name=name)
if claim.status.phase != 'Bound':
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "PersistentVolumeClaim is still creating."\
"The status is {status}".\
format(status=claim.status.phase)
else:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = "PersistentVolumeClaim is created"
def _check_status_namespace(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
name_space = k8s_client_dict[api_version].read_namespace(name=name)
if name_space.status.phase != 'Active':
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "Namespace is still creating." \
"The status is {status}". \
format(status=name_space.status.phase)
else:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = "Namespace is created"
def _check_status_node(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
node = k8s_client_dict[api_version].read_node(name=name)
status_flag = False
for condition in node.status.conditions:
if condition.type == 'Ready':
if condition.status == 'True':
status_flag = True
break
else:
continue
if status_flag:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = "Node is created"
else:
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "Node is still creating." \
"The status is False"
def _check_status_persistent_volume(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
volume = k8s_client_dict[api_version].\
read_persistent_volume(name=name)
if volume.status.phase != 'Available' and \
volume.status.phase != 'Bound':
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "PersistentVolume is still creating." \
"The status is {status}". \
format(status=volume.status.phase)
else:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = "PersistentVolume is created"
def _check_status_api_service(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
api_service = k8s_client_dict[api_version].read_api_service(name=name)
status_flag = False
for condition in api_service.status.conditions:
if condition.type == 'Available':
if condition.status == 'True':
status_flag = True
break
else:
continue
if status_flag:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = "APIService is created"
else:
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "APIService is still creating." \
"The status is False"
def _check_status_daemon_set(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
daemon_set = k8s_client_dict[api_version].\
read_namespaced_daemon_set(namespace=namespace, name=name)
if daemon_set.status.desired_number_scheduled != \
daemon_set.status.number_ready:
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "DaemonSet is still creating. " \
"The DaemonSet is ready {value1}/{value2}".\
format(value1=daemon_set.status.number_ready,
value2=daemon_set.status.desired_number_scheduled)
else:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = 'DaemonSet is created'
def _check_status_deployment(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
deployment = k8s_client_dict[api_version].\
read_namespaced_deployment(namespace=namespace, name=name)
if deployment.status.replicas != deployment.status.ready_replicas:
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "Deployment is still creating. " \
"The Deployment is ready {value1}/{value2}".\
format(value1=deployment.status.ready_replicas,
value2=deployment.status.replicas
)
else:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = 'Deployment is created'
def _check_status_replica_set(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
replica_set = k8s_client_dict[api_version].\
read_namespaced_replica_set(namespace=namespace, name=name)
if replica_set.status.replicas != replica_set.status.ready_replicas:
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "ReplicaSet is still creating. " \
"The ReplicaSet is ready {value1}/{value2}".\
format(value1=replica_set.status.ready_replicas,
value2=replica_set.status.replicas
)
else:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = 'ReplicaSet is created'
def _check_status_job(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
job = k8s_client_dict[api_version].\
read_namespaced_job(namespace=namespace, name=name)
if job.spec.completions != job.status.succeeded:
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "Job is still creating." \
"The status is {status}". \
format(status=job.spec.completions)
else:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = 'Job is created'
def _check_status_volume_attachment(self, k8s_client_dict, k8s_obj,
namespace, name, api_version):
volume = k8s_client_dict[api_version].\
read_volume_attachment(name=name)
if not volume.status.attached:
k8s_obj['status'] = 'Creating'
k8s_obj['message'] = "VolumeAttachment is still creating." \
"The status is {status}". \
format(status=volume.status.attached)
else:
k8s_obj['status'] = 'Create_complete'
k8s_obj['message'] = 'VolumeAttachment is created'
def _get_pods_information(self, core_v1_api_client, deployment_info):
"""Get pod information"""
pods_information = list()
for i in range(0, len(deployment_info), 2):
namespace = deployment_info[i]
deployment_name = deployment_info[i + 1]
respone = \
core_v1_api_client.list_namespaced_pod(namespace=namespace)
for item in respone.items:
if deployment_name in item.metadata.name:
pods_information.append(item)
return pods_information
def _get_pod_status(self, pods_information):
pending_flag = False
unknown_flag = False
for pod_info in pods_information:
status = pod_info.status.phase
if status == 'Pending':
pending_flag = True
elif status == 'Unknown':
unknown_flag = True
if unknown_flag:
status = 'Unknown'
elif pending_flag:
status = 'Pending'
else:
status = 'Running'
return status
    @log.log
    def update(self, plugin, context, vnf_id, vnf_dict, vnf, auth_attr):
        """Update containerized VNF through ConfigMap data

        In Kubernetes VIM, updating VNF will be updated by updating
        ConfigMap data.  The 'config' attribute of the request is deep
        merged into the stored config and patched into every tracked
        ConfigMap.
        """
        # initialize Kubernetes APIs
        auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
        try:
            core_v1_api_client = \
                self.kubernetes.get_core_v1_api_client(auth=auth_cred)
            # update config attribute
            config_yaml = vnf_dict.get('attributes', {}).get('config', '')
            update_yaml = vnf['vnf'].get('attributes', {}).get('config', '')
            LOG.debug('yaml orig %(orig)s update %(update)s',
                      {'orig': config_yaml, 'update': update_yaml})
            # If config_yaml is None, yaml.safe_load() will raise Attribute
            # Error. So set config_yaml to {}, if it is None.
            if not config_yaml:
                config_dict = {}
            else:
                config_dict = yaml.safe_load(config_yaml) or {}
            update_dict = yaml.safe_load(update_yaml)
            # Nothing to merge: leave the VNF untouched.
            if not update_dict:
                return
            LOG.debug('dict orig %(orig)s update %(update)s',
                      {'orig': config_dict, 'update': update_dict})
            utils.deep_update(config_dict, update_dict)
            LOG.debug('dict new %(new)s update %(update)s',
                      {'new': config_dict, 'update': update_dict})
            new_yaml = yaml.safe_dump(config_dict)
            vnf_dict.setdefault('attributes', {})['config'] = new_yaml
            # vnf_id packs "namespace,name" pairs (see create()).
            deployment_info = vnf_id.split(",")
            for i in range(0, len(deployment_info), 2):
                namespace = deployment_info[i]
                deployment_name = deployment_info[i + 1]
                configmap_resp = core_v1_api_client.read_namespaced_config_map(
                    namespace=namespace,
                    name=deployment_name)
                configmap_data = configmap_resp.data
                # Only keys already present in the ConfigMap are refreshed.
                new_configmap = {key: update_dict.get(key, configmap_data[key])
                                 for key in configmap_data}
                configmap_resp.data = new_configmap
                core_v1_api_client.\
                    patch_namespaced_config_map(namespace=namespace,
                                                name=deployment_name,
                                                body=configmap_resp)
        except Exception as e:
            LOG.error('Updating VNF got an error due to %s', e)
            raise
        finally:
            self.clean_authenticate_vim(auth_cred, file_descriptor)
    @log.log
    def update_wait(self, plugin, context, vnf_id, auth_attr,
                    region_name=None):
        """Update wait function; intentionally a no-op for this VIM."""
        # TODO(phuoc): do nothing, will update it if we need actions
        pass
    def _delete_legacy(self, vnf_id, auth_cred):
        """Delete legacy resources: ConfigMap, Service, HPA and Deployment.

        Each object is deleted best-effort: per-object failures are logged
        at debug level and ignored so the remaining objects are still
        removed.
        """
        # initialize Kubernetes APIs
        try:
            core_v1_api_client = self.kubernetes.get_core_v1_api_client(
                auth=auth_cred)
            app_v1_api_client = self.kubernetes.get_app_v1_api_client(
                auth=auth_cred)
            scaling_api_client = self.kubernetes.get_scaling_api_client(
                auth=auth_cred)
            # vnf_id packs "namespace,name" pairs.
            deployment_names = vnf_id.split(COMMA_CHARACTER)
            for i in range(0, len(deployment_names), 2):
                namespace = deployment_names[i]
                deployment_name = deployment_names[i + 1]
                # delete ConfigMap if it exists
                try:
                    body = {}
                    core_v1_api_client.delete_namespaced_config_map(
                        namespace=namespace,
                        name=deployment_name,
                        body=body)
                    LOG.debug('Successfully deleted ConfigMap %s',
                              deployment_name)
                except Exception as e:
                    LOG.debug(e)
                    pass
                # delete Service if it exists
                try:
                    core_v1_api_client.delete_namespaced_service(
                        namespace=namespace,
                        name=deployment_name)
                    LOG.debug('Successfully deleted Service %s',
                              deployment_name)
                except Exception as e:
                    LOG.debug(e)
                    pass
                # delete Horizon Pod Auto-scaling if it exists
                try:
                    body = client.V1DeleteOptions()
                    scaling_api_client.\
                        delete_namespaced_horizontal_pod_autoscaler(
                            namespace=namespace,
                            name=deployment_name,
                            body=body)
                    LOG.debug('Successfully deleted Horizon Pod Auto-Scaling '
                              '%s', deployment_name)
                except Exception as e:
                    LOG.debug(e)
                    pass
                # delete Deployment if it exists; Foreground propagation
                # removes dependents (pods) before the Deployment itself.
                try:
                    body = client.V1DeleteOptions(
                        propagation_policy='Foreground',
                        grace_period_seconds=5)
                    app_v1_api_client.delete_namespaced_deployment(
                        namespace=namespace,
                        name=deployment_name,
                        body=body)
                    LOG.debug('Successfully deleted Deployment %s',
                              deployment_name)
                except Exception as e:
                    LOG.debug(e)
                    pass
        except Exception:
            raise
def _select_delete_api(self, k8s_client_dict, namespace, name,
kind, api_version, body):
"""select kubernetes delete api and call"""
def convert(name):
name_with_underscores = re.sub(
'(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2',
name_with_underscores).lower()
snake_case_kind = convert(kind)
kubernetes = translate_outputs.Transformer(
None, None, None, None)
try:
if 'namespaced' in kubernetes.method_value.get(kind):
delete_api = eval('k8s_client_dict[api_version].'
'delete_namespaced_%s' % snake_case_kind)
response = delete_api(name=name, namespace=namespace,
body=body)
else:
delete_api = eval('k8s_client_dict[api_version].'
'delete_%s' % snake_case_kind)
response = delete_api(name=name, body=body)
except Exception:
raise
return response
    def _get_pvc_list_for_delete(self, k8s_client_dict, sfs_name, namespace):
        """Return names of PVCs generated for StatefulSet *sfs_name*.

        Matches PVCs named "<template>-<statefulset>-<ordinal>" against the
        StatefulSet's volumeClaimTemplates.  Any API error is logged at
        debug level and whatever was collected so far is returned.
        """
        pvc_list_for_delete = list()
        try:
            resp_read_sfs = k8s_client_dict['apps/v1'].\
                read_namespaced_stateful_set(sfs_name, namespace)
            sfs_spec = resp_read_sfs.spec
            volume_claim_templates = list()
            volume_claim_templates = sfs_spec.volume_claim_templates
            try:
                resp_list_pvc = k8s_client_dict['v1'].\
                    list_namespaced_persistent_volume_claim(namespace)
                pvc_list = resp_list_pvc.items
                for volume_claim_template in volume_claim_templates:
                    pvc_template_metadata = volume_claim_template.metadata
                    # "<template>-<sfs>-" prefix; ordinal is matched below.
                    match_pattern = '-'.join(
                        [pvc_template_metadata.name, sfs_name, ""])
                    for pvc in pvc_list:
                        pvc_metadata = pvc.metadata
                        pvc_name = pvc_metadata.name
                        match_result = re.match(
                            match_pattern + '[0-9]+$', pvc_name)
                        if match_result is not None:
                            pvc_list_for_delete.append(pvc_name)
            except Exception as e:
                LOG.debug(e)
                pass
        except Exception as e:
            LOG.debug(e)
            pass
        return pvc_list_for_delete
    @log.log
    def _delete_k8s_obj(self, kind, k8s_client_dict, vnf_resource, body):
        """Delete one Kubernetes object tracked in *vnf_resource*.

        resource_name is "namespace,name" and resource_type is
        "api_version,kind".  For a StatefulSet, the PVCs generated from
        its volumeClaimTemplates are deleted as well.  All deletions are
        best-effort: errors are logged at debug level and ignored.
        """
        namespace = vnf_resource.resource_name.\
            split(COMMA_CHARACTER)[0]
        name = vnf_resource.resource_name.\
            split(COMMA_CHARACTER)[1]
        api_version = vnf_resource.resource_type.\
            split(COMMA_CHARACTER)[0]
        pvc_list_for_delete = list()
        # if kind is StatefulSet, create name list for deleting
        # PersistentVolumeClaim created when StatefulSet is generated.
        if kind == 'StatefulSet':
            pvc_list_for_delete = \
                self._get_pvc_list_for_delete(
                    k8s_client_dict=k8s_client_dict,
                    sfs_name=name,
                    namespace=namespace)
        # delete target k8s obj
        try:
            self._select_delete_api(
                k8s_client_dict=k8s_client_dict,
                namespace=namespace,
                name=name,
                kind=kind,
                api_version=api_version,
                body=body)
            LOG.debug('Successfully deleted resource: '
                      'kind=%(kind)s, name=%(name)s',
                      {"kind": kind, "name": name})
        except Exception as e:
            LOG.debug(e)
            pass
        if (kind == 'StatefulSet' and
                len(pvc_list_for_delete) > 0):
            for delete_pvc_name in pvc_list_for_delete:
                try:
                    k8s_client_dict['v1'].\
                        delete_namespaced_persistent_volume_claim(
                            name=delete_pvc_name,
                            namespace=namespace,
                            body=body)
                except Exception as e:
                    LOG.debug(e)
                    pass
def _get_helm_info(self, vim_connection_info):
# replace single quote to double quote
helm_info = vim_connection_info.extra.get('helm_info')
helm_info_dq = helm_info.replace("'", '"')
helm_info_dict = jsonutils.loads(helm_info_dq)
return helm_info_dict
    def _helm_uninstall(self, context, vnf_instance):
        """Run ``helm uninstall`` for every release of the VNF instance.

        Connects to the first master node listed in the VIM's helm_info
        and returns an empty list (no per-object tracking for helm
        releases).
        """
        inst_vnf_info = vnf_instance.instantiated_vnf_info
        additional_params = inst_vnf_info.additional_params
        namespace = additional_params.get('namespace', '')
        helm_inst_param_list = additional_params.get(
            'using_helm_install_param')
        vim_info = vnflcm_utils._get_vim(context,
                                         vnf_instance.vim_connection_info)
        vim_connection_info = objects.VimConnectionInfo.obj_from_primitive(
            vim_info, context)
        helm_info = self._get_helm_info(vim_connection_info)
        ip_list = helm_info.get('masternode_ip')
        username = helm_info.get('masternode_username')
        password = helm_info.get('masternode_password')
        k8s_objs = []
        # initialize HelmClient
        helmclient = helm_client.HelmClient(ip_list[0], username, password)
        for helm_inst_params in helm_inst_param_list:
            release_name = helm_inst_params.get('helmreleasename')
            # execute `helm uninstall` command
            helmclient.uninstall(release_name, namespace)
        helmclient.close_session()
        return k8s_objs
    @log.log
    def delete(self, plugin, context, vnf_id, auth_attr, region_name=None,
               vnf_instance=None, terminate_vnf_req=None):
        """Delete function

        Legacy VNFs delegate to _delete_legacy; helm-based instances are
        uninstalled via helm; otherwise every tracked resource is deleted
        in a dependency-safe order (workloads first, Namespace last).
        """
        auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
        try:
            if not vnf_instance:
                # execute legacy delete method
                self._delete_legacy(vnf_id, auth_cred)
            else:
                # check use_helm flag
                inst_vnf_info = vnf_instance.instantiated_vnf_info
                if self._is_use_helm_flag(inst_vnf_info.additional_params):
                    self._helm_uninstall(context, vnf_instance)
                    return
                # initialize Kubernetes APIs
                k8s_client_dict = self.kubernetes.\
                    get_k8s_client_dict(auth=auth_cred)
                # get V1DeleteOptions for deleting an API object
                body = {}
                vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id(
                    context, vnf_instance.id)
                if terminate_vnf_req:
                    # GRACEFUL uses the requested timeout; FORCEFUL deletes
                    # immediately (grace period 0).
                    if terminate_vnf_req.termination_type == 'GRACEFUL':
                        grace_period_seconds = terminate_vnf_req.\
                            graceful_termination_timeout
                    elif terminate_vnf_req.termination_type == 'FORCEFUL':
                        grace_period_seconds = 0
                    body = client.V1DeleteOptions(
                        propagation_policy='Foreground',
                        grace_period_seconds=grace_period_seconds)
                else:
                    body = client.V1DeleteOptions(
                        propagation_policy='Foreground')
                # follow the order below to resolve dependency when deleting
                ordered_kind = [
                    # 1.
                    'Deployment', 'Job', 'DaemonSet', 'StatefulSet',
                    # 2.
                    'Pod',
                    # 3.
                    'PersistentVolumeClaim', 'ConfigMap', 'Secret',
                    'PriorityClass',
                    # 4.
                    'PersistentVolume',
                    # 5.
                    'StorageClass',
                    # 6. Except for 1 to 5 above, delete before `Namespace`
                    'Service', 'LimitRange', 'PodTemplate', 'Node',
                    'ResourceQuota', 'ServiceAccount', 'APIService',
                    'ReplicaSet', 'ControllerRevision',
                    'HorizontalPodAutoscaler', 'Lease', 'NetworkPolicy',
                    'ClusterRole', 'ClusterRoleBinding', 'Role', 'RoleBinding',
                    'VolumeAttachment',
                    # 7. Delete `Namespace` finally
                    'Namespace'
                ]
                for kind in ordered_kind:
                    for vnf_resource in vnf_resources:
                        obj_kind = vnf_resource.resource_type.\
                            split(COMMA_CHARACTER)[1]
                        if obj_kind == kind:
                            self._delete_k8s_obj(
                                kind=obj_kind,
                                k8s_client_dict=k8s_client_dict,
                                vnf_resource=vnf_resource,
                                body=body)
        except Exception as e:
            LOG.error('Deleting VNF got an error due to %s', e)
            raise
        finally:
            self.clean_authenticate_vim(auth_cred, file_descriptor)
    def _delete_wait_legacy(self, vnf_id, auth_cred):
        """Delete wait function for legacy

        This function is used to checking a containerized VNF is deleted
        completely or not. We do it by get information of Kubernetes objects.
        When Tacker can not get any information about service, the VNF will be
        marked as deleted.
        """
        try:
            core_v1_api_client = self.kubernetes.get_core_v1_api_client(
                auth=auth_cred)
            app_v1_api_client = self.kubernetes.get_app_v1_api_client(
                auth=auth_cred)
            scaling_api_client = self.kubernetes.get_scaling_api_client(
                auth=auth_cred)
            # vnf_id packs "namespace,name" pairs.
            deployment_names = vnf_id.split(COMMA_CHARACTER)
            keep_going = True
            stack_retries = self.STACK_RETRIES
            while keep_going and stack_retries > 0:
                # count > 0 means at least one object still exists; a read
                # that raises (object gone) simply does not increment it.
                count = 0
                for i in range(0, len(deployment_names), 2):
                    namespace = deployment_names[i]
                    deployment_name = deployment_names[i + 1]
                    try:
                        core_v1_api_client.read_namespaced_config_map(
                            namespace=namespace,
                            name=deployment_name)
                        count = count + 1
                    except Exception:
                        pass
                    try:
                        core_v1_api_client.read_namespaced_service(
                            namespace=namespace,
                            name=deployment_name)
                        count = count + 1
                    except Exception:
                        pass
                    try:
                        scaling_api_client.\
                            read_namespaced_horizontal_pod_autoscaler(
                                namespace=namespace,
                                name=deployment_name)
                        count = count + 1
                    except Exception:
                        pass
                    try:
                        app_v1_api_client.read_namespaced_deployment(
                            namespace=namespace,
                            name=deployment_name)
                        count = count + 1
                    except Exception:
                        pass
                stack_retries = stack_retries - 1
                # If one of objects is still alive, keeps on waiting
                if count > 0:
                    keep_going = True
                    time.sleep(self.STACK_RETRY_WAIT)
                else:
                    keep_going = False
        except Exception as e:
            LOG.error('Deleting wait VNF got an error due to %s', e)
            raise
def _select_k8s_obj_read_api(self, k8s_client_dict, namespace, name,
kind, api_version):
"""select kubernetes read api and call"""
def convert(name):
name_with_underscores = re.sub(
'(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2',
name_with_underscores).lower()
snake_case_kind = convert(kind)
try:
if namespace:
read_api = eval('k8s_client_dict[api_version].'
'read_namespaced_%s' % snake_case_kind)
response = read_api(name=name, namespace=namespace)
else:
read_api = eval('k8s_client_dict[api_version].'
'read_%s' % snake_case_kind)
response = read_api(name=name)
except Exception:
raise
return response
    def _post_helm_uninstall(self, context, vnf_instance):
        """Clean up helm repositories and transferred chart files.

        For each master node in the VIM's helm_info: remove externally
        added repositories and, when local charts were used, delete the
        chart directory created for this VNF instance.
        """
        inst_vnf_info = vnf_instance.instantiated_vnf_info
        additional_params = inst_vnf_info.additional_params
        helm_inst_param_list = additional_params.get(
            'using_helm_install_param')
        vim_info = vnflcm_utils._get_vim(context,
                                         vnf_instance.vim_connection_info)
        vim_connection_info = objects.VimConnectionInfo.obj_from_primitive(
            vim_info, context)
        helm_info = self._get_helm_info(vim_connection_info)
        ip_list = helm_info.get('masternode_ip')
        username = helm_info.get('masternode_username')
        password = helm_info.get('masternode_password')
        del_dir = os.path.join(HELM_CHART_DIR_BASE, vnf_instance.id)
        for ip in ip_list:
            local_helm_del_flag = False
            # initialize HelmClient
            helmclient = helm_client.HelmClient(ip, username, password)
            for inst_params in helm_inst_param_list:
                if self._is_exthelmchart(inst_params):
                    repo_name = inst_params.get('helmrepositoryname')
                    # execute `helm repo remove` command
                    helmclient.remove_repository(repo_name)
                else:
                    local_helm_del_flag = True
            if local_helm_del_flag:
                helmclient.delete_helmchart(del_dir)
            helmclient.close_session()
    @log.log
    def delete_wait(self, plugin, context, vnf_id, auth_attr,
                    region_name=None, vnf_instance=None):
        """Delete wait function

        This function is used to checking a containerized VNF is deleted
        completely or not. We do it by get information of Kubernetes objects.
        When Tacker can not get any information about service, the VNF will be
        marked as deleted.
        """
        # initialize Kubernetes APIs
        auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
        try:
            if not vnf_instance:
                # execute legacy delete_wait method
                self._delete_wait_legacy(vnf_id, auth_cred)
            else:
                vnf_resources = objects.VnfResourceList.\
                    get_by_vnf_instance_id(context, vnf_instance.id)
                k8s_client_dict = self.kubernetes.\
                    get_k8s_client_dict(auth=auth_cred)
                keep_going = True
                stack_retries = self.STACK_RETRIES
                while keep_going and stack_retries > 0:
                    # count > 0 means at least one object is still readable
                    # (i.e. not yet deleted).
                    count = 0
                    for vnf_resource in vnf_resources:
                        namespace = vnf_resource.resource_name.\
                            split(COMMA_CHARACTER)[0]
                        name = vnf_resource.resource_name.\
                            split(COMMA_CHARACTER)[1]
                        api_version = vnf_resource.resource_type.\
                            split(COMMA_CHARACTER)[0]
                        kind = vnf_resource.resource_type.\
                            split(COMMA_CHARACTER)[1]
                        # Skip resources whose API group has no client.
                        if not k8s_client_dict.get(api_version):
                            continue
                        try:
                            self._select_k8s_obj_read_api(
                                k8s_client_dict=k8s_client_dict,
                                namespace=namespace,
                                name=name,
                                kind=kind,
                                api_version=api_version)
                            count = count + 1
                        except Exception:
                            pass
                    stack_retries = stack_retries - 1
                    # If one of objects is still alive, keeps on waiting
                    if count > 0:
                        keep_going = True
                        time.sleep(self.STACK_RETRY_WAIT)
                    else:
                        keep_going = False
                # check use_helm flag
                inst_vnf_info = vnf_instance.instantiated_vnf_info
                if self._is_use_helm_flag(inst_vnf_info.additional_params):
                    self._post_helm_uninstall(context, vnf_instance)
        except Exception as e:
            LOG.error('Deleting wait VNF got an error due to %s', e)
            raise
        finally:
            self.clean_authenticate_vim(auth_cred, file_descriptor)
    def _scale_legacy(self, policy, auth_cred):
        """Scale legacy VNF deployments by patching replica counts.

        For each tracked Deployment whose 'scaling_name' label matches the
        policy name, replicas are incremented ('out') or decremented
        ('in'), clamped to the HPA's min/max range.
        """
        LOG.debug("VNF are scaled by updating instance of deployment")
        app_v1_api_client = self.kubernetes.get_app_v1_api_client(
            auth=auth_cred)
        scaling_api_client = self.kubernetes.get_scaling_api_client(
            auth=auth_cred)
        # instance_id packs "namespace,name" pairs.
        deployment_names = policy['instance_id'].split(COMMA_CHARACTER)
        policy_name = policy['name']
        policy_action = policy['action']
        for i in range(0, len(deployment_names), 2):
            namespace = deployment_names[i]
            deployment_name = deployment_names[i + 1]
            deployment_info = app_v1_api_client.\
                read_namespaced_deployment(namespace=namespace,
                                           name=deployment_name)
            scaling_info = scaling_api_client.\
                read_namespaced_horizontal_pod_autoscaler(
                    namespace=namespace,
                    name=deployment_name)
            replicas = deployment_info.status.replicas
            scale_replicas = replicas
            vnf_scaling_name = deployment_info.metadata.labels.\
                get("scaling_name")
            if vnf_scaling_name == policy_name:
                if policy_action == 'out':
                    scale_replicas = replicas + 1
                elif policy_action == 'in':
                    scale_replicas = replicas - 1
                # Keep the new count within the HPA's configured bounds;
                # out-of-range requests keep the current replica count.
                min_replicas = scaling_info.spec.min_replicas
                max_replicas = scaling_info.spec.max_replicas
                if (scale_replicas < min_replicas) or \
                        (scale_replicas > max_replicas):
                    LOG.debug("Scaling replicas is out of range. The number of"
                              " replicas keeps %(number)s replicas",
                              {'number': replicas})
                    scale_replicas = replicas
            deployment_info.spec.replicas = scale_replicas
            app_v1_api_client.patch_namespaced_deployment_scale(
                namespace=namespace,
                name=deployment_name,
                body=deployment_info)
def _call_read_scale_api(self, app_v1_api_client, namespace, name, kind):
"""select kubernetes read scale api and call"""
def convert(name):
name_with_underscores = re.sub(
'(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2',
name_with_underscores).lower()
snake_case_kind = convert(kind)
try:
read_scale_api = eval('app_v1_api_client.'
'read_namespaced_%s_scale' % snake_case_kind)
response = read_scale_api(name=name, namespace=namespace)
except Exception as e:
error_reason = _("Failed the request to read a scale information."
" namespace: {namespace}, name: {name},"
" kind: {kind}, Reason: {exception}").format(
namespace=namespace, name=name, kind=kind, exception=e)
raise vnfm.CNFScaleFailed(reason=error_reason)
return response
def _call_patch_scale_api(self, app_v1_api_client, namespace, name,
kind, body):
"""select kubernetes patch scale api and call"""
def convert(name):
name_with_underscores = re.sub(
'(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2',
name_with_underscores).lower()
snake_case_kind = convert(kind)
try:
patch_scale_api = eval('app_v1_api_client.'
'patch_namespaced_%s_scale' % snake_case_kind)
response = patch_scale_api(name=name, namespace=namespace,
body=body)
except Exception as e:
error_reason = _("Failed the request to update a scale information"
". namespace: {namespace}, name: {name},"
" kind: {kind}, Reason: {exception}").format(
namespace=namespace, name=name, kind=kind, exception=e)
raise vnfm.CNFScaleFailed(reason=error_reason)
return response
    @log.log
    def scale(self, context, plugin, auth_attr, policy, region_name):
        """Scale a VNF by updating replicas through the Kubernetes API.

        Locates the Kubernetes resource (Deployment/ReplicaSet/StatefulSet)
        whose name matches the VDU's ``properties.name`` in the VNFD, then
        reads its scale sub-resource, adjusts the replica count by
        ``policy['delta_num']`` and patches it back. The resulting replica
        count is validated against the VDU profile's
        min/max_number_of_instances.

        :param policy: dict with keys 'name' (aspect id), 'action'
            ('out'/'in'), 'delta_num', 'vdu_defs', and optionally
            'vnf_instance_id' (absent -> legacy path)
        :raises vnfm.CNFScaleFailed: when no matching resource exists or the
            target replica count is out of range
        """
        # initialize Kubernetes APIs
        auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
        try:
            if not policy.get('vnf_instance_id'):
                # execute legacy scale method
                self._scale_legacy(policy, auth_cred)
            else:
                vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id(
                    context, policy['vnf_instance_id'])
                app_v1_api_client = self.kubernetes.get_app_v1_api_client(
                    auth=auth_cred)
                aspect_id = policy['name']
                vdu_defs = policy['vdu_defs']
                is_found = False
                error_reason = None
                # Only these kinds expose a scale sub-resource we handle.
                target_kinds = ["Deployment", "ReplicaSet", "StatefulSet"]
                for vnf_resource in vnf_resources:
                    # The resource that matches the following is the resource
                    # to be scaled:
                    # The `name` of the resource stored in vnf_resource (the
                    # name defined in `metadata.name` of Kubernetes object
                    # file) matches the value of `properties.name` of VDU
                    # defined in VNFD.
                    # resource_name is stored as "<namespace>,<name>".
                    name = vnf_resource.resource_name.\
                        split(COMMA_CHARACTER)[1]
                    for vdu_id, vdu_def in vdu_defs.items():
                        vdu_properties = vdu_def.get('properties')
                        if name == vdu_properties.get('name'):
                            namespace = vnf_resource.resource_name.\
                                split(COMMA_CHARACTER)[0]
                            if not namespace:
                                namespace = "default"
                            # resource_type is stored as
                            # "<apiVersion>,<kind>".
                            kind = vnf_resource.resource_type.\
                                split(COMMA_CHARACTER)[1]
                            if kind in target_kinds:
                                is_found = True
                                break
                    if is_found:
                        break
                else:
                    # for/else: no resource matched any VDU definition.
                    error_reason = _(
                        "Target VnfResource for aspectId"
                        " {aspect_id} is not found in DB").format(
                        aspect_id=aspect_id)
                    raise vnfm.CNFScaleFailed(reason=error_reason)
                scale_info = self._call_read_scale_api(
                    app_v1_api_client=app_v1_api_client,
                    namespace=namespace,
                    name=name,
                    kind=kind)
                current_replicas = scale_info.status.replicas
                vdu_profile = vdu_properties.get('vdu_profile')
                # NOTE(review): if policy['action'] is neither 'out' nor
                # 'in', scale_replicas stays unbound and the comparison
                # below raises NameError — presumably callers only pass
                # these two actions; confirm upstream validation.
                if policy['action'] == 'out':
                    scale_replicas = current_replicas + policy['delta_num']
                elif policy['action'] == 'in':
                    scale_replicas = current_replicas - policy['delta_num']
                max_replicas = vdu_profile.get('max_number_of_instances')
                min_replicas = vdu_profile.get('min_number_of_instances')
                if (scale_replicas < min_replicas) or \
                        (scale_replicas > max_replicas):
                    error_reason = _(
                        "The number of target replicas after"
                        " scaling [{after_replicas}] is out of range").\
                        format(
                        after_replicas=scale_replicas)
                    raise vnfm.CNFScaleFailed(reason=error_reason)
                scale_info.spec.replicas = scale_replicas
                self._call_patch_scale_api(
                    app_v1_api_client=app_v1_api_client,
                    namespace=namespace,
                    name=name,
                    kind=kind,
                    body=scale_info)
        except Exception as e:
            LOG.error('Scaling VNF got an error due to %s', e)
            raise
        finally:
            # Always remove the temporary CA cert file, even on failure.
            self.clean_authenticate_vim(auth_cred, file_descriptor)
    def _scale_wait_legacy(self, policy, auth_cred):
        """Wait until the scaled legacy deployment's Pods are Running.

        Polls the aggregated Pod status up to STACK_RETRIES times with
        STACK_RETRY_WAIT seconds between attempts.

        :param policy: dict whose 'instance_id' is a comma-separated
            namespace/deployment-name list
        :raises vnfm.VNFCreateWaitFailed: when Pods are not Running in time
        """
        core_v1_api_client = self.kubernetes.get_core_v1_api_client(
            auth=auth_cred)
        deployment_info = policy['instance_id'].split(",")
        pods_information = self._get_pods_information(
            core_v1_api_client=core_v1_api_client,
            deployment_info=deployment_info)
        status = self._get_pod_status(pods_information)
        stack_retries = self.STACK_RETRIES
        error_reason = None
        while status == 'Pending' and stack_retries > 0:
            time.sleep(self.STACK_RETRY_WAIT)
            pods_information = self._get_pods_information(
                core_v1_api_client=core_v1_api_client,
                deployment_info=deployment_info)
            status = self._get_pod_status(pods_information)
            # LOG.debug('status: %s', status)
            stack_retries = stack_retries - 1
        LOG.debug('VNF initializing status: %(service_name)s %(status)s',
                  {'service_name': str(deployment_info), 'status': status})
        if stack_retries == 0 and status != 'Running':
            error_reason = _("Resource creation is not completed within"
                             " {wait} seconds as creation of stack {stack}"
                             " is not completed").format(
                wait=(self.STACK_RETRIES *
                      self.STACK_RETRY_WAIT),
                stack=policy['instance_id'])
            LOG.error("VNF Creation failed: %(reason)s",
                      {'reason': error_reason})
            raise vnfm.VNFCreateWaitFailed(reason=error_reason)
        elif stack_retries != 0 and status != 'Running':
            # NOTE(review): on this branch (e.g. status 'Unknown' before
            # retries ran out) error_reason is still None, so the raised
            # exception carries no reason text — looks unintended; confirm.
            raise vnfm.VNFCreateWaitFailed(reason=error_reason)
def _is_match_pod_naming_rule(self, rsc_kind, rsc_name, pod_name):
match_result = None
if rsc_kind == 'Pod':
# Expected example: name
if rsc_name == pod_name:
match_result = True
elif rsc_kind == 'Deployment':
# Expected example: name-012789abef-019az
# NOTE(horie): The naming rule of Pod in deployment is
# "(deployment name)-(pod template hash)-(5 charactors)".
# The "pod template hash" string is generated from 32 bit hash.
# This may be from 1 to 10 caracters but not sure the lower limit
# from the source code of Kubernetes.
match_result = re.match(
rsc_name + '-([0-9a-f]{1,10})-([0-9a-z]{5})+$',
pod_name)
elif rsc_kind == 'ReplicaSet' or rsc_kind == 'DaemonSet':
# Expected example: name-019az
match_result = re.match(
rsc_name + '-([0-9a-z]{5})+$',
pod_name)
elif rsc_kind == 'StatefulSet':
# Expected example: name-0
match_result = re.match(
rsc_name + '-[0-9]+$',
pod_name)
if match_result:
return True
else:
return False
    def scale_wait(self, context, plugin, auth_attr, policy, region_name,
                   last_event_id):
        """Wait until a scale operation completes.

        Polls the Pods belonging to the scaled resource until every Pod is
        Running and the Pod count equals the requested replica count, or
        the retry budget (STACK_RETRIES * STACK_RETRY_WAIT seconds) is
        exhausted.

        :raises vnfm.CNFScaleWaitFailed: when the target resource is not in
            the DB, a Pod reports 'Unknown', or the wait times out
        """
        # initialize Kubernetes APIs
        auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
        try:
            if not policy.get('vnf_instance_id'):
                # execute legacy scale_wait method
                self._scale_wait_legacy(policy, auth_cred)
            else:
                vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id(
                    context, policy['vnf_instance_id'])
                core_v1_api_client = self.kubernetes.get_core_v1_api_client(
                    auth=auth_cred)
                app_v1_api_client = self.kubernetes.get_app_v1_api_client(
                    auth=auth_cred)
                aspect_id = policy['name']
                vdu_defs = policy['vdu_defs']
                is_found = False
                error_reason = None
                target_kinds = ["Deployment", "ReplicaSet", "StatefulSet"]
                # Same resource lookup as in scale(): match the resource name
                # (from "<namespace>,<name>") against the VDU properties.name.
                for vnf_resource in vnf_resources:
                    name = vnf_resource.resource_name.\
                        split(COMMA_CHARACTER)[1]
                    for vdu_id, vdu_def in vdu_defs.items():
                        vdu_properties = vdu_def.get('properties')
                        if name == vdu_properties.get('name'):
                            namespace = vnf_resource.resource_name.\
                                split(COMMA_CHARACTER)[0]
                            if not namespace:
                                namespace = "default"
                            kind = vnf_resource.resource_type.\
                                split(COMMA_CHARACTER)[1]
                            if kind in target_kinds:
                                is_found = True
                                break
                    if is_found:
                        break
                else:
                    # for/else: no resource matched any VDU definition.
                    error_reason = _(
                        "Target VnfResource for aspectId {aspect_id}"
                        " is not found in DB").format(
                        aspect_id=aspect_id)
                    raise vnfm.CNFScaleWaitFailed(reason=error_reason)
                # Desired replica count after the scale operation.
                scale_info = self._call_read_scale_api(
                    app_v1_api_client=app_v1_api_client,
                    namespace=namespace,
                    name=name,
                    kind=kind)
                status = 'Pending'
                stack_retries = self.STACK_RETRIES
                error_reason = None
                while status == 'Pending' and stack_retries > 0:
                    pods_information = list()
                    respone = core_v1_api_client.list_namespaced_pod(
                        namespace=namespace)
                    # Collect only the Pods owned by the scaled resource.
                    for pod in respone.items:
                        match_result = self._is_match_pod_naming_rule(
                            kind, name, pod.metadata.name)
                        if match_result:
                            pods_information.append(pod)
                    status = self._get_pod_status(pods_information)
                    # All Pods Running is not enough: the number of Pods
                    # must also match the requested replica count.
                    if status == 'Running' and \
                            scale_info.spec.replicas != len(pods_information):
                        status = 'Pending'
                    if status == 'Pending':
                        stack_retries = stack_retries - 1
                        time.sleep(self.STACK_RETRY_WAIT)
                    elif status == 'Unknown':
                        error_reason = _(
                            "CNF Scale failed caused by the Pod status"
                            " is Unknown")
                        raise vnfm.CNFScaleWaitFailed(reason=error_reason)
                if stack_retries == 0 and status != 'Running':
                    error_reason = _(
                        "CNF Scale failed to complete within"
                        " {wait} seconds while waiting for the aspect_id"
                        " {aspect_id} to be scaled").format(
                        wait=(self.STACK_RETRIES *
                              self.STACK_RETRY_WAIT),
                        aspect_id=aspect_id)
                    LOG.error("CNF Scale failed: %(reason)s",
                              {'reason': error_reason})
                    raise vnfm.CNFScaleWaitFailed(reason=error_reason)
        except Exception as e:
            LOG.error('Scaling wait CNF got an error due to %s', e)
            raise
        finally:
            # Always remove the temporary CA cert file, even on failure.
            self.clean_authenticate_vim(auth_cred, file_descriptor)
    @log.log
    def get_resource_info(self, plugin, context, vnf_info, auth_attr,
                          region_name=None):
        # Intentionally a no-op for this driver.
        # TODO(phuoc): will update it for other components
        pass
def _get_auth_creds(self, auth_cred):
file_descriptor = self._create_ssl_ca_file(auth_cred)
if ('username' not in auth_cred) and ('password' not in auth_cred):
auth_cred['username'] = 'None'
auth_cred['password'] = None
return auth_cred, file_descriptor
def _create_ssl_ca_file(self, auth_attr):
ca_cert = utils.none_from_string(auth_attr.get('ssl_ca_cert'))
if ca_cert is not None:
file_descriptor, file_path = \
self.kubernetes.create_ca_cert_tmp_file(ca_cert)
auth_attr['ca_cert_file'] = file_path
return file_descriptor
else:
return None
def clean_authenticate_vim(self, vim_auth, file_descriptor):
# remove ca_cert_file from vim_obj if it exists
# close and delete temp ca_cert_file
if file_descriptor is not None:
file_path = vim_auth.pop('ca_cert_file')
self.kubernetes.close_tmp_file(file_descriptor, file_path)
    def heal_vdu(self, plugin, context, vnf_dict, heal_request_data):
        # Legacy heal entry point; intentionally a no-op for this driver
        # (Pod healing is implemented in heal_vnf instead).
        pass
def _is_use_helm_flag(self, additional_params):
if not additional_params:
return False
use_helm = additional_params.get('use_helm')
if type(use_helm) == str:
return use_helm.lower() == 'true'
return bool(use_helm)
def _is_exthelmchart(self, helm_install_params):
exthelmchart = helm_install_params.get('exthelmchart')
if type(exthelmchart) == str:
return exthelmchart.lower() == 'true'
return bool(exthelmchart)
def _pre_helm_install(self, vim_connection_info,
instantiate_vnf_req, vnf_package_path):
def _check_param_exists(params_dict, check_param):
if check_param not in params_dict.keys():
LOG.error("{check_param} is not found".format(
check_param=check_param))
raise cs.InputValuesMissing(key=check_param)
# check helm info in vim_connection_info
if 'helm_info' not in vim_connection_info.extra.keys():
reason = "helm_info is missing in vim_connection_info.extra."
LOG.error(reason)
raise vnfm.InvalidVimConnectionInfo(reason=reason)
helm_info = self._get_helm_info(vim_connection_info)
ip_list = helm_info.get('masternode_ip', [])
username = helm_info.get('masternode_username', '')
password = helm_info.get('masternode_username', '')
if not (ip_list and username and password):
reason = "content of helm_info is invalid."
LOG.error(reason)
raise vnfm.InvalidVimConnectionInfo(reason=reason)
# check helm install params
additional_params = instantiate_vnf_req.additional_params
_check_param_exists(additional_params, 'using_helm_install_param')
helm_install_param_list = additional_params.get(
'using_helm_install_param', [])
if not helm_install_param_list:
LOG.error("using_helm_install_param is empty.")
raise cs.InputValuesMissing(key='using_helm_install_param')
for helm_install_params in helm_install_param_list:
# common parameter check
_check_param_exists(helm_install_params, 'exthelmchart')
_check_param_exists(helm_install_params, 'helmreleasename')
if self._is_exthelmchart(helm_install_params):
# parameter check (case: external helm chart)
_check_param_exists(helm_install_params, 'helmchartname')
_check_param_exists(helm_install_params, 'exthelmrepo_url')
_check_param_exists(helm_install_params, 'helmrepositoryname')
else:
# parameter check (case: local helm chart)
_check_param_exists(helm_install_params, 'helmchartfile_path')
chartfile_path = helm_install_params.get('helmchartfile_path')
abs_helm_chart_path = os.path.join(
vnf_package_path, chartfile_path)
if not os.path.exists(abs_helm_chart_path):
LOG.error('Helm chart file {path} is not found.'.format(
path=chartfile_path))
raise vnfm.CnfDefinitionNotFound(path=chartfile_path)
def _get_target_k8s_files(self, instantiate_vnf_req):
if instantiate_vnf_req.additional_params and\
CNF_TARGET_FILES_KEY in\
instantiate_vnf_req.additional_params.keys():
target_k8s_files = instantiate_vnf_req.\
additional_params['lcm-kubernetes-def-files']
else:
target_k8s_files = list()
return target_k8s_files
def _create_vnf_resource(self, context, vnf_instance, file_content_dict,
namespace=None):
vnf_resource = vnf_resource_obj.VnfResource(
context=context)
vnf_resource.vnf_instance_id = vnf_instance.id
metadata = file_content_dict.get('metadata', {})
if metadata and metadata.get('namespace', ''):
namespace = metadata.get('namespace', '')
elif namespace:
namespace = namespace
else:
namespace = ''
vnf_resource.resource_name = ','.join([
namespace, metadata.get('name', '')])
vnf_resource.resource_type = ','.join([
file_content_dict.get('apiVersion', ''),
file_content_dict.get('kind', '')])
vnf_resource.resource_identifier = ''
vnf_resource.resource_status = ''
return vnf_resource
    def pre_instantiation_vnf(self, context, vnf_instance,
                              vim_connection_info, vnf_software_images,
                              instantiate_vnf_req, vnf_package_path):
        """Validate artifacts and pre-build VnfResource objects.

        For the helm path only parameter validation is done; for the
        file-based path every requested definition file is checked against
        the package's artifact list, then parsed into VnfResource objects
        grouped by file index.

        :returns: dict mapping target file index -> list of VnfResource
            objects ({} for helm, empty dict for TOSCA-based requests)
        :raises vnfm.CnfDefinitionNotFound: requested file not an artifact
        :raises exceptions.VnfArtifactNotFound: package has no artifacts
        """
        # check use_helm flag
        if self._is_use_helm_flag(instantiate_vnf_req.additional_params):
            # parameter check
            self._pre_helm_install(
                vim_connection_info, instantiate_vnf_req, vnf_package_path)
            # NOTE: In case of using helm, vnf_resources is created
            # after `helm install` command is executed.
            return {}
        vnf_resources = dict()
        target_k8s_files = self._get_target_k8s_files(instantiate_vnf_req)
        if not target_k8s_files:
            # if artifact_files is not provided in request,
            # we consider k8s info in provided by TOSCA-based VNFD
            # and we will push the request to existed code
            return vnf_resources
        else:
            vnfd = vnfd_obj.VnfPackageVnfd.get_by_id(
                context, vnf_instance.vnfd_id)
            package_uuid = vnfd.package_uuid
            vnf_package = vnf_package_obj.VnfPackage.get_by_id(
                context, package_uuid, expected_attrs=['vnf_artifacts'])
            if vnf_package.vnf_artifacts:
                vnf_artifacts = vnf_package.vnf_artifacts
                length = len(vnf_artifacts)
                for target_k8s_file in target_k8s_files:
                    # Scan the artifact list; `break` means a match.
                    for index, vnf_artifact in enumerate(vnf_artifacts):
                        if vnf_artifact.artifact_path == target_k8s_file:
                            break
                    # NOTE(review): the match test below relies on the loop
                    # variable after break; when the LAST artifact is the
                    # match and length > 1 it is indistinguishable from
                    # "no match" by index alone — looks fragile, confirm.
                    if length > 1 and index < length - 1:
                        continue
                    LOG.debug('CNF Artifact {path} is not found.'.format(
                        path=target_k8s_file))
                    # Reset transient instance state before failing.
                    setattr(vnf_instance, 'vim_connection_info', [])
                    setattr(vnf_instance, 'task_state', None)
                    vnf_instance.save()
                    raise vnfm.CnfDefinitionNotFound(
                        path=target_k8s_file)
            else:
                LOG.debug('VNF Artifact {path} is not found.'.format(
                    path=vnf_package.vnf_artifacts))
                setattr(vnf_instance, 'vim_connection_info', [])
                setattr(vnf_instance, 'task_state', None)
                vnf_instance.save()
                raise exceptions.VnfArtifactNotFound(id=vnf_package.id)
            for target_k8s_index, target_k8s_file \
                    in enumerate(target_k8s_files):
                # Definition files may be remote URLs or paths inside the
                # VNF package.
                if ((urlparse(target_k8s_file).scheme == 'file') or
                        (bool(urlparse(target_k8s_file).scheme) and
                         bool(urlparse(target_k8s_file).netloc))):
                    file_content = urllib2.urlopen(target_k8s_file).read()
                else:
                    target_k8s_file_path = os.path.join(
                        vnf_package_path, target_k8s_file)
                    with open(target_k8s_file_path, 'r') as f:
                        file_content = f.read()
                # One file may contain multiple YAML documents.
                file_content_dict_list = yaml.safe_load_all(file_content)
                vnf_resources_temp = []
                for file_content_dict in file_content_dict_list:
                    vnf_resource = self._create_vnf_resource(
                        context, vnf_instance, file_content_dict)
                    vnf_resources_temp.append(vnf_resource)
                vnf_resources[target_k8s_index] = vnf_resources_temp
        return vnf_resources
    def delete_vnf_instance_resource(self, context, vnf_instance,
                                     vim_connection_info, vnf_resource):
        # Intentionally a no-op for this driver.
        pass
    def _helm_install(self, context, vnf_instance, vim_connection_info,
                      instantiate_vnf_req, vnf_package_path, transformer):
        """Install helm charts and record the created resources.

        Connects to every master node to prepare charts (repo add / chart
        upload), runs `helm install` only from the first node, converts
        the release manifests into Kubernetes object dicts and persists a
        VnfResource row per manifest document.

        :returns: list of k8s_obj dicts (each with status 'Creating')
        """
        additional_params = instantiate_vnf_req.additional_params
        namespace = additional_params.get('namespace', '')
        helm_inst_param_list = additional_params.get(
            'using_helm_install_param')
        helm_info = self._get_helm_info(vim_connection_info)
        ip_list = helm_info.get('masternode_ip')
        username = helm_info.get('masternode_username')
        password = helm_info.get('masternode_password')
        vnf_resources = []
        k8s_objs = []
        for ip_idx, ip in enumerate(ip_list):
            # initialize HelmClient
            helmclient = helm_client.HelmClient(ip, username, password)
            for inst_params in helm_inst_param_list:
                release_name = inst_params.get('helmreleasename')
                parameters = inst_params.get('helmparameter')
                if self._is_exthelmchart(inst_params):
                    # prepare using external helm chart
                    chart_name = inst_params.get('helmchartname')
                    repo_url = inst_params.get('exthelmrepo_url')
                    repo_name = inst_params.get('helmrepositoryname')
                    # execute `helm repo add` command
                    helmclient.add_repository(repo_name, repo_url)
                    install_chart_name = '/'.join([repo_name, chart_name])
                else:
                    # prepare using local helm chart
                    chartfile_path = inst_params.get('helmchartfile_path')
                    src_path = os.path.join(vnf_package_path, chartfile_path)
                    dst_dir = os.path.join(
                        HELM_CHART_DIR_BASE, vnf_instance.id)
                    # put helm chart file to Kubernetes controller node
                    helmclient.put_helmchart(src_path, dst_dir)
                    # Chart archive names end with "-<version>"; strip the
                    # version suffix to get the chart directory name.
                    chart_file_name = src_path[src_path.rfind(os.sep) + 1:]
                    chart_name = "-".join(chart_file_name.split("-")[:-1])
                    install_chart_name = os.path.join(dst_dir, chart_name)
                # Install only once (from the first master node); the other
                # nodes only receive the chart files above.
                if ip_idx == 0:
                    # execute `helm install` command
                    helmclient.install(release_name, install_chart_name,
                                       namespace, parameters)
                    # get manifest by using `helm get manifest` command
                    mf_content = helmclient.get_manifest(
                        release_name, namespace)
                    k8s_objs_tmp = transformer.get_k8s_objs_from_manifest(
                        mf_content, namespace)
                    for k8s_obj in k8s_objs_tmp:
                        # set status in k8s_obj to 'Creating'
                        k8s_obj['status'] = 'Creating'
                    k8s_objs.extend(k8s_objs_tmp)
                    mf_content_dicts = list(yaml.safe_load_all(mf_content))
                    for mf_content_dict in mf_content_dicts:
                        vnf_resource = self._create_vnf_resource(
                            context, vnf_instance, mf_content_dict, namespace)
                        vnf_resources.append(vnf_resource)
            helmclient.close_session()
        # save the vnf resources in the db
        for vnf_resource in vnf_resources:
            vnf_resource.create()
        return k8s_objs
    def instantiate_vnf(self, context, vnf_instance, vnfd_dict,
                        vim_connection_info, instantiate_vnf_req,
                        grant_response, vnf_package_path,
                        plugin=None):
        """Instantiate a CNF from definition files or helm charts.

        Three paths: legacy TOSCA (no files, no helm), helm install, or
        deploying the requested Kubernetes definition files. For the
        non-legacy paths the created objects are awaited and a
        semicolon-joined summary string of all deployed objects is stored
        as the instance id.

        :returns: instance id string (legacy) or the summary string of
            deployed Kubernetes objects
        """
        target_k8s_files = self._get_target_k8s_files(instantiate_vnf_req)
        auth_attr = vim_connection_info.access_info
        use_helm_flag = self._is_use_helm_flag(
            instantiate_vnf_req.additional_params)
        if not target_k8s_files and not use_helm_flag:
            # The case is based on TOSCA for CNF operation.
            # It is out of the scope of this patch.
            instance_id = self.create(
                None, context, vnf_instance, auth_attr)
            return instance_id
        else:
            auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
            k8s_client_dict = self.kubernetes.get_k8s_client_dict(auth_cred)
            transformer = translate_outputs.Transformer(
                None, None, None, k8s_client_dict)
            deployment_dict_list = list()
            if use_helm_flag:
                k8s_objs = self._helm_install(
                    context, vnf_instance, vim_connection_info,
                    instantiate_vnf_req, vnf_package_path, transformer)
            else:
                k8s_objs = transformer.\
                    get_k8s_objs_from_yaml(target_k8s_files, vnf_package_path)
                k8s_objs = transformer.deploy_k8s(k8s_objs)
            vnfd_dict['current_error_point'] = EP.POST_VIM_CONTROL
            # Wait until the created Kubernetes objects are ready.
            k8s_objs = self.create_wait_k8s(
                k8s_objs, k8s_client_dict, vnf_instance)
            # Summarize each created object into a flat dict.
            for k8s_obj in k8s_objs:
                deployment_dict = dict()
                deployment_dict['namespace'] = k8s_obj.get('namespace')
                if k8s_obj.get('object').metadata:
                    deployment_dict['name'] = k8s_obj.get('object').\
                        metadata.name
                else:
                    deployment_dict['name'] = ''
                deployment_dict['apiVersion'] = k8s_obj.get(
                    'object').api_version
                deployment_dict['kind'] = k8s_obj.get('object').kind
                deployment_dict['status'] = k8s_obj.get('status')
                deployment_dict_list.append(deployment_dict)
            deployment_str_list = [str(x) for x in deployment_dict_list]
            # all the deployment object will store into resource_info_str.
            # and the instance_id is created from all deployment_dict.
            resource_info_str = ';'.join(deployment_str_list)
            self.clean_authenticate_vim(auth_cred, file_descriptor)
            vnfd_dict['instance_id'] = resource_info_str
            return resource_info_str
def _post_helm_install(self, context, vim_connection_info,
instantiate_vnf_req, transformer):
additional_params = instantiate_vnf_req.additional_params
namespace = additional_params.get('namespace', '')
helm_inst_param_list = additional_params.get(
'using_helm_install_param')
helm_info = self._get_helm_info(vim_connection_info)
ip_list = helm_info.get('masternode_ip')
username = helm_info.get('masternode_username')
password = helm_info.get('masternode_password')
k8s_objs = []
# initialize HelmClient
helmclient = helm_client.HelmClient(ip_list[0], username, password)
for helm_inst_params in helm_inst_param_list:
release_name = helm_inst_params.get('helmreleasename')
# get manifest by using `helm get manifest` command
mf_content = helmclient.get_manifest(release_name, namespace)
k8s_objs_tmp = transformer.get_k8s_objs_from_manifest(
mf_content, namespace)
k8s_objs.extend(k8s_objs_tmp)
helmclient.close_session()
return k8s_objs
    def post_vnf_instantiation(self, context, vnf_instance,
                               vim_connection_info, instantiate_vnf_req):
        """Initially store VnfcResourceInfo after instantiation.

        Gets Pod information from the Kubernetes VIM, matches Pods to
        their owning resources by naming rule, and stores pod name,
        resource kind, metadata and VDU id on the instantiated VNF info.
        """
        auth_attr = vim_connection_info.access_info
        auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
        try:
            # get Kubernetes object files
            target_k8s_files = self._get_target_k8s_files(instantiate_vnf_req)
            vnf_package_path = vnflcm_utils._get_vnf_package_path(
                context, vnf_instance.vnfd_id)
            # initialize Transformer
            transformer = translate_outputs.Transformer(
                None, None, None, None)
            if self._is_use_helm_flag(instantiate_vnf_req.additional_params):
                # Helm path: objects come from the release manifests.
                k8s_objs = self._post_helm_install(context,
                    vim_connection_info, instantiate_vnf_req, transformer)
            else:
                # get Kubernetes object
                k8s_objs = transformer.get_k8s_objs_from_yaml(
                    target_k8s_files, vnf_package_path)
            # get TOSCA node templates
            vnfd_dict = vnflcm_utils._get_vnfd_dict(
                context, vnf_instance.vnfd_id,
                vnf_instance.instantiated_vnf_info.flavour_id)
            tosca = tosca_template.ToscaTemplate(
                parsed_params={}, a_file=False, yaml_dict_tpl=vnfd_dict)
            tosca_node_tpls = tosca.topology_template.nodetemplates
            # get vdu_ids dict {vdu_name(as pod_name): vdu_id}
            vdu_ids = {}
            for node_tpl in tosca_node_tpls:
                for node_name, node_value in node_tpl.templates.items():
                    if node_value.get('type') == "tosca.nodes.nfv.Vdu.Compute":
                        vdu_id = node_name
                        vdu_name = node_value.get('properties').get('name')
                        vdu_ids[vdu_name] = vdu_id
            # initialize Kubernetes APIs
            core_v1_api_client = self.kubernetes.get_core_v1_api_client(
                auth=auth_cred)
            target_kinds = ["Pod", "Deployment", "DaemonSet", "StatefulSet",
                            "ReplicaSet"]
            # Cache Pod lists per namespace to avoid repeated API calls.
            pod_list_dict = {}
            vnfc_resource_list = []
            for k8s_obj in k8s_objs:
                rsc_kind = k8s_obj.get('object').kind
                if rsc_kind not in target_kinds:
                    # Skip if rsc_kind is not target kind
                    continue
                rsc_name = k8s_obj.get('object').metadata.name
                namespace = k8s_obj.get('object').metadata.namespace
                if not namespace:
                    namespace = "default"
                # get V1PodList by namespace
                if namespace in pod_list_dict.keys():
                    pod_list = pod_list_dict.get(namespace)
                else:
                    pod_list = core_v1_api_client.list_namespaced_pod(
                        namespace=namespace)
                    pod_list_dict[namespace] = pod_list
                # get initially store VnfcResourceInfo after instantiation
                for pod in pod_list.items:
                    pod_name = pod.metadata.name
                    match_result = self._is_match_pod_naming_rule(
                        rsc_kind, rsc_name, pod_name)
                    if match_result:
                        # get metadata
                        metadata = {}
                        metadata[rsc_kind] = jsonutils.dumps(
                            k8s_obj.get('object').metadata.to_dict())
                        if rsc_kind != 'Pod':
                            # Also keep the Pod template metadata of the
                            # owning controller.
                            metadata['Pod'] = jsonutils.dumps(
                                k8s_obj.get('object').spec.template.metadata.
                                to_dict())
                        # generate VnfcResourceInfo
                        vnfc_resource = objects.VnfcResourceInfo()
                        vnfc_resource.id = uuidutils.generate_uuid()
                        vnfc_resource.vdu_id = vdu_ids.get(rsc_name)
                        resource = objects.ResourceHandle()
                        resource.resource_id = pod_name
                        resource.vim_level_resource_type = rsc_kind
                        vnfc_resource.compute_resource = resource
                        vnfc_resource.metadata = metadata
                        vnfc_resource_list.append(vnfc_resource)
            if vnfc_resource_list:
                inst_vnf_info = vnf_instance.instantiated_vnf_info
                inst_vnf_info.vnfc_resource_info = vnfc_resource_list
        except Exception as e:
            LOG.error('Update vnfc resource info got an error due to %s', e)
            raise
        finally:
            # Always remove the temporary CA cert file, even on failure.
            self.clean_authenticate_vim(auth_cred, file_descriptor)
def _get_vnfc_rscs_with_vnfc_id(self, inst_vnf_info, heal_vnf_request):
if not heal_vnf_request.vnfc_instance_id:
# include all vnfc resources
return [resource for resource in inst_vnf_info.vnfc_resource_info]
vnfc_resources = []
for vnfc_resource in inst_vnf_info.vnfc_resource_info:
if vnfc_resource.id in heal_vnf_request.vnfc_instance_id:
vnfc_resources.append(vnfc_resource)
return vnfc_resources
    def _get_added_pod_names(self, core_v1_api_client, inst_vnf_info, vdu_id,
                             vnfc_resource, pod_list_dict):
        """Diff actual Pods against the Pods stored in vnfcResourceInfo.

        Lists the Pods currently owned by the vnfc's resource (matched by
        naming rule, newest first) and compares them to the Pod names
        recorded for the same vdu_id.

        :param pod_list_dict: per-namespace Pod list cache (mutated)
        :returns: tuple (actual_pod_names, added_pod_names) where
            added_pod_names are actual Pods not present in the stored info
        """
        compute_resource = vnfc_resource.compute_resource
        rsc_kind = compute_resource.vim_level_resource_type
        # metadata[<kind>] holds the JSON-encoded resource metadata.
        rsc_metadata = jsonutils.loads(
            vnfc_resource.metadata.get(rsc_kind))
        namespace = rsc_metadata.get('namespace')
        if not namespace:
            namespace = "default"
        rsc_name = rsc_metadata.get('name')
        # Get pod list from kubernetes (cached per namespace)
        if namespace in pod_list_dict.keys():
            pod_list = pod_list_dict.get(namespace)
        else:
            pod_list = core_v1_api_client.list_namespaced_pod(
                namespace=namespace)
            pod_list_dict[namespace] = pod_list
        # Sort by newest creation_timestamp
        sorted_pod_list = sorted(pod_list.items, key=lambda x:
                                 x.metadata.creation_timestamp, reverse=True)
        # Get the associated pod name that runs with the actual kubernetes
        actual_pod_names = list()
        for pod in sorted_pod_list:
            match_result = self._is_match_pod_naming_rule(
                rsc_kind, rsc_name, pod.metadata.name)
            if match_result:
                actual_pod_names.append(pod.metadata.name)
        # Get the associated pod name stored in vnfcResourceInfo
        stored_pod_names = []
        for vnfc_rsc_info in inst_vnf_info.vnfc_resource_info:
            if vnfc_rsc_info.vdu_id == vnfc_resource.vdu_id:
                stored_pod_names.append(
                    vnfc_rsc_info.compute_resource.resource_id)
        # Get the added pod name that does not exist in vnfcResourceInfo
        added_pod_names = [
            actl_pn for actl_pn in actual_pod_names
            if actl_pn not in stored_pod_names
        ]
        return actual_pod_names, added_pod_names
    def heal_vnf(self, context, vnf_instance, vim_connection_info,
                 heal_vnf_request):
        """Heal vnfc instances (mapped to Pods).

        First reconciles the stored resource_id of non-target
        vnfcResourceInfo entries (Pods may have been re-created by kubelet
        and renamed), then deletes each target Pod: bare Pods are deleted
        and re-created from their saved spec; controller-owned Pods
        (Deployment/DaemonSet/StatefulSet/ReplicaSet) are only deleted and
        left to their controller to re-create.

        :raises vnfm.CNFHealFailed: on unsupported kinds, API failures, or
            deletion timeout
        """
        # initialize Kubernetes APIs
        auth_attr = vim_connection_info.access_info
        auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
        inst_vnf_info = vnf_instance.instantiated_vnf_info
        try:
            core_v1_api_client = self.kubernetes.get_core_v1_api_client(
                auth=auth_cred)
            # get vnfc_resource_info list for healing
            vnfc_resources = self._get_vnfc_rscs_with_vnfc_id(
                inst_vnf_info=inst_vnf_info,
                heal_vnf_request=heal_vnf_request
            )
            # Updates resource_id in vnfc_resource_info which are not the
            # target of healing before heal operation because they may have
            # been re-created by kubelet of Kubernetes automatically and their
            # resource_id (as Pod name) have been already changed
            updated_vdu_ids = []
            pod_list_dict = {}
            for vnfc_resource in vnfc_resources:
                vdu_id = vnfc_resource.vdu_id
                if vdu_id in updated_vdu_ids:
                    # For updated vdu_id, go to the next Loop
                    continue
                actual_pod_names, added_pod_names = self._get_added_pod_names(
                    core_v1_api_client, inst_vnf_info, vdu_id, vnfc_resource,
                    pod_list_dict)
                if added_pod_names:
                    heal_target_ids = heal_vnf_request.vnfc_instance_id
                    for vnfc_rsc in inst_vnf_info.vnfc_resource_info:
                        stored_pod_name = vnfc_rsc.compute_resource.resource_id
                        # Updated vnfcResourceInfo of the same vdu_id other
                        # than heal target
                        if (vnfc_rsc.id not in heal_target_ids) and\
                                (vdu_id == vnfc_rsc.vdu_id) and\
                                (stored_pod_name not in actual_pod_names):
                            # Assign one of the newly appeared Pod names to
                            # the stale entry.
                            pod_name = added_pod_names.pop()
                            vnfc_rsc.compute_resource.resource_id = pod_name
                            LOG.warning("Update resource_id before healing,"
                                        " vnfc_resource_info.id:%(vnfc_id)s,"
                                        " pod_name:%(pod_name)s",
                                        {'vnfc_id': vnfc_rsc.id,
                                         'pod_name': pod_name})
                            if not added_pod_names:
                                break
                updated_vdu_ids.append(vdu_id)
            for vnfc_resource in vnfc_resources:
                # Foreground propagation: wait for dependents on delete.
                body = client.V1DeleteOptions(propagation_policy='Foreground')
                compute_resource = vnfc_resource.compute_resource
                rsc_kind = compute_resource.vim_level_resource_type
                pod_name = compute_resource.resource_id
                rsc_metadata = jsonutils.loads(
                    vnfc_resource.metadata.get(rsc_kind))
                namespace = rsc_metadata.get('namespace')
                if not namespace:
                    namespace = "default"
                if rsc_kind == 'Pod':
                    rsc_name = rsc_metadata.get('name')
                    # Get pod information for re-creation before deletion
                    pod_info = core_v1_api_client.read_namespaced_pod(
                        namespace=namespace,
                        name=rsc_name
                    )
                    # Delete Pod
                    core_v1_api_client.delete_namespaced_pod(
                        namespace=namespace,
                        name=pod_name,
                        body=body
                    )
                    # Check and wait that the Pod is deleted
                    stack_retries = self.STACK_RETRIES
                    for cnt in range(self.STACK_RETRIES):
                        try:
                            core_v1_api_client.read_namespaced_pod(
                                namespace=namespace,
                                name=pod_name
                            )
                        except Exception as e:
                            # NOTE(review): e.status assumes a kubernetes
                            # ApiException; any other exception type would
                            # raise AttributeError here — confirm.
                            if e.status == 404:
                                # 404 means the Pod is gone: deletion done.
                                break
                            else:
                                error_reason = _("Failed the request to read a"
                                    " Pod information. namespace: {namespace},"
                                    " pod_name: {name}, kind: {kind}, Reason: "
                                    "{exception}").format(
                                    namespace=namespace, name=pod_name,
                                    kind=rsc_kind, exception=e)
                                raise vnfm.CNFHealFailed(reason=error_reason)
                        stack_retries = stack_retries - 1
                        time.sleep(self.STACK_RETRY_WAIT)
                    # Number of retries exceeded retry count
                    # NOTE(review): the message below is missing a space
                    # between "completed" and "within" (runtime string,
                    # left as-is here).
                    if stack_retries == 0:
                        error_reason = _("Resource healing is not completed"
                            "within {wait} seconds").format(wait=(
                                self.STACK_RETRIES * self.STACK_RETRY_WAIT))
                        LOG.error("CNF Healing failed: %(reason)s",
                                  {'reason': error_reason})
                        raise vnfm.CNFHealFailed(reason=error_reason)
                    # Recreate pod using retained pod_info
                    transformer = translate_outputs.Transformer(
                        None, None, None, None)
                    metadata = transformer.get_object_meta(rsc_metadata)
                    body = client.V1Pod(metadata=metadata, spec=pod_info.spec)
                    core_v1_api_client.create_namespaced_pod(
                        namespace=namespace,
                        body=body
                    )
                elif (rsc_kind in ['Deployment', 'DaemonSet', 'StatefulSet',
                                   'ReplicaSet']):
                    try:
                        # Delete Pod (Pod is automatically re-created)
                        core_v1_api_client.delete_namespaced_pod(
                            namespace=namespace,
                            name=pod_name,
                            body=body
                        )
                    except Exception as e:
                        if e.status == 404:
                            # If when the pod to be deleted does not exist,
                            # change resource_id to "POD_NOT_FOUND"
                            compute_resource = vnfc_resource.compute_resource
                            compute_resource.resource_id = VNFC_POD_NOT_FOUND
                            LOG.warning("Target pod to delete is not found,"
                                        " vnfc_resource_info.id:%(vnfc_id)s,"
                                        " pod_name:%(pod_name)s",
                                        {'vnfc_id': vnfc_resource.id,
                                         'pod_name': pod_name})
                        else:
                            error_reason = _("Failed the request to delete a "
                                "Pod. namespace: {namespace}, pod_name: {name}"
                                ", kind: {kind}, Reason: {exception}").format(
                                namespace=namespace, name=pod_name,
                                kind=rsc_kind, exception=e)
                            raise vnfm.CNFHealFailed(reason=error_reason)
                else:
                    # NOTE(review): "Kubertnetes" typo is in the runtime
                    # message (left as-is here).
                    error_reason = _(
                        "{vnfc_instance_id} is a kind of Kubertnetes"
                        " resource that is not covered").format(
                        vnfc_instance_id=vnfc_resource.id)
                    LOG.error("CNF Heal failed: %(reason)s",
                              {'reason': error_reason})
                    raise vnfm.CNFHealFailed(reason=error_reason)
        except Exception as e:
            LOG.error('Healing CNF got an error due to %s', e)
            raise
        finally:
            # Always remove the temporary CA cert file, even on failure.
            self.clean_authenticate_vim(auth_cred, file_descriptor)
    def heal_vnf_wait(self, context, vnf_instance,
                      vim_connection_info, heal_vnf_request):
        """Wait until every Pod touched by a heal operation is RUNNING.

        Polls the Kubernetes API up to ``self.STACK_RETRIES`` times, sleeping
        ``self.STACK_RETRY_WAIT`` seconds between polls.  Raises
        ``CNFHealWaitFailed`` if any Pod reports ``Unknown`` status or if the
        retry budget is exhausted before all Pods reach ``Running``.
        """
        # initialize Kubernetes APIs
        auth_attr = vim_connection_info.access_info
        auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
        try:
            core_v1_api_client = self.kubernetes.get_core_v1_api_client(
                auth=auth_cred)
            app_v1_api_client = self.kubernetes.get_app_v1_api_client(
                auth=auth_cred)
            vnfc_resources = self._get_vnfc_rscs_with_vnfc_id(
                inst_vnf_info=vnf_instance.instantiated_vnf_info,
                heal_vnf_request=heal_vnf_request)
            # Exclude entries where pods were not found when heal
            vnfc_resources = [rsc for rsc in vnfc_resources
                              if rsc.compute_resource.
                              resource_id != VNFC_POD_NOT_FOUND]
            if not vnfc_resources:
                # If heal is not running, wait is no need
                return
            # Get kubernetes resource information from target vnfcResourceInfo
            # (one {kind, name, namespace} dict per VNFC resource).
            k8s_resources = list()
            for vnfc_resource in vnfc_resources:
                info = {}
                compute_resource = vnfc_resource.compute_resource
                info['kind'] = compute_resource.vim_level_resource_type
                rsc_metadata = jsonutils.loads(
                    vnfc_resource.metadata.get(info['kind']))
                info['name'] = rsc_metadata.get('name')
                info['namespace'] = rsc_metadata.get('namespace')
                if not info['namespace']:
                    info['namespace'] = "default"
                k8s_resources.append(info)
            # exclude duplicate entries
            # (dicts are not hashable, so round-trip through JSON strings)
            k8s_resources = list(map(jsonutils.loads,
                                 set(map(jsonutils.dumps, k8s_resources))))
            # get replicas of scalable resources for checking number of pod
            scalable_kinds = ["Deployment", "ReplicaSet", "StatefulSet"]
            for k8s_resource in k8s_resources:
                if k8s_resource.get('kind') in scalable_kinds:
                    scale_info = self._call_read_scale_api(
                        app_v1_api_client=app_v1_api_client,
                        namespace=k8s_resource.get('namespace'),
                        name=k8s_resource.get('name'),
                        kind=k8s_resource.get('kind'))
                    k8s_resource['replicas'] = scale_info.spec.replicas
            # Poll until all related Pods report Running (or retries run out).
            stack_retries = self.STACK_RETRIES
            status = 'Pending'
            while status == 'Pending' and stack_retries > 0:
                pods_information = []
                pod_list_dict = {}
                is_unmatch_pods_num = False
                # Get related pod information and check status
                for k8s_resource in k8s_resources:
                    namespace = k8s_resource.get('namespace')
                    # Cache the per-namespace Pod listing across resources.
                    if namespace in pod_list_dict.keys():
                        pod_list = pod_list_dict.get(namespace)
                    else:
                        pod_list = core_v1_api_client.list_namespaced_pod(
                            namespace=k8s_resource.get('namespace'))
                        pod_list_dict[namespace] = pod_list
                    tmp_pods_info = list()
                    for pod in pod_list.items:
                        match_result = self._is_match_pod_naming_rule(
                            k8s_resource.get('kind'),
                            k8s_resource.get('name'),
                            pod.metadata.name)
                        if match_result:
                            tmp_pods_info.append(pod)
                    # NOTE(ueha): The status of pod being deleted is retrieved
                    # as "Running", which cause incorrect information to be
                    # stored in vnfcResouceInfo. Therefore, for the scalable
                    # kinds, by comparing the actual number of pods with the
                    # replicas, it can wait until the pod deletion is complete
                    # and store correct information to vnfcResourceInfo.
                    if k8s_resource.get('kind') in scalable_kinds and \
                            k8s_resource.get('replicas') != len(tmp_pods_info):
                        LOG.warning("Unmatch number of pod. (kind: %(kind)s,"
                            " name: %(name)s, replicas: %(replicas)s,"
                            " actual_pod_num: %(actual_pod_num)s)", {
                                'kind': k8s_resource.get('kind'),
                                'name': k8s_resource.get('name'),
                                'replicas': str(k8s_resource.get('replicas')),
                                'actual_pod_num': str(len(tmp_pods_info))})
                        is_unmatch_pods_num = True
                    pods_information.extend(tmp_pods_info)
                status = self._get_pod_status(pods_information)
                if status == 'Unknown':
                    # Unknown is treated as unrecoverable: fail immediately.
                    error_reason = _("Pod status is found Unknown")
                    LOG.warning("CNF Healing failed: %(reason)s",
                                {'reason': error_reason})
                    raise vnfm.CNFHealWaitFailed(reason=error_reason)
                elif status == 'Pending' or is_unmatch_pods_num:
                    time.sleep(self.STACK_RETRY_WAIT)
                    stack_retries = stack_retries - 1
                    # Keep polling even if status itself was not Pending
                    # (pod-count mismatch also forces another round).
                    status = 'Pending'
            if stack_retries == 0 and status != 'Running':
                error_reason = _("Resource healing is not completed within"
                                 " {wait} seconds").format(
                                     wait=(self.STACK_RETRIES *
                                           self.STACK_RETRY_WAIT))
                LOG.error("CNF Healing failed: %(reason)s",
                          {'reason': error_reason})
                raise vnfm.CNFHealWaitFailed(reason=error_reason)
        except Exception as e:
            LOG.error('Healing wait CNF got an error due to %s', e)
            raise
        finally:
            self.clean_authenticate_vim(auth_cred, file_descriptor)
    def post_heal_vnf(self, context, vnf_instance, vim_connection_info,
                      heal_vnf_request):
        """Update VnfcResourceInfo after healing.

        For Deployment/DaemonSet/ReplicaSet-backed VNFCs the Pod name changes
        when a Pod is re-created, so stored ``resource_id`` values are
        refreshed from the live cluster.  Pod and StatefulSet entries keep
        their names and are skipped.
        """
        # initialize Kubernetes APIs
        auth_attr = vim_connection_info.access_info
        auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
        inst_vnf_info = vnf_instance.instantiated_vnf_info
        try:
            core_v1_api_client = self.kubernetes.get_core_v1_api_client(
                auth=auth_cred)
            vnfc_resources = self._get_vnfc_rscs_with_vnfc_id(
                inst_vnf_info=inst_vnf_info,
                heal_vnf_request=heal_vnf_request
            )
            # initialize
            updated_vdu_ids = []
            pod_list_dict = {}
            for vnfc_resource in vnfc_resources:
                vdu_id = vnfc_resource.vdu_id
                if vdu_id in updated_vdu_ids:
                    # For updated vdu_id, go to the next Loop
                    continue
                compute_resource = vnfc_resource.compute_resource
                rsc_kind = compute_resource.vim_level_resource_type
                pod_name = compute_resource.resource_id
                if rsc_kind == 'Pod' or rsc_kind == 'StatefulSet':
                    # No update required as the pod name does not change
                    continue
                # Update vnfcResourceInfo when other rsc_kind
                # (Deployment, DaemonSet, ReplicaSet)
                actual_pod_names, added_pod_names = self._get_added_pod_names(
                    core_v1_api_client, inst_vnf_info, vdu_id, vnfc_resource,
                    pod_list_dict)
                updated_vnfc_ids = []
                # Update entries that pod was not found when heal_vnf method
                # (placeholder VNFC_POD_NOT_FOUND entries get a real name).
                if added_pod_names:
                    for vnfc_rsc in vnfc_resources:
                        rsc_id = vnfc_rsc.compute_resource.resource_id
                        if vdu_id == vnfc_rsc.vdu_id and \
                                rsc_id == VNFC_POD_NOT_FOUND:
                            pod_name = added_pod_names.pop()
                            vnfc_rsc.compute_resource.resource_id = pod_name
                            LOG.warning("Update resource_id of the"
                                        " entry where the pod was not found,"
                                        " vnfc_resource_info.id:%(vnfc_id)s,"
                                        " new podname:%(pod_name)s",
                                        {'vnfc_id': vnfc_rsc.id,
                                         'pod_name': pod_name})
                            updated_vnfc_ids.append(vnfc_rsc.id)
                            if not added_pod_names:
                                break
                # Update entries that was healed successful
                if added_pod_names:
                    for vnfc_rsc_id in heal_vnf_request.vnfc_instance_id:
                        if vnfc_rsc_id in updated_vnfc_ids:
                            # If the entry has already been updated,
                            # go to the next loop
                            continue
                        for vnfc_rsc in vnfc_resources:
                            if vdu_id == vnfc_rsc.vdu_id and \
                                    vnfc_rsc_id == vnfc_rsc.id:
                                pod_name = added_pod_names.pop()
                                compute_resource = vnfc_rsc.compute_resource
                                compute_resource.resource_id = pod_name
                        if not added_pod_names:
                            break
                updated_vdu_ids.append(vdu_id)
        except Exception as e:
            LOG.error('Post healing CNF got an error due to %s', e)
            raise
        finally:
            self.clean_authenticate_vim(auth_cred, file_descriptor)
    def change_ext_conn_vnf(self, context, vnf_instance, vnf_dict,
                            vim_connection_info, change_ext_conn_req):
        """Not supported by this driver."""
        raise NotImplementedError()
    def change_ext_conn_vnf_wait(self, context, vnf_instance,
                                 vim_connection_info):
        """Not supported by this driver."""
        raise NotImplementedError()
    def post_change_ext_conn_vnf(self, context, vnf_instance,
                                 vim_connection_info):
        """Not supported by this driver."""
        raise NotImplementedError()
def get_scale_ids(self,
plugin,
context,
vnf_dict,
auth_attr,
region_name):
return_id_list = []
return return_id_list
def get_scale_in_ids(self,
plugin,
context,
vnf_dict,
is_reverse,
auth_attr,
region_name,
number_of_steps):
return_id_list = []
return_name_list = []
return_grp_id = None
return_res_num = None
return return_id_list, return_name_list, return_grp_id, return_res_num
    def scale_resource_update(self, context, vnf_instance,
                              scale_vnf_request, vnf_info,
                              vim_connection_info):
        """Update VnfcResourceInfo after scaling.

        Reconciles the stored VNFC entries for the scaled VDU against the
        Pods actually present in the cluster: entries whose Pod disappeared
        are removed, and new Pods gain fresh VnfcResourceInfo entries.
        """
        auth_attr = vim_connection_info.access_info
        auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
        inst_vnf_info = vnf_instance.instantiated_vnf_info
        try:
            # initialize Kubernetes APIs
            core_v1_api_client = self.kubernetes.get_core_v1_api_client(
                auth=auth_cred)
            vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id(
                context, vnf_instance.id)
            # get scale target informations
            vnfd_dict = vnflcm_utils._get_vnfd_dict(context,
                                                    vnf_instance.vnfd_id,
                                                    inst_vnf_info.flavour_id)
            tosca = tosca_template.ToscaTemplate(parsed_params={},
                                                 a_file=False,
                                                 yaml_dict_tpl=vnfd_dict)
            extract_policy_infos = vnflcm_utils.get_extract_policy_infos(tosca)
            vdu_defs = vnflcm_utils.get_target_vdu_def_dict(
                extract_policy_infos=extract_policy_infos,
                aspect_id=scale_vnf_request.aspect_id,
                tosca=tosca)
            # Locate the scalable resource backing one of the target VDUs.
            is_found = False
            target_kinds = ["Deployment", "ReplicaSet", "StatefulSet"]
            for vnf_resource in vnf_resources:
                # For CNF operations, Kubernetes resource information is
                # stored in vnfc_resource as follows:
                #   - resource_name : "namespace,name"
                #   - resource_type : "api_version,kind"
                rsc_name = vnf_resource.resource_name.split(',')[1]
                for vdu_id, vdu_def in vdu_defs.items():
                    vdu_properties = vdu_def.get('properties')
                    if rsc_name == vdu_properties.get('name'):
                        namespace = vnf_resource.resource_name.split(',')[0]
                        rsc_kind = vnf_resource.resource_type.split(',')[1]
                        target_vdu_id = vdu_id
                        if rsc_kind in target_kinds:
                            is_found = True
                            break
                if is_found:
                    break
            # extract stored Pod names by vdu_id
            stored_pod_list = []
            metadata = None
            for vnfc_resource in inst_vnf_info.vnfc_resource_info:
                if vnfc_resource.vdu_id == target_vdu_id:
                    stored_pod_list.append(
                        vnfc_resource.compute_resource.resource_id)
                    if not metadata:
                        # get metadata for new VnfcResourceInfo entry
                        metadata = vnfc_resource.metadata
            # get actual Pod name list
            pod_list = core_v1_api_client.list_namespaced_pod(
                namespace=namespace)
            actual_pod_list = []
            for pod in pod_list.items:
                match_result = self._is_match_pod_naming_rule(
                    rsc_kind, rsc_name, pod.metadata.name)
                if match_result:
                    actual_pod_list.append(pod.metadata.name)
            # Remove the reduced pods from VnfcResourceInfo
            del_index = []
            for index, vnfc in enumerate(inst_vnf_info.vnfc_resource_info):
                if vnfc.compute_resource.resource_id not in actual_pod_list \
                        and vnfc.vdu_id == target_vdu_id:
                    del_index.append(index)
            # Pop from the end so earlier indices stay valid.
            for ind in reversed(del_index):
                inst_vnf_info.vnfc_resource_info.pop(ind)
            # Add the increased pods to VnfcResourceInfo
            for actual_pod_name in actual_pod_list:
                if actual_pod_name not in stored_pod_list:
                    add_vnfc_resource = objects.VnfcResourceInfo()
                    add_vnfc_resource.id = uuidutils.generate_uuid()
                    add_vnfc_resource.vdu_id = target_vdu_id
                    resource = objects.ResourceHandle()
                    resource.resource_id = actual_pod_name
                    resource.vim_level_resource_type = rsc_kind
                    add_vnfc_resource.compute_resource = resource
                    add_vnfc_resource.metadata = metadata
                    inst_vnf_info.vnfc_resource_info.append(
                        add_vnfc_resource)
        except Exception as e:
            LOG.error('Update vnfc resource info got an error due to %s', e)
            raise
        finally:
            self.clean_authenticate_vim(auth_cred, file_descriptor)
def scale_in_reverse(self,
context,
plugin,
auth_attr,
vnf_info,
scale_vnf_request,
region_name,
scale_name_list,
grp_id):
# NOTE(ueha): The `is_reverse` option is not supported in kubernetes
# VIM, and returns an error response to the user if `is_reverse` is
# true. However, since this method is called in the sequence of
# rollback operation, implementation is required.
vnf_instance_id = vnf_info['vnf_lcm_op_occ'].vnf_instance_id
aspect_id = scale_vnf_request.aspect_id
vnf_instance = objects.VnfInstance.get_by_id(context, vnf_instance_id)
vnfd_dict = vnflcm_utils._get_vnfd_dict(context,
vnf_instance.vnfd_id,
vnf_instance.instantiated_vnf_info.flavour_id)
tosca = tosca_template.ToscaTemplate(parsed_params={}, a_file=False,
yaml_dict_tpl=vnfd_dict)
extract_policy_infos = vnflcm_utils.get_extract_policy_infos(tosca)
policy = dict()
policy['name'] = aspect_id
policy['action'] = 'in'
policy['vnf_instance_id'] = vnf_instance_id
policy['vdu_defs'] = vnflcm_utils.get_target_vdu_def_dict(
extract_policy_infos=extract_policy_infos,
aspect_id=scale_vnf_request.aspect_id,
tosca=tosca)
policy['delta_num'] = vnflcm_utils.get_scale_delta_num(
extract_policy_infos=extract_policy_infos,
aspect_id=scale_vnf_request.aspect_id)
self.scale(context, plugin, auth_attr, policy, region_name)
    def scale_out_initial(self,
                          context,
                          plugin,
                          auth_attr,
                          vnf_info,
                          scale_vnf_request,
                          region_name):
        """No initial scale-out preparation is required for this driver."""
        pass
def scale_update_wait(self,
context,
plugin,
auth_attr,
vnf_info,
region_name):
lcm_op_occ = vnf_info.get('vnf_lcm_op_occ')
vnf_instance_id = lcm_op_occ.get('vnf_instance_id')
operation_params = jsonutils.loads(lcm_op_occ.get('operation_params'))
scale_vnf_request = objects.ScaleVnfRequest.obj_from_primitive(
operation_params, context=context)
aspect_id = scale_vnf_request.aspect_id
vnf_instance = objects.VnfInstance.get_by_id(context, vnf_instance_id)
vnfd_dict = vnflcm_utils._get_vnfd_dict(context,
vnf_instance.vnfd_id,
vnf_instance.instantiated_vnf_info.flavour_id)
tosca = tosca_template.ToscaTemplate(parsed_params={}, a_file=False,
yaml_dict_tpl=vnfd_dict)
extract_policy_infos = vnflcm_utils.get_extract_policy_infos(tosca)
policy = dict()
policy['name'] = aspect_id
policy['vnf_instance_id'] = lcm_op_occ.get('vnf_instance_id')
policy['vdu_defs'] = vnflcm_utils.get_target_vdu_def_dict(
extract_policy_infos=extract_policy_infos,
aspect_id=scale_vnf_request.aspect_id,
tosca=tosca)
self.scale_wait(context, plugin, auth_attr, policy,
region_name, None)
    def get_cinder_list(self,
                        vnf_info):
        """No Cinder volumes are managed by this driver; do nothing."""
        pass
    def get_grant_resource(self,
                           plugin,
                           vnf_instance,
                           vnf_info,
                           scale_vnf_request,
                           placement_obj_list,
                           vim_connection_info,
                           del_list):
        """No grant resources are produced by this driver; do nothing."""
        pass
def get_rollback_ids(self,
plugin,
context,
vnf_dict,
aspect_id,
auth_attr,
region_name):
return_id_list = []
return_name_list = []
return_grp_id = None
return return_id_list, return_name_list, return_grp_id
| {
"content_hash": "8f787038c8dfede1473c25ea9574164c",
"timestamp": "",
"source": "github",
"line_count": 2504,
"max_line_length": 79,
"avg_line_length": 47.028354632587856,
"alnum_prop": 0.5125637955485356,
"repo_name": "stackforge/tacker",
"id": "844f409b8aae3b59957eddbbc374fd12bdf4f14a",
"size": "118359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tacker/vnfm/infra_drivers/kubernetes/kubernetes_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1142"
},
{
"name": "Python",
"bytes": "1143026"
},
{
"name": "Shell",
"bytes": "26584"
}
],
"symlink_target": ""
} |
import functools
# Cache execution results.
# Note: avoid applying this decorator to functions whose arguments vary
# widely, since every distinct argument tuple is cached and the cache can
# grow without bound.
def memoize(func):
    """Cache the results of the function.

    Generalized to accept keyword arguments as well: the cache key is the
    positional-argument tuple plus a frozenset of the keyword items, so any
    call with hashable arguments is memoized.  The "Calling ..." line is
    printed only on a cache miss (i.e. when `func` actually runs).
    """
    cache = {}
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # frozenset makes the kwargs part of the key order-insensitive.
        key = (args, frozenset(kwargs.items()))
        if key in cache:
            return cache[key]
        print('Calling {}()'.format(func.__name__))
        result = func(*args, **kwargs)
        cache[key] = result
        return result
    return wrapper
@memoize
def addvalues(x,y):
    """Return the sum of ``x`` and ``y`` (results are memoized)."""
    return x+y
if __name__ == '__main__':
    # Demo: the second (4, 5) call is served from the cache, so the
    # "Calling addvalues()" line is printed only for the first and third.
    demo_calls = [('First', 4, 5), ('Second', 4, 5), ('Third', 2, 5)]
    for label, x, y in demo_calls:
        print('{} calling...'.format(label))
        print('{}'.format(addvalues(x, y)))
| {
"content_hash": "057bd0d4f4e1e8e5b860cb5a7ee20746",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 114,
"avg_line_length": 24.060606060606062,
"alnum_prop": 0.5894206549118388,
"repo_name": "helloTC/LearnPython",
"id": "035c4ec35204141bf7ad20cc79efee18d3f85cb6",
"size": "832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "new_usage/decorator_memoization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "90925"
}
],
"symlink_target": ""
} |
from formcreator import Form, markdown
from forms import *
import communicate_server
from utils import pformat
@pformat
def train_func(transform_id, data_namespace):
    """Post a train request for *transform_id* over *data_namespace*."""
    payload = {
        'TransformId': transform_id,
        'DataNamespace': data_namespace,
    }
    return communicate_server.post("train", payload)
# Build the "Train" form: the markdown below is the user-visible help text,
# and the two sub-forms supply train_func's arguments.
# FIX: corrected the typo "it's ancestors" -> "its ancestors" in the help text.
train = (Form(train_func, name="Train", desc=markdown(u"""
Train
=====
Trains a transform in the data graph, with the input namespace. This operation automatically trains and evaluates its ancestors in the data graph. This operation is idempotent.
"""))
         + transform_id_form()
         + data_namespace_form()
         )
| {
"content_hash": "b7257940e5c83c81a4da52db1aa95e66",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 177,
"avg_line_length": 28.681818181818183,
"alnum_prop": 0.7242472266244057,
"repo_name": "diogo149/ProtoML-clojure",
"id": "dfada7b3ebfc78e4454401cff6c5b88002589b91",
"size": "631",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/train.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "43858"
},
{
"name": "Python",
"bytes": "9213"
}
],
"symlink_target": ""
} |
from typing import Optional, Union
import re
def decamelize(name: str) -> str:
    """Decamelize `name`. Convert `CamelCase` into `Camel Case`."""
    # Two passes: first split acronym/word boundaries, then lower->upper ones.
    word_boundary = re.compile(r'(.)([A-Z][a-z]+)')
    tail_boundary = re.compile(r'([a-z0-9])([A-Z])')
    return tail_boundary.sub(r'\1 \2', word_boundary.sub(r'\1 \2', name))
def to_identifier(name: str) -> str:
    """Replace spaces with underscores ``_`` and make the string lower-case."""
    # str.replace is clearer (and faster) than a regex for a literal
    # single-character substitution.
    return name.replace(' ', '_').lower()
def to_bool(value: Union[str, bool, int]) -> Optional[bool]:
    """Convert a string to boolean. True values: 1, yes, true, false values:
    0, no, false, otherwise returns `None`.

    Raises:
        TypeError: if `value` is not a bool, int or str.
    """
    if isinstance(value, bool):
        # bool is a subclass of int, so this check must come first.
        return value
    elif isinstance(value, int):
        return bool(value)
    elif isinstance(value, str):
        lower: str = value.lower()
        if lower in ("1", "true", "yes"):
            return True
        elif lower in ("0", "false", "no"):
            return False
        else:
            return None
    else:
        # BUG FIX: the two adjacent literals previously concatenated to
        # "...of type '{}to bool" — the closing quote and space were missing.
        raise TypeError("Can't convert value of type '{}' "
                        "to bool".format(type(value)))
| {
"content_hash": "8f67a64157d5d61757af1c706a6c7587",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 79,
"avg_line_length": 30.885714285714286,
"alnum_prop": 0.5596669750231268,
"repo_name": "Stiivi/entigen",
"id": "fa406b772ba8a560d1d9d8413d25cd16c268dfcc",
"size": "1081",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "entigen/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42583"
}
],
"symlink_target": ""
} |
from sklearn import datasets
from sklearn.model_selection import train_test_split
from ray.util.xgboost import RayDMatrix, RayParams, train
# __train_begin__
# Resource sizing for the xgboost-ray actors (kept at 1/1 for this example).
num_cpus_per_actor = 1
num_actors = 1
def train_model(config):
    """Train an XGBoost classifier on the breast-cancer dataset via xgboost-ray.

    ``config`` holds the XGBoost params (injected by Ray Tune); the fitted
    booster is saved to ``model.xgb`` in the trial's working directory.
    """
    # Load dataset
    data, labels = datasets.load_breast_cancer(return_X_y=True)
    # Split into train and test set
    train_x, test_x, train_y, test_y = train_test_split(
        data, labels, test_size=0.25)
    train_set = RayDMatrix(train_x, train_y)
    test_set = RayDMatrix(test_x, test_y)
    evals_result = {}
    # Distributed training across `num_actors` Ray actors.
    bst = train(
        params=config,
        dtrain=train_set,
        evals=[(test_set, "eval")],
        evals_result=evals_result,
        verbose_eval=False,
        ray_params=RayParams(
            num_actors=num_actors, cpus_per_actor=num_cpus_per_actor))
    bst.save_model("model.xgb")
# __train_end__
# __load_begin__
def load_best_model(best_logdir):
    """Restore the tuned booster saved as ``model.xgb`` under *best_logdir*."""
    import os
    import xgboost as xgb
    booster = xgb.Booster()
    model_path = os.path.join(best_logdir, "model.xgb")
    booster.load_model(model_path)
    return booster
# __load_end__
def main():
    """Tune XGBoost hyperparameters with Ray Tune, then reload the best model."""
    # __tune_begin__
    from ray import tune
    # Set config
    config = {
        "tree_method": "approx",
        "objective": "binary:logistic",
        "eval_metric": ["logloss", "error"],
        "eta": tune.loguniform(1e-4, 1e-1),
        "subsample": tune.uniform(0.5, 1.0),
        "max_depth": tune.randint(1, 9)
    }
    # __tune_end__
    # __tune_run_begin__
    # Run 4 trials, minimizing the evaluation error reported by train_model.
    analysis = tune.run(
        train_model,
        config=config,
        metric="eval-error",
        mode="min",
        num_samples=4,
        resources_per_trial={
            "cpu": 1,
            "extra_cpu": num_actors * num_cpus_per_actor
        })
    # Load in the best performing model.
    best_bst = load_best_model(analysis.best_logdir)
    # Use the following code block instead if using Ray Client.
    # import ray
    # if ray.util.client.ray.is_connected():
    #     # If using Ray Client best_logdir is a directory on the server.
    #     # So we want to make sure we wrap model loading in a task.
    #     remote_load_fn = ray.remote(load_best_model)
    #     best_bst = ray.get(remote_load_fn.remote(analysis.best_logdir))
    # Do something with the best model.
    _ = best_bst
    accuracy = 1. - analysis.best_result["eval-error"]
    print(f"Best model parameters: {analysis.best_config}")
    print(f"Best model total accuracy: {accuracy:.4f}")
    # __tune_run_end__
# Script entry point.
if __name__ == "__main__":
    main()
| {
"content_hash": "fcb82b78fead860adadfefed96321312",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 73,
"avg_line_length": 26.072164948453608,
"alnum_prop": 0.6010280743376829,
"repo_name": "pcmoritz/ray-1",
"id": "395cea50a1a0a52ab3b8d26ee27f3810c209dc37",
"size": "2529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/util/xgboost/simple_tune.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "70670"
},
{
"name": "C++",
"bytes": "4670851"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Dockerfile",
"bytes": "14159"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1338604"
},
{
"name": "JavaScript",
"bytes": "914"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "10523389"
},
{
"name": "Shell",
"bytes": "117557"
},
{
"name": "Smarty",
"bytes": "239"
},
{
"name": "Starlark",
"bytes": "238506"
},
{
"name": "TypeScript",
"bytes": "259269"
}
],
"symlink_target": ""
} |
from time import clock
def timing(func):
    """Decorator for timing

    Calculates how long the function takes to execute, prints the duration,
    and returns the wrapped function's result unchanged.
    """
    # time.clock was deprecated in 3.3 and removed in Python 3.8;
    # perf_counter is its documented replacement for interval timing.
    from functools import wraps
    from time import perf_counter

    @wraps(func)  # preserve __name__/__doc__ of the wrapped function
    def wrapper(*args, **kwargs):
        t1 = perf_counter()
        result = func(*args, **kwargs)
        t2 = perf_counter()
        f_name = func.__name__
        # FIX: also corrected the "Excuted" typo in the message.
        print("Executed {} in {} seconds".format(f_name, t2 - t1))
        return result
    return wrapper
| {
"content_hash": "09c0b66b63c6686cf5a7d05d8b2c4a52",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 65,
"avg_line_length": 21.63157894736842,
"alnum_prop": 0.583941605839416,
"repo_name": "pabloriutort/Aula",
"id": "5cae87a475896fd221a454f88feb219772b5f094",
"size": "433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Algorithm-Design/timer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "121308"
},
{
"name": "C++",
"bytes": "269"
},
{
"name": "CSS",
"bytes": "412"
},
{
"name": "Common Lisp",
"bytes": "33313"
},
{
"name": "Gnuplot",
"bytes": "734"
},
{
"name": "HTML",
"bytes": "27380"
},
{
"name": "Java",
"bytes": "9682"
},
{
"name": "JavaScript",
"bytes": "13651"
},
{
"name": "Makefile",
"bytes": "4538"
},
{
"name": "PHP",
"bytes": "1243"
},
{
"name": "Prolog",
"bytes": "9189"
},
{
"name": "Python",
"bytes": "50888"
},
{
"name": "Shell",
"bytes": "2119"
},
{
"name": "TeX",
"bytes": "24986"
},
{
"name": "XSLT",
"bytes": "1235"
}
],
"symlink_target": ""
} |
"""
All the reusable scripts are kept in this file
"""
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
def get_object_or_none(model, **kwargs):
    """Return the single *model* instance matching **kwargs, or None if absent."""
    result = None
    try:
        result = model.objects.get(**kwargs)
    except model.DoesNotExist:
        pass
    return result
class SetupViewMixin(object):
    """Mixin that runs :meth:`setup` before a request is dispatched.

    Subclasses override :meth:`setup` to perform per-request preparation.
    """
    def setup(self):
        """Hook for subclasses; the default does nothing."""
        pass
    def dispatch(self, request, *args, **kwargs):
        # Run the setup hook, then continue down the normal dispatch chain.
        self.setup()
        parent = super(SetupViewMixin, self)
        return parent.dispatch(request, *args, **kwargs)
"content_hash": "ad0154396656e1d2a1d662ef2416368e",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 78,
"avg_line_length": 27,
"alnum_prop": 0.6763285024154589,
"repo_name": "chhantyal/referly",
"id": "a5589cb2376734ba00e4e3b39eafb65690a6d945",
"size": "621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "referly/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1210"
},
{
"name": "HTML",
"bytes": "22020"
},
{
"name": "JavaScript",
"bytes": "3765"
},
{
"name": "Python",
"bytes": "41319"
}
],
"symlink_target": ""
} |
"""Tests for tensorflow.ops.reshape_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class ReshapeTest(tf.test.TestCase):
  """Exercises tf.reshape against numpy reshape and static shape inference."""

  def _testReshape(self, x, y, use_gpu=False):
    # Compare tf.reshape's output shape against numpy's reshape.
    with self.test_session(use_gpu=use_gpu):
      np_ans = x.reshape(y)
      tf_ans = tf.reshape(x, y)
      out = tf_ans.eval()
      self.assertEqual(tf_ans.get_shape(), out.shape)
      self.assertShapeEqual(np_ans, tf_ans)

  def _testBothReshape(self, x, y):
    # Run the same check on CPU and GPU.
    self._testReshape(x, y, False)
    self._testReshape(x, y, True)

  def testFloatBasic(self):
    x = np.arange(1., 7.).reshape([1, 6]).astype(np.float32)
    self._testBothReshape(x, [2, 3])

  def testDoubleBasic(self):
    x = np.arange(1., 7.).reshape([1, 6]).astype(np.float64)
    self._testBothReshape(x, [2, 3])

  def testInt32Basic(self):
    x = np.arange(1., 7.).reshape([1, 6]).astype(np.int32)
    self._testBothReshape(x, [2, 3])

  def testComplex64Basic(self):
    x = np.arange(1., 7.).reshape([1, 6]).astype(np.complex64)
    self._testBothReshape(x, [2, 3])

  def testComplex128Basic(self):
    x = np.arange(1., 7.).reshape([1, 6]).astype(np.complex128)
    self._testBothReshape(x, [2, 3])

  def testFloatReshapeThreeDimensions(self):
    x = np.arange(1., 28.).reshape([1, 27]).astype(np.float32)
    self._testBothReshape(x, [3, 3, 3])

  def testFloatUnspecifiedDimOnly(self):
    x = np.arange(1., 7.).reshape([6]).astype(np.float32)
    self._testBothReshape(x, [-1])

  def testFloatUnspecifiedDimBegin(self):
    x = np.arange(1., 7.).reshape([6]).astype(np.float32)
    self._testBothReshape(x, [-1, 2])

  def testFloatUnspecifiedDimEnd(self):
    x = np.arange(1., 7.).reshape([6]).astype(np.float32)
    self._testBothReshape(x, [3, -1])

  # TODO(vrv): Add tests for failure conditions once python test_util
  # reports errors.
  def testFloatReshapeGradThreeDimensions(self):
    x = np.arange(1., 25.).reshape([2, 3, 4]).astype(np.float32)
    s = list(np.shape(x))
    with self.test_session():
      input_tensor = tf.constant(x)
      reshape_out = tf.reshape(input_tensor, [1, 8, 3])
      err = tf.test.compute_gradient_error(input_tensor,
                                           s,
                                           reshape_out,
                                           s,
                                           x_init_value=x)
      # BUG FIX: the original line was `"Reshape gradient error = " % err`,
      # which raises TypeError ("not all arguments converted") because the
      # format string has no conversion specifier.
      print("Reshape gradient error = %s" % err)
      self.assertLess(err, 1e-3)

  def testFloatEmpty(self):
    x = np.empty((0, 0, 0, 0), dtype=np.float32)
    self._testBothReshape(x, [1, 2, 3, 0])
    self._testBothReshape(x, [1, 0, 0, 4])
    self._testBothReshape(x, [0, 0, 0, 0])
    self._testBothReshape(x, [1, 2, 0])
    self._testBothReshape(x, [0, 0, 0])
    self._testBothReshape(x, [1, -1, 5])

  def testErrors(self):
    y = tf.constant(0.0, shape=[23, 29, 31])
    with self.assertRaisesRegexp(ValueError, "must be evenly divisible by 17"):
      tf.reshape(y, [17, -1])
    z = tf.constant(0.0, shape=[32, 128])
    with self.assertRaisesRegexp(ValueError,
                                 "Cannot reshape a tensor with 4096 elements"):
      tf.reshape(z, [4095])

  def testPartialShapes(self):
    x = tf.placeholder(tf.float32)
    # Unknown input shape, partial new shape.
    y = tf.reshape(x, [1, 1, -1, 1])
    self.assertEqual([1, 1, None, 1], y.get_shape().as_list())
    # Unknown input shape, unknown new shape.
    y = tf.reshape(x, tf.placeholder(tf.int32))
    self.assertEqual(None, y.get_shape().ndims)
    # Unknown input shape, known rank for new shape.
    y = tf.reshape(x, tf.placeholder(tf.int32, shape=(3,)))
    self.assertEqual([None, None, None], y.get_shape().as_list())
    # Unknown input shape, partial new shape using `tf.pack()`.
    y = tf.reshape(x, [tf.placeholder(tf.int32), 37])
    self.assertEqual([None, 37], y.get_shape().as_list())
    # Unknown input shape, partial new shape using `tf.concat()`.
    y = tf.reshape(x, tf.concat(0, [tf.placeholder(tf.int32, shape=(2,)),
                                    [37, 42]]))
    self.assertEqual([None, None, 37, 42], y.get_shape().as_list())
    # Unknown input shape, partial new shape using `tf.shape()`.
    y = tf.reshape(x, tf.shape(tf.placeholder(tf.float32,
                                              shape=[None, 37, None])))
    self.assertEqual([None, 37, None], y.get_shape().as_list())
# Run the test suite when executed directly.
if __name__ == "__main__":
  tf.test.main()
| {
"content_hash": "d2b911dd56973de1e1f6f69548f4233d",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 79,
"avg_line_length": 35.44094488188976,
"alnum_prop": 0.5978671406354144,
"repo_name": "tongwang01/tensorflow",
"id": "8e62be107be3544f5e848c88ca4778f14477381a",
"size": "5191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/reshape_op_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "177722"
},
{
"name": "C++",
"bytes": "11252614"
},
{
"name": "CMake",
"bytes": "36462"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "GCC Machine Description",
"bytes": "2"
},
{
"name": "HTML",
"bytes": "968188"
},
{
"name": "Java",
"bytes": "41615"
},
{
"name": "JavaScript",
"bytes": "10844"
},
{
"name": "Jupyter Notebook",
"bytes": "1974767"
},
{
"name": "Makefile",
"bytes": "21265"
},
{
"name": "Objective-C",
"bytes": "6942"
},
{
"name": "Objective-C++",
"bytes": "61636"
},
{
"name": "Protocol Buffer",
"bytes": "122032"
},
{
"name": "Python",
"bytes": "9724114"
},
{
"name": "Shell",
"bytes": "243989"
},
{
"name": "TypeScript",
"bytes": "429623"
}
],
"symlink_target": ""
} |
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class VpnConnectionOperations(object):
    """VpnConnectionOperations operations.

    Autorest-generated client operations for creating VPN connections via
    ARM template deployments.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.config = config
    def create_or_update(
            self, resource_group_name, deployment_name, connection_type, virtual_network_gateway_connection_name, vnet_gateway1_id, content_version=None, express_route_circuit2_id=None, local_gateway2_id=None, location=None, routing_weight=10, shared_key="none", vnet_gateway2_id=None, custom_headers=None, raw=False, **operation_config):
        """
        Create or update a VPN connection via an ARM deployment.

        :param resource_group_name: The name of the resource group. The name
         is case insensitive.
        :type resource_group_name: str
        :param deployment_name: The name of the deployment.
        :type deployment_name: str
        :param connection_type: Connection type. Possible values include:
         'IPSec', 'Vnet2Vnet', 'ExpressRoute'
        :type connection_type: str or :class:`connectionType
         <vpnconnectioncreationclient.models.connectionType>`
        :param virtual_network_gateway_connection_name: Connection name.
        :type virtual_network_gateway_connection_name: str
        :param vnet_gateway1_id: Connect from this gateway to another gateway
         or express route circuit.
        :type vnet_gateway1_id: str
        :param content_version: If included it must match the ContentVersion
         in the template.
        :type content_version: str
        :param express_route_circuit2_id: Connect to this express route
         circuit from vnet gateway 1 using connection type ExpressRoute.
        :type express_route_circuit2_id: str
        :param local_gateway2_id: Connect to this local gateway from vnet
         gateway 1 using connection type IPSec.
        :type local_gateway2_id: str
        :param location: Location for resource.
        :type location: str
        :param routing_weight: Connection routing weight.
        :type routing_weight: int
        :param shared_key: IPSec shared key.
        :type shared_key: str
        :param vnet_gateway2_id: Connect to this vnet gateway from vnet
         gateway 1 using connection type Vnet2Vnet.
        :type vnet_gateway2_id: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`DeploymentExtended
         <default.models.DeploymentExtended>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # Bundle all connection settings into the deployment model body.
        parameters = models.DeploymentVpnConnection(content_version=content_version, connection_type=connection_type, express_route_circuit2_id=express_route_circuit2_id, local_gateway2_id=local_gateway2_id, location=location, routing_weight=routing_weight, shared_key=shared_key, virtual_network_gateway_connection_name=virtual_network_gateway_connection_name, vnet_gateway1_id=vnet_gateway1_id, vnet_gateway2_id=vnet_gateway2_id)
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=64, min_length=1, pattern='^[-\w\._]+$'),
            'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern='^[-\w\._]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'DeploymentVpnConnection')
        # Construct and send request
        def long_running_send():
            # Issue the initial PUT that starts the ARM deployment.
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)
        def get_long_running_status(status_link, headers=None):
            # Poll the operation-status URL returned by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)
        def get_long_running_output(response):
            # Only 200/201 carry a DeploymentExtended body; anything else
            # is surfaced as a CloudError.
            if response.status_code not in [200, 201]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = None
            if response.status_code == 200:
                deserialized = self._deserialize('DeploymentExtended', response)
            if response.status_code == 201:
                deserialized = self._deserialize('DeploymentExtended', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        if raw:
            # raw=True bypasses the poller: send once and return directly.
            response = long_running_send()
            return get_long_running_output(response)
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
| {
"content_hash": "3bbed54ef5dbff7a3923d7c4764045df",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 431,
"avg_line_length": 47.758620689655174,
"alnum_prop": 0.6681588447653429,
"repo_name": "BurtBiel/azure-cli",
"id": "fcdaedd048307f7f009055e187ccd9322f769f85",
"size": "7613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/command_modules/azure-cli-network/azure/cli/command_modules/network/mgmt_vpn_connection/lib/operations/vpn_connection_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "429"
},
{
"name": "Python",
"bytes": "2108820"
},
{
"name": "Shell",
"bytes": "3300"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
# testinfo/tags are not read by this script itself; presumably they are
# consumed by the cocos automated-test harness to drive the run - TODO confirm.
testinfo = "s, t 1.5, s, t 3.1, s, q"
tags = "Draw, parameter"
import cocos
from cocos.director import director
from cocos.actions import Lerp, Reverse, Repeat
from cocos import draw
import pyglet, math
class TestFigure(draw.Canvas):
    """Canvas drawing a rotationally-symmetric spiral figure.

    ``line_width`` is declared as a draw.parameter so it can be animated
    with Lerp actions; render() re-reads it on every redraw.
    """

    line_width = draw.parameter(5)

    def render(self):
        win_w, win_h = director.get_window_size()
        step_y = 15
        step_x = 15
        stroke = self.line_width

        self.set_color((255, 255, 0, 125))
        self.set_stroke_width(stroke)

        parts = 5
        # draw lines
        self.set_endcap(draw.ROUND_CAP)
        self.translate((win_w // 2, win_h // 2))
        for _ in range(parts):
            self.move_to((0, 0))
            self.rotate(2 * math.pi / parts)
            self.push()
            for _ in range(parts):
                self.line_to((step_x, step_y))
                self.translate((step_x, step_y))
                self.rotate(math.pi / parts)
            self.pop()
class TestLayer(cocos.layer.Layer):
    """Layer hosting the figure and the action animating its line width."""

    def __init__(self):
        super(TestLayer, self).__init__()
        figure = TestFigure()
        self.add(figure)
        grow = Lerp("line_width", 5, 55, 3)
        # Ping-pong the stroke width between 5 and 55 forever.
        figure.do(Repeat(grow + Reverse(grow)))
def main():
    """Initialise the director and run a scene containing TestLayer."""
    director.init()
    scene = cocos.scene.Scene(TestLayer())
    director.run(scene)


if __name__ == '__main__':
    main()
| {
"content_hash": "67d83317a897f2e824fec25c3d14a0cc",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 72,
"avg_line_length": 26.728813559322035,
"alnum_prop": 0.5662650602409639,
"repo_name": "shujunqiao/cocos2d-python",
"id": "e9e3b2226b8c7e818bc67e2c8599f7339d62f29d",
"size": "1577",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "test/test_draw_parameter.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "22381"
},
{
"name": "Python",
"bytes": "1271799"
},
{
"name": "Shell",
"bytes": "7097"
}
],
"symlink_target": ""
} |
"""Remove Blink references from the gclient DEPS file contents."""
import ast
import re
import sys
def CleanupDeps(deps):
  """Strip Blink (third_party/WebKit) references from DEPS file contents.

  Args:
    deps: string contents of a gclient DEPS file.

  Returns:
    The contents with all webkit_* vars and the src/third_party/WebKit
    dependency entry removed.

  Raises:
    AssertionError: if the input or the cleaned result fails to parse
      as Python (the DEPS smoke test).
  """
  assert ast.parse(deps), 'DEPS (original) smoke test (AST parsing) failed'
  # remove webkit_* vars
  # BUG FIX: the fourth positional argument of re.sub is *count*, not
  # *flags*; passing re.MULTILINE (== 8) there silently capped the number
  # of substitutions at 8.  Pass flags= explicitly instead.
  deps = re.sub(r"['\"]webkit_\w+['\"]:[^,]+,(?:\s*\#.*)?\s*", '', deps,
                flags=re.MULTILINE)
  # remove the third_party/WebKit DEPS entry.
  deps = re.sub(r"['\"]src/third_party/WebKit['\"]:[^,]+,\s*", '', deps,
                flags=re.MULTILINE)
  # (DON'T) remove the lastchange hook.
  # deps = re.sub(r"\{[^}]+LASTCHANGE.blink[^}]+\},\s*", '', deps, re.MULTILINE)
  # Assume that if DEPS is still python-parsable we succeeded.
  assert ast.parse(deps), 'DEPS smoke test (AST parsing) failed'
  return deps
if __name__ == '__main__':
if len(sys.argv) > 1:
print >>sys.stderr, 'Updating DEPS from file: ', sys.argv[1]
with open(sys.argv[1]) as f:
input_deps = f.read()
else:
print >>sys.stderr, 'Reading DEPS from stdin'
input_deps = sys.stdin.read()
output_deps = CleanupDeps(input_deps)
if len(sys.argv) > 1:
with open(sys.argv[1], 'w') as f:
f.write(output_deps)
else:
print output_deps
| {
"content_hash": "d586d2cfce1dcd9e9b9399a73e70073b",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 86,
"avg_line_length": 29.307692307692307,
"alnum_prop": 0.6185476815398075,
"repo_name": "primiano/chrome-blink-automerger",
"id": "1b3336331eddafd2cdebeb5521b81c805b6b3b11",
"size": "1328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "history_rewrite_scripts/deps_cleanup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "30073"
}
],
"symlink_target": ""
} |
"""
Manage SNS Topics
=================
Create and destroy SNS topics. Be aware that this interacts with Amazon's
services, and so may incur charges.
This module uses ``boto``, which can be installed via package, or pip.
This module accepts explicit AWS credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More information available `here
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_.
If IAM roles are not used you need to specify them either in a pillar file or
in the minion's config file:
.. code-block:: yaml
sns.keyid: GKTADJGHEIQSXMKKRBJ08H
sns.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either
passed in as a dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
.. code-block:: yaml
mytopic:
boto3_sns.topic_present:
- region: us-east-1
- keyid: GKTADJGHEIQSXMKKRBJ08H
- key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
Using a profile from pillars
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code-block:: yaml
mytopic:
boto3_sns.topic_present:
- region: us-east-1
- profile: mysnsprofile
Passing in a profile
^^^^^^^^^^^^^^^^^^^^
.. code-block:: yaml
mytopic:
boto3_sns.topic_present:
- region: us-east-1
- profile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
"""
import copy
import logging
import re
import salt.utils.json
log = logging.getLogger(__name__)
def __virtual__():
    """
    Only load if boto is available.
    """
    if "boto3_sns.topic_exists" not in __salt__:
        return (False, "boto3_sns module could not be loaded")
    return "boto3_sns"
def topic_present(
    name,
    subscriptions=None,
    attributes=None,
    region=None,
    key=None,
    keyid=None,
    profile=None,
):
    """
    Ensure the SNS topic exists.

    name
        Name of the SNS topic.

    subscriptions
        List of SNS subscriptions.

        Each subscription is a dictionary with a protocol and endpoint key:

        .. code-block:: yaml

            subscriptions:
            - Protocol: https
              Endpoint: https://www.example.com/sns-endpoint
            - Protocol: sqs
              Endpoint: arn:aws:sqs:us-west-2:123456789012:MyQueue

    attributes
        Dictionary of attributes to set on the SNS topic
        Valid attribute keys are:

        - Policy: the JSON serialization of the topic's access control policy
        - DisplayName: the human-readable name used in the "From" field for notifications
          to email and email-json endpoints
        - DeliveryPolicy: the JSON serialization of the topic's delivery policy

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    # Only populate ret["changes"] at the end if some AWS call actually
    # modified state; test-mode and no-op runs leave it empty.
    something_changed = False

    current = __salt__["boto3_sns.describe_topic"](name, region, key, keyid, profile)
    if current:
        ret["comment"] = "AWS SNS topic {} present.".format(name)
        TopicArn = current["TopicArn"]
    else:
        if __opts__["test"]:
            ret["comment"] = "AWS SNS topic {} would be created.".format(name)
            ret["result"] = None
            return ret
        else:
            TopicArn = __salt__["boto3_sns.create_topic"](
                name, region=region, key=key, keyid=keyid, profile=profile
            )
            if TopicArn:
                ret["comment"] = "AWS SNS topic {} created with ARN {}.".format(
                    name, TopicArn
                )
                something_changed = True
            else:
                ret["comment"] = "Failed to create AWS SNS topic {}".format(name)
                log.error(ret["comment"])
                ret["result"] = False
                return ret

    ### Update any explicitly defined attributes
    want_attrs = attributes if attributes else {}
    # Freshen these in case we just created it above
    current_attrs = __salt__["boto3_sns.get_topic_attributes"](
        TopicArn, region=region, key=key, keyid=keyid, profile=profile
    )
    for attr in ["DisplayName", "Policy", "DeliveryPolicy"]:
        curr_val = current_attrs.get(attr)
        want_val = want_attrs.get(attr)
        # Some get default values if not set, so it's not safe to enforce absence if they're
        # not provided at all.  This implies that if you want to clear a value, you must explicitly
        # set it to an empty string.
        if want_val is None:
            continue
        # Compare as canonically-ordered JSON so formatting differences
        # (key order, whitespace) don't trigger spurious updates.
        if _json_objs_equal(want_val, curr_val):
            continue
        if __opts__["test"]:
            ret["comment"] += " Attribute {} would be updated on topic {}.".format(
                attr, TopicArn
            )
            ret["result"] = None
            continue
        # The SNS API takes attribute values as strings; serialize dicts.
        want_val = (
            want_val if isinstance(want_val, str) else salt.utils.json.dumps(want_val)
        )
        if __salt__["boto3_sns.set_topic_attributes"](
            TopicArn,
            attr,
            want_val,
            region=region,
            key=key,
            keyid=keyid,
            profile=profile,
        ):
            ret["comment"] += " Attribute {} set to {} on topic {}.".format(
                attr, want_val, TopicArn
            )
            something_changed = True
        else:
            # On failure, bail out immediately; ret["changes"] stays empty
            # even if earlier attributes were updated successfully.
            ret["comment"] += " Failed to update {} on topic {}.".format(
                attr, TopicArn
            )
            ret["result"] = False
            return ret

    ### Add / remove subscriptions
    want_subs = subscriptions if subscriptions else []
    obfuscated_subs = []
    current_subs = current.get("Subscriptions", [])
    # Reduce current subscriptions to just the fields we can compare.
    current_slim = [
        {"Protocol": s["Protocol"], "Endpoint": s["Endpoint"]} for s in current_subs
    ]
    subscribe = []
    unsubscribe = []
    for sub in want_subs:
        # If the subscription contains inline digest auth, AWS will obfuscate the password with
        # '****'.  Thus we need to do the same with ours to permit 1-to-1 comparison.
        # Example:  https://user:****@my.endpoiint.com/foo/bar
        endpoint = sub["Endpoint"]
        matches = re.search(r"https://(?P<user>\w+):(?P<pass>\w+)@", endpoint)
        if matches is not None:
            sub["Endpoint"] = endpoint.replace(
                ":" + matches.groupdict()["pass"], ":****"
            )
        obfuscated_subs += [copy.deepcopy(sub)]
        # Now set it back...
        if sub not in current_slim:
            # Not already subscribed: restore the real (unobfuscated)
            # endpoint before sending it to AWS.
            sub["Endpoint"] = endpoint
            subscribe += [sub]
    for sub in current_subs:
        minimal = {"Protocol": sub["Protocol"], "Endpoint": sub["Endpoint"]}
        # Only real (confirmed) subscriptions have an ARN we can remove;
        # pending ones don't start with "arn:aws:sns:".
        if minimal not in obfuscated_subs and sub["SubscriptionArn"].startswith(
            "arn:aws:sns:"
        ):
            unsubscribe += [sub["SubscriptionArn"]]
    for sub in subscribe:
        prot = sub["Protocol"]
        endp = sub["Endpoint"]
        if __opts__["test"]:
            msg = " Subscription {}:{} would be set on topic {}.".format(
                prot, endp, TopicArn
            )
            ret["comment"] += msg
            ret["result"] = None
            continue
        subbed = __salt__["boto3_sns.subscribe"](
            TopicArn, prot, endp, region=region, key=key, keyid=keyid, profile=profile
        )
        if subbed:
            msg = " Subscription {}:{} set on topic {}.".format(prot, endp, TopicArn)
            ret["comment"] += msg
            something_changed = True
        else:
            msg = " Failed to set subscription {}:{} on topic {}.".format(
                prot, endp, TopicArn
            )
            ret["comment"] += msg
            ret["result"] = False
            return ret
    for sub in unsubscribe:
        if __opts__["test"]:
            msg = " Subscription {} would be removed from topic {}.".format(
                sub, TopicArn
            )
            ret["comment"] += msg
            ret["result"] = None
            continue
        unsubbed = __salt__["boto3_sns.unsubscribe"](
            sub, region=region, key=key, keyid=keyid, profile=profile
        )
        if unsubbed:
            ret["comment"] += " Subscription {} removed from topic {}.".format(
                sub, TopicArn
            )
            something_changed = True
        else:
            msg = " Failed to remove subscription {} from topic {}.".format(
                sub, TopicArn
            )
            ret["comment"] += msg
            ret["result"] = False
            return ret

    if something_changed:
        ret["changes"]["old"] = current
        # Re-describe to capture the post-change state for the changes dict.
        ret["changes"]["new"] = __salt__["boto3_sns.describe_topic"](
            name, region, key, keyid, profile
        )
    return ret
def topic_absent(
    name, unsubscribe=False, region=None, key=None, keyid=None, profile=None
):
    """
    Ensure the named sns topic is deleted.

    name
        Name of the SNS topic.

    unsubscribe
        If True, unsubscribe all subscriptions to the SNS topic before
        deleting the SNS topic

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    # Only populate ret["changes"] if an AWS call actually deleted something.
    something_changed = False

    current = __salt__["boto3_sns.describe_topic"](name, region, key, keyid, profile)
    if not current:
        ret["comment"] = "AWS SNS topic {} absent.".format(name)
    else:
        TopicArn = current["TopicArn"]
        if __opts__["test"]:
            ret["comment"] = "AWS SNS topic {} would be removed.".format(TopicArn)
            if unsubscribe:
                ret["comment"] += " {} subscription(s) would be removed.".format(
                    len(current["Subscriptions"])
                )
            ret["result"] = None
            return ret
        if unsubscribe:
            for sub in current["Subscriptions"]:
                if sub["SubscriptionArn"] == "PendingConfirmation":
                    # The API won't let you delete subscriptions in pending status...
                    log.warning(
                        "Ignoring PendingConfirmation subscription %s %s on topic %s",
                        sub["Protocol"],
                        sub["Endpoint"],
                        sub["TopicArn"],
                    )
                    continue
                if __salt__["boto3_sns.unsubscribe"](
                    sub["SubscriptionArn"],
                    region=region,
                    key=key,
                    keyid=keyid,
                    profile=profile,
                ):
                    log.debug("Deleted subscription %s for SNS topic %s", sub, TopicArn)
                    something_changed = True
                else:
                    # Abort on the first failure; the topic is left in place.
                    ret[
                        "comment"
                    ] = "Failed to delete subscription {} for SNS topic {}".format(
                        sub, TopicArn
                    )
                    ret["result"] = False
                    return ret
        if not __salt__["boto3_sns.delete_topic"](
            TopicArn, region=region, key=key, keyid=keyid, profile=profile
        ):
            ret["comment"] = "Failed to delete SNS topic {}".format(TopicArn)
            log.error(ret["comment"])
            ret["result"] = False
        else:
            ret["comment"] = "AWS SNS topic {} deleted.".format(TopicArn)
            if unsubscribe:
                ret["comment"] += " ".join(
                    [
                        "Subscription {} deleted".format(s)
                        for s in current["Subscriptions"]
                    ]
                )
            something_changed = True

    if something_changed:
        ret["changes"]["old"] = current
        # Expected to be None/empty after a successful delete.
        ret["changes"]["new"] = __salt__["boto3_sns.describe_topic"](
            name, region, key, keyid, profile
        )
    return ret
def _json_objs_equal(left, right):
    """Compare two values as canonically-ordered JSON structures.

    Strings are parsed as JSON first; both sides are then normalized with
    the boto3.ordered util so key order does not affect the comparison.
    """
    def _canonical(value):
        if isinstance(value, str):
            value = salt.utils.json.loads(value)
        return __utils__["boto3.ordered"](value)

    return _canonical(left) == _canonical(right)
| {
"content_hash": "2173763e0b0608736d71284100a818eb",
"timestamp": "",
"source": "github",
"line_count": 392,
"max_line_length": 99,
"avg_line_length": 33.1530612244898,
"alnum_prop": 0.5407048322560788,
"repo_name": "saltstack/salt",
"id": "30509176e702fa18cf3b2641b5a96f03c807ab96",
"size": "12996",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt/states/boto3_sns.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from datetime import datetime, timedelta
import unittest
from werkzeug.wrappers import Response
from flask_webcache.modifiers import cache_for, cache_control
from testutils import compare_datetimes
class ModifiersTestCase(unittest.TestCase):
    """Tests for the cache_for / cache_control response modifiers."""

    def test_cache_for(self):
        response = Response()
        cache_for(minutes=5).modify_response(response)
        expected = datetime.utcnow() + timedelta(minutes=5)
        self.assertTrue(compare_datetimes(response.expires, expected))

    def test_two_cache_fors(self):
        # The later modifier wins: expires reflects the 3-minute value.
        response = Response()
        cache_for(minutes=5).modify_response(response)
        cache_for(minutes=3).modify_response(response)
        expected = datetime.utcnow() + timedelta(minutes=3)
        self.assertTrue(compare_datetimes(response.expires, expected))

    def test_cache_control(self):
        response = Response()
        cache_control(public=True).modify_response(response)
        self.assertTrue(response.cache_control.public)

    def test_bad_cache_control(self):
        with self.assertRaises(TypeError):
            cache_control(foo=True)

    def test_additive_cache_control(self):
        # Existing directives are preserved when new ones are applied.
        response = Response()
        response.cache_control.no_transform = True
        cache_control(public=True).modify_response(response)
        self.assertTrue(response.cache_control.public)
        self.assertIn('no-transform', response.cache_control)

    def test_overriding_cache_control(self):
        # A modifier overrides a conflicting pre-existing directive.
        response = Response()
        response.cache_control.public = False
        cache_control(public=True).modify_response(response)
        self.assertTrue(response.cache_control.public)
| {
"content_hash": "eee28cf214d570d4151db3d72dc9902c",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 95,
"avg_line_length": 32.142857142857146,
"alnum_prop": 0.6552380952380953,
"repo_name": "fusic-com/flask-webcache",
"id": "0b5c63309475a97b7eb8f4f157c4fdaf00a7dbae",
"size": "1575",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_modifiers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40406"
}
],
"symlink_target": ""
} |
from .harp import HarpMainSource
| {
"content_hash": "21a3c0fc027bf9b64bb3a35a023687a4",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 32,
"avg_line_length": 33,
"alnum_prop": 0.8484848484848485,
"repo_name": "macbre/mobify",
"id": "ea2557b9ecd3db55f79158d55a2ff85c42a38c01",
"size": "33",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mobify/sources/harp/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "557"
},
{
"name": "Python",
"bytes": "67647"
}
],
"symlink_target": ""
} |
"""
measure with libgtop2 but not CPU affinity
"""
__author__ = 'Isaac Gouy'
from domain import Record
import os, sys, cPickle, time, threading, signal, gtop
from errno import ENOENT
from subprocess import Popen
def measure(arg,commandline,delay,maxtime,
            outFile=None,errFile=None,inFile=None,logger=None,affinitymask=None):
   # Run `commandline` in a forked child, sampling its resident memory via
   # libgtop every `delay` seconds for at most `maxtime` seconds, and return
   # a Record of the results.  The child pickles the Record back to the
   # parent through a pipe.  NOTE: `affinitymask` is accepted but unused
   # here (this is the "not CPU affinity" variant - see module docstring).
   r,w = os.pipe()
   forkedPid = os.fork()

   if forkedPid: # read pickled measurements from the pipe
      os.close(w); rPipe = os.fdopen(r); r = cPickle.Unpickler(rPipe)
      measurements = r.load()
      rPipe.close()
      os.waitpid(forkedPid,0)
      return measurements

   else:
      # Sample thread will be destroyed when the forked process _exits
      class Sample(threading.Thread):

         def __init__(self,program):
            threading.Thread.__init__(self)
            self.setDaemon(1)
            self.timedout = False
            self.p = program
            self.maxMem = 0
            self.childpids = None
            self.start()

         def run(self):
            try:
               remaining = maxtime
               while remaining > 0:
                  mem = gtop.proc_mem(self.p).resident
                  time.sleep(delay)
                  remaining -= delay
                  # race condition - will child processes have been created yet?
                  # /1024: resident is presumably bytes, so track KiB - TODO confirm
                  self.maxMem = max((mem + self.childmem())/1024, self.maxMem)
               # while/else: runs when the loop ends without break, i.e. the
               # program is still being sampled after maxtime - mark it timed
               # out and kill it.  (Normally the parent _exits first, which
               # destroys this daemon thread before we get here.)
               else:
                  self.timedout = True
                  os.kill(self.p, signal.SIGKILL)
            except OSError, (e,err):
               if logger: logger.error('%s %s',e,err)

         def childmem(self):
            # Lazily discover direct children of the measured process once,
            # then sum their resident memory on each call.  Children spawned
            # after the first call are never noticed.
            if self.childpids == None:
               self.childpids = set()
               for each in gtop.proclist():
                  if gtop.proc_uid(each).ppid == self.p:
                     self.childpids.add(each)
            mem = 0
            for each in self.childpids:
               mem += gtop.proc_mem(each).resident
            return mem

      try:
         m = Record(arg)

         # only write pickles to the pipe
         os.close(r); wPipe = os.fdopen(w, 'w'); w = cPickle.Pickler(wPipe)

         # gtop cpu is since machine boot, so we need a before measurement
         cpus0 = gtop.cpu().cpus
         start = time.time()

         # spawn the program in a separate process
         p = Popen(commandline,stdout=outFile,stderr=errFile,stdin=inFile)

         # start a thread to sample the program's resident memory use
         t = Sample( program = p.pid )

         # wait for program exit status and resource usage
         rusage = os.wait3(0)

         # gtop cpu is since machine boot, so we need an after measurement
         elapsed = time.time() - start
         cpus1 = gtop.cpu().cpus

         # summarize measurements
         if t.timedout:
            m.setTimedout()
         elif rusage[1] == os.EX_OK:
            m.setOkay()
         else:
            m.setError()

         # rusage[2] is the resource.struct_rusage: [0]=user time, [1]=sys time
         m.userSysTime = rusage[2][0] + rusage[2][1]
         m.maxMem = t.maxMem

         # Per-CPU load over the run: 100 * (1 - idle_delta/total_delta)
         load = map(
            lambda t0,t1:
               int(round(
                  100.0 * (1.0 - float(t1.idle-t0.idle)/(t1.total-t0.total))
               ))
            ,cpus0 ,cpus1 )

         #load.sort(reverse=1) # maybe more obvious unsorted
         m.cpuLoad = ("% ".join([str(i) for i in load]))+"%"

         m.elapsed = elapsed

      except KeyboardInterrupt:
         os.kill(p.pid, signal.SIGKILL)

      except ZeroDivisionError, (e,err):
         if logger: logger.warn('%s %s',err,'too fast to measure?')

      except (OSError,ValueError), (e,err):
         if e == ENOENT: # No such file or directory
            if logger: logger.warn('%s %s',err,commandline)
            m.setMissing()
         else:
            if logger: logger.error('%s %s',e,err)
            m.setError()

      finally:
         # Always ship the Record (whatever state it is in) to the parent
         # and terminate the forked child without running cleanup handlers.
         w.dump(m)
         wPipe.close()
         # Sample thread will be destroyed when the forked process _exits
         os._exit(0)
| {
"content_hash": "3461cec42532867a1fce66efa64a4e46",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 89,
"avg_line_length": 30.124087591240876,
"alnum_prop": 0.521686455052096,
"repo_name": "kragen/shootout",
"id": "72256657344fb90ea08463451c3fbd0cf20dc569",
"size": "4228",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bencher/bin/planB.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Awk",
"bytes": "20429"
},
{
"name": "C",
"bytes": "19623"
},
{
"name": "C++",
"bytes": "7144"
},
{
"name": "CSS",
"bytes": "31855"
},
{
"name": "Common Lisp",
"bytes": "7165"
},
{
"name": "D",
"bytes": "92686"
},
{
"name": "Eiffel",
"bytes": "12668"
},
{
"name": "Erlang",
"bytes": "8985"
},
{
"name": "FORTRAN",
"bytes": "78697"
},
{
"name": "Forth",
"bytes": "4352"
},
{
"name": "Go",
"bytes": "71797"
},
{
"name": "Gosu",
"bytes": "80810"
},
{
"name": "Groovy",
"bytes": "33094"
},
{
"name": "Io",
"bytes": "20174"
},
{
"name": "Java",
"bytes": "322415"
},
{
"name": "JavaScript",
"bytes": "8125"
},
{
"name": "Lua",
"bytes": "105956"
},
{
"name": "PHP",
"bytes": "735691"
},
{
"name": "Parrot",
"bytes": "46492"
},
{
"name": "Pascal",
"bytes": "5498"
},
{
"name": "Perl",
"bytes": "220310"
},
{
"name": "Pike",
"bytes": "49077"
},
{
"name": "Python",
"bytes": "110963"
},
{
"name": "Racket",
"bytes": "389"
},
{
"name": "Rebol",
"bytes": "18387"
},
{
"name": "Ruby",
"bytes": "997"
},
{
"name": "Scala",
"bytes": "124324"
},
{
"name": "Shell",
"bytes": "27023"
},
{
"name": "Smalltalk",
"bytes": "114471"
},
{
"name": "Standard ML",
"bytes": "47458"
},
{
"name": "Tcl",
"bytes": "59635"
},
{
"name": "Visual Basic",
"bytes": "617"
},
{
"name": "ooc",
"bytes": "83657"
}
],
"symlink_target": ""
} |
"""Healthz server invoked from startup script invoked on GCE instance."""
import BaseHTTPServer
import getopt
import logging
import ssl
import sys
import urlparse
class HealthzHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Handler for HTTP requests."""
health_status = 'HEALTHY'
def do_GET(self): # pylint: disable=C0103
"""Handler for GET requests."""
parsed_path = urlparse.urlparse(self.path)
response_code = 400
if parsed_path.path == '/change_status':
HealthzHandler.health_status = parsed_path.query
print 'changed health status to ' + HealthzHandler.health_status
response_code = 200
elif parsed_path.path == '/healthz':
if HealthzHandler.health_status == 'HEALTHY':
response_code = 200
elif HealthzHandler.health_status == 'UNHEALTHY':
response_code = 500
self.send_response(response_code)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(HealthzHandler.health_status)
def main():
# Process flags
port = 12345
cert_file = ''
key_file = ''
try:
opts, _ = getopt.getopt(
sys.argv[1:],
'',
['port=', 'cert_file=', 'key_file='])
except getopt.GetoptError:
logging.error(
'healthz_server.py '
'--port <port> --cert_file <cert_file> --key_file <key_file>')
sys.exit(2)
for opt, arg in opts:
if opt == '--port':
port = int(arg)
elif opt == '--cert_file':
cert_file = arg
elif opt == '--key_file':
key_file = arg
# Start server
healthz_server = BaseHTTPServer.HTTPServer(('', port), HealthzHandler)
print 'Started healthz_server on port', port
if cert_file and key_file:
healthz_server.socket = ssl.wrap_socket(
healthz_server.socket,
certfile=cert_file,
keyfile=key_file,
server_side=True)
healthz_server.serve_forever()
if __name__ == '__main__':
main()
| {
"content_hash": "092f6a4c8da128c5863cc568edda7d39",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 73,
"avg_line_length": 27.267605633802816,
"alnum_prop": 0.637396694214876,
"repo_name": "aljim/deploymentmanager-samples",
"id": "9b10983fc6f68eb4463a61cd1170fe954a277b61",
"size": "2533",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "google/resource-snippets/compute-v1/healthz_server.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "6428"
},
{
"name": "HTML",
"bytes": "106754"
},
{
"name": "JavaScript",
"bytes": "70015"
},
{
"name": "Makefile",
"bytes": "4430"
},
{
"name": "Python",
"bytes": "443622"
},
{
"name": "Shell",
"bytes": "251698"
}
],
"symlink_target": ""
} |
# Interactive Python 2 command-line wrapper around the kingcoin JSON-RPC
# API: argv[1] names the RPC call; arguments are prompted for on stdin.
from jsonrpc import ServiceProxy
import sys
import string

# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======

# Connect to the local daemon on the default RPC port, with inline
# basic-auth credentials when a password was configured above.
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:8332")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")
cmd = sys.argv[1].lower()
# Dispatch on the requested command.  Every branch follows the same shape:
# prompt for arguments, call the RPC method, print the result; a nested
# try/except retries without the optional arguments when the call with
# them fails.  (The bare excepts swallow the real error details.)
if cmd == "backupwallet":
    try:
        path = raw_input("Enter destination path/filename: ")
        print access.backupwallet(path)
    except:
        print "\n---An error occurred---\n"

elif cmd == "getaccount":
    try:
        addr = raw_input("Enter a kingcoin address: ")
        print access.getaccount(addr)
    except:
        print "\n---An error occurred---\n"

elif cmd == "getaccountaddress":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaccountaddress(acct)
    except:
        print "\n---An error occurred---\n"

elif cmd == "getaddressesbyaccount":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaddressesbyaccount(acct)
    except:
        print "\n---An error occurred---\n"

elif cmd == "getbalance":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getbalance(acct, mc)
        except:
            print access.getbalance()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getblockbycount":
    try:
        height = raw_input("Height: ")
        print access.getblockbycount(height)
    except:
        print "\n---An error occurred---\n"

elif cmd == "getblockcount":
    try:
        print access.getblockcount()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getblocknumber":
    try:
        print access.getblocknumber()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getconnectioncount":
    try:
        print access.getconnectioncount()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getdifficulty":
    try:
        print access.getdifficulty()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getgenerate":
    try:
        print access.getgenerate()
    except:
        print "\n---An error occurred---\n"

elif cmd == "gethashespersec":
    try:
        print access.gethashespersec()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getinfo":
    try:
        print access.getinfo()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getnewaddress":
    try:
        acct = raw_input("Enter an account name: ")
        try:
            print access.getnewaddress(acct)
        except:
            print access.getnewaddress()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getreceivedbyaccount":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaccount(acct, mc)
        except:
            print access.getreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getreceivedbyaddress":
    try:
        addr = raw_input("Enter a kingcoin address (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaddress(addr, mc)
        except:
            print access.getreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"

elif cmd == "gettransaction":
    try:
        txid = raw_input("Enter a transaction ID: ")
        print access.gettransaction(txid)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
# Remaining commands - same prompt/call/print pattern as above.
elif cmd == "help":
    try:
        cmd = raw_input("Command (optional): ")
        try:
            print access.help(cmd)
        except:
            print access.help()
    except:
        print "\n---An error occurred---\n"

elif cmd == "listaccounts":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.listaccounts(mc)
        except:
            print access.listaccounts()
    except:
        print "\n---An error occurred---\n"

elif cmd == "listreceivedbyaccount":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaccount(mc, incemp)
        except:
            print access.listreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"

elif cmd == "listreceivedbyaddress":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaddress(mc, incemp)
        except:
            print access.listreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"

elif cmd == "listtransactions":
    try:
        acct = raw_input("Account (optional): ")
        count = raw_input("Number of transactions (optional): ")
        frm = raw_input("Skip (optional):")
        try:
            print access.listtransactions(acct, count, frm)
        except:
            print access.listtransactions()
    except:
        print "\n---An error occurred---\n"

elif cmd == "move":
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.move(frm, to, amt, mc, comment)
        except:
            print access.move(frm, to, amt)
    except:
        print "\n---An error occurred---\n"

elif cmd == "sendfrom":
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendfrom(frm, to, amt, mc, comment, commentto)
        except:
            print access.sendfrom(frm, to, amt)
    except:
        print "\n---An error occurred---\n"

elif cmd == "sendmany":
    try:
        frm = raw_input("From: ")
        to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.sendmany(frm,to,mc,comment)
        except:
            print access.sendmany(frm,to)
    except:
        print "\n---An error occurred---\n"

elif cmd == "sendtoaddress":
    try:
        to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
        amt = raw_input("Amount:")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendtoaddress(to,amt,comment,commentto)
        except:
            print access.sendtoaddress(to,amt)
    except:
        print "\n---An error occurred---\n"

elif cmd == "setaccount":
    try:
        addr = raw_input("Address: ")
        acct = raw_input("Account:")
        print access.setaccount(addr,acct)
    except:
        print "\n---An error occurred---\n"

elif cmd == "setgenerate":
    try:
        gen= raw_input("Generate? (true/false): ")
        cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
        try:
            print access.setgenerate(gen, cpus)
        except:
            print access.setgenerate(gen)
    except:
        print "\n---An error occurred---\n"

elif cmd == "settxfee":
    try:
        amt = raw_input("Amount:")
        print access.settxfee(amt)
    except:
        print "\n---An error occurred---\n"

elif cmd == "stop":
    try:
        print access.stop()
    except:
        print "\n---An error occurred---\n"

elif cmd == "validateaddress":
    try:
        addr = raw_input("Address: ")
        print access.validateaddress(addr)
    except:
        print "\n---An error occurred---\n"

elif cmd == "walletpassphrase":
    try:
        pwd = raw_input("Enter wallet passphrase: ")
        # Unlock the wallet for a fixed 60 seconds.
        access.walletpassphrase(pwd, 60)
        print "\n---Wallet unlocked---\n"
    except:
        print "\n---An error occurred---\n"

elif cmd == "walletpassphrasechange":
    try:
        pwd = raw_input("Enter old wallet passphrase: ")
        pwd2 = raw_input("Enter new wallet passphrase: ")
        access.walletpassphrasechange(pwd, pwd2)
        print
        print "\n---Passphrase changed---\n"
    except:
        print
        print "\n---An error occurred---\n"
    print

else:
    print "Command not found or not supported"
"content_hash": "c675fdbf68fffd616af6988a0cea54c2",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 79,
"avg_line_length": 24.188271604938272,
"alnum_prop": 0.661860405767513,
"repo_name": "kingcoin/kingcoin",
"id": "b0f703a713288b98574ceead7745c1d06ab16b40",
"size": "7837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/bitrpc/bitrpc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "59007"
},
{
"name": "C",
"bytes": "166899"
},
{
"name": "C++",
"bytes": "1680464"
},
{
"name": "Objective-C",
"bytes": "50561"
},
{
"name": "Prolog",
"bytes": "12443"
},
{
"name": "Python",
"bytes": "11655"
},
{
"name": "Shell",
"bytes": "2507"
},
{
"name": "TypeScript",
"bytes": "6403531"
}
],
"symlink_target": ""
} |
import reversion
from datetime import datetime
from hashlib import sha256
from django.contrib.gis.db import models
from django.db.models.signals import post_save
from django.db.models.signals import post_delete
from django.contrib.auth.models import User
from django.contrib.gis.geos import GeometryCollection, Point
from django.utils import timezone
from django.utils.translation import ugettext as _
from jsonfield import JSONField
from taggit.managers import TaggableManager
from onadata.apps.logger.fields import LazyDefaultBooleanField
from onadata.apps.logger.models.survey_type import SurveyType
from onadata.apps.logger.models.xform import XForm
from onadata.apps.logger.xform_instance_parser import XFormInstanceParser,\
clean_and_parse_xml, get_uuid_from_xml
from onadata.libs.utils.common_tags import ATTACHMENTS, BAMBOO_DATASET_ID,\
DELETEDAT, GEOLOCATION, ID, MONGO_STRFTIME, NOTES, SUBMISSION_TIME, TAGS,\
UUID, XFORM_ID_STRING, SUBMITTED_BY
from onadata.libs.utils.model_tools import set_uuid
class FormInactiveError(Exception):
    """Raised when a submission targets an XForm that is not downloadable."""
    def __unicode__(self):
        return _("Form is inactive")

    def __str__(self):
        # Python 2: delegate to __unicode__ and encode explicitly.
        return unicode(self).encode('utf-8')
# need to establish id_string of the xform before we run get_dict since
# we now rely on data dictionary to parse the xml
def get_id_string_from_xml_str(xml_str):
    """Return the form id string found in a raw XML submission.

    Checks the root element's ``id`` attribute first; if empty, searches
    <data> elements for a child node carrying an ``id`` attribute.
    Returns the empty string when no id can be found.
    """
    xml_obj = clean_and_parse_xml(xml_str)
    root_node = xml_obj.documentElement
    id_string = root_node.getAttribute(u"id")
    if len(id_string) == 0:
        # may be hidden in submission/data/id_string
        elems = root_node.getElementsByTagName('data')
        for data in elems:
            for child in data.childNodes:
                # NOTE(review): this always reads childNodes[0], not
                # `child`, so the inner loop never inspects later
                # children -- looks like a bug; confirm intent before
                # changing (later children may be text nodes without
                # getAttribute).
                id_string = data.childNodes[0].getAttribute('id')
                if len(id_string) > 0:
                    break
            if len(id_string) > 0:
                break
    return id_string
def submission_time():
    """Return the current timezone-aware datetime, used as submission time."""
    return timezone.now()
def update_xform_submission_count(sender, instance, created, **kwargs):
    """post_save handler: on first save of an Instance, increment the
    submission counter (and last_submission_time) on the parent XForm and
    the submission counter on the owning user's profile.

    Rows are fetched with select_for_update to serialize concurrent
    increments.
    """
    if created:
        xform = XForm.objects.select_related().select_for_update()\
            .get(pk=instance.xform.pk)
        xform.num_of_submissions += 1
        xform.last_submission_time = instance.date_created
        xform.save()
        profile_qs = User.profile.get_queryset()
        try:
            profile = profile_qs.select_for_update()\
                .get(pk=xform.user.profile.pk)
        except profile_qs.model.DoesNotExist:
            # User has no profile row; skip the per-user counter.
            pass
        else:
            profile.num_of_submissions += 1
            profile.save()
def update_xform_submission_count_delete(sender, instance, **kwargs):
    """post_delete handler: decrement the submission counters on the parent
    XForm and the owning user's profile, clamping both at zero.
    """
    try:
        xform = XForm.objects.select_for_update().get(pk=instance.xform.pk)
    except XForm.DoesNotExist:
        # The form itself may already be gone (e.g. cascading delete).
        pass
    else:
        xform.num_of_submissions -= 1
        if xform.num_of_submissions < 0:
            xform.num_of_submissions = 0
        xform.save()
        profile_qs = User.profile.get_queryset()
        try:
            profile = profile_qs.select_for_update()\
                .get(pk=xform.user.profile.pk)
        except profile_qs.model.DoesNotExist:
            pass
        else:
            profile.num_of_submissions -= 1
            if profile.num_of_submissions < 0:
                profile.num_of_submissions = 0
            profile.save()
@reversion.register
class Instance(models.Model):
    """A single form submission: the raw XML plus its parsed JSON
    representation and bookkeeping metadata (geodata, hash, status, ...).
    """
    XML_HASH_LENGTH = 64
    DEFAULT_XML_HASH = None

    # NOTE(review): the mutable default {} is shared across model instances
    # unless JSONField copies it per row -- verify against the jsonfield
    # version in use (consider default=dict).
    json = JSONField(default={}, null=False)
    xml = models.TextField()
    # SHA256 hex digest of `xml` (see get_hash); indexed for lookups.
    xml_hash = models.CharField(max_length=XML_HASH_LENGTH, db_index=True, null=True,
                                default=DEFAULT_XML_HASH)
    user = models.ForeignKey(User, related_name='instances', null=True)
    xform = models.ForeignKey(XForm, null=True, related_name='instances')
    survey_type = models.ForeignKey(SurveyType)

    # shows when we first received this instance
    date_created = models.DateTimeField(auto_now_add=True)

    # this will end up representing "date last parsed"
    date_modified = models.DateTimeField(auto_now=True)

    # this will end up representing "date instance was deleted"
    deleted_at = models.DateTimeField(null=True, default=None)

    # ODK keeps track of three statuses for an instance:
    # incomplete, submitted, complete
    # we add a fourth status: submitted_via_web
    status = models.CharField(max_length=20,
                              default=u'submitted_via_web')
    uuid = models.CharField(max_length=249, default=u'')

    # store any geographic objects associated with this instance
    geom = models.GeometryCollectionField(null=True)
    is_synced_with_mongo = LazyDefaultBooleanField(default=False)

    objects = models.GeoManager()

    tags = TaggableManager()

    class Meta:
        app_label = 'logger'

    @classmethod
    def set_deleted_at(cls, instance_id, deleted_at=None):
        """Soft-delete the instance with pk ``instance_id``; missing ids are
        ignored.

        BUG FIX: the default used to be ``timezone.now()``, which Python
        evaluates once at import time, stamping every deletion with the
        process start time.  ``None`` now means "the moment of the call".
        """
        if deleted_at is None:
            deleted_at = timezone.now()
        try:
            instance = cls.objects.get(id=instance_id)
        except cls.DoesNotExist:
            pass
        else:
            instance.set_deleted(deleted_at)

    def _check_active(self, force):
        """Check that form is active and raise exception if not.

        :param force: Ignore restrictions on saving.
        """
        if not force and self.xform and not self.xform.downloadable:
            raise FormInactiveError()

    def _set_geom(self):
        """Collect the submission's geopoints into ``self.geom``."""
        xform = self.xform
        data_dictionary = xform.data_dictionary()
        geo_xpaths = data_dictionary.geopoint_xpaths()
        doc = self.get_dict()
        points = []
        if len(geo_xpaths):
            for xpath in geo_xpaths:
                # geopoint values are whitespace-separated "lat lng ..."
                geometry = [float(s) for s in doc.get(xpath, u'').split()]
                if len(geometry):
                    lat, lng = geometry[0:2]
                    points.append(Point(lng, lat))
            # Flag the form the first time a submission carries geodata.
            if not xform.instances_with_geopoints and len(points):
                xform.instances_with_geopoints = True
                xform.save()
            self.geom = GeometryCollection(points)

    def _set_json(self):
        """Build ``self.json`` from the XML, adding bookkeeping keys."""
        doc = self.get_dict()
        if not self.date_created:
            now = submission_time()
            self.date_created = now
        point = self.point
        if point:
            doc[GEOLOCATION] = [point.y, point.x]
        doc[SUBMISSION_TIME] = self.date_created.strftime(MONGO_STRFTIME)
        doc[XFORM_ID_STRING] = self._parser.get_xform_id_string()
        doc[SUBMITTED_BY] = self.user.username\
            if self.user is not None else None
        self.json = doc

    def _set_parser(self):
        # Lazily create the XML parser; reused by several accessors.
        if not hasattr(self, "_parser"):
            self._parser = XFormInstanceParser(
                self.xml, self.xform.data_dictionary())

    def _set_survey_type(self):
        # Survey type is keyed on the XML root node name.
        self.survey_type, created = \
            SurveyType.objects.get_or_create(slug=self.get_root_node_name())

    def _set_uuid(self):
        # Prefer the uuid embedded in the XML; otherwise generate one.
        if self.xml and not self.uuid:
            uuid = get_uuid_from_xml(self.xml)
            if uuid is not None:
                self.uuid = uuid
        set_uuid(self)

    def _populate_xml_hash(self):
        '''
        Populate the `xml_hash` attribute of this `Instance` based on the
        content of the `xml` attribute.
        '''
        self.xml_hash = self.get_hash(self.xml)

    def get(self, abbreviated_xpath):
        """Return the value at ``abbreviated_xpath`` in the parsed XML."""
        self._set_parser()
        return self._parser.get(abbreviated_xpath)

    def get_dict(self, force_new=False, flat=True):
        """Return a python object representation of this instance's XML."""
        self._set_parser()
        return self._parser.get_flat_dict_with_attributes() if flat else\
            self._parser.to_dict()

    def get_full_dict(self):
        """Return ``self.json`` augmented with bookkeeping fields."""
        # TODO should we store all of these in the JSON no matter what?
        d = self.json
        data = {
            UUID: self.uuid,
            ID: self.id,
            BAMBOO_DATASET_ID: self.xform.bamboo_dataset,
            # NOTE(review): USERFORM_ID and STATUS are not defined on this
            # class (they exist on ParsedInstance), so this method raises
            # AttributeError as written -- confirm the intended receiver.
            self.USERFORM_ID: u'%s_%s' % (
                self.user.username,
                self.xform.id_string),
            ATTACHMENTS: [a.media_file.name for a in
                          self.attachments.all()],
            self.STATUS: self.status,
            TAGS: list(self.tags.names()),
            NOTES: self.get_notes()
        }
        # BUG FIX: was `self.instance.deleted_at`; Instance has no
        # `.instance` attribute, so the deleted-at key was never added.
        if isinstance(self.deleted_at, datetime):
            data[DELETEDAT] = self.deleted_at.strftime(MONGO_STRFTIME)
        d.update(data)
        return d

    def get_notes(self):
        """Return the text of all notes attached to this instance."""
        return [note['note'] for note in self.notes.values('note')]

    def get_root_node(self):
        self._set_parser()
        return self._parser.get_root_node()

    def get_root_node_name(self):
        self._set_parser()
        return self._parser.get_root_node_name()

    @staticmethod
    def get_hash(input_string):
        '''
        Compute the SHA256 hash of the given string. A wrapper to
        standardize hash computation.

        :param basestring input_sting: The string to be hashed.
        :return: The resulting hash.
        :rtype: str
        '''
        if isinstance(input_string, unicode):
            input_string = input_string.encode('utf-8')
        return sha256(input_string).hexdigest()

    @property
    def point(self):
        """Return the first geopoint in ``geom``, or None."""
        gc = self.geom
        if gc and len(gc):
            return gc[0]

    def save(self, *args, **kwargs):
        """Derive geom/json/survey_type/uuid/xml_hash, then save.

        Pass ``force=True`` to bypass the active-form check.
        """
        force = kwargs.get('force')
        if force:
            del kwargs['force']
        self._check_active(force)
        self._set_geom()
        self._set_json()
        self._set_survey_type()
        self._set_uuid()
        self._populate_xml_hash()
        super(Instance, self).save(*args, **kwargs)

    def set_deleted(self, deleted_at=None):
        """Soft-delete this instance and refresh dependent records.

        BUG FIX: as with ``set_deleted_at``, the old ``timezone.now()``
        default was evaluated once at import time; ``None`` now means the
        moment of the call.
        """
        if deleted_at is None:
            deleted_at = timezone.now()
        self.deleted_at = deleted_at
        self.save()
        # force submission count re-calculation
        self.xform.submission_count(force_update=True)
        self.parsed_instance.save()
# Keep XForm/profile submission counters in sync with the Instance
# lifecycle; dispatch_uid prevents duplicate registration on re-import.
post_save.connect(update_xform_submission_count, sender=Instance,
                  dispatch_uid='update_xform_submission_count')
post_delete.connect(update_xform_submission_count_delete, sender=Instance,
                    dispatch_uid='update_xform_submission_count_delete')
class InstanceHistory(models.Model):
    """Snapshot of an Instance's XML, kept as submission edit history."""
    class Meta:
        app_label = 'logger'

    xform_instance = models.ForeignKey(
        Instance, related_name='submission_history')
    xml = models.TextField()
    # old instance id
    uuid = models.CharField(max_length=249, default=u'')

    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
| {
"content_hash": "1292902e8ea641e5405ca557bc1ffb2d",
"timestamp": "",
"source": "github",
"line_count": 327,
"max_line_length": 95,
"avg_line_length": 32.40978593272171,
"alnum_prop": 0.6184185695414229,
"repo_name": "awemulya/fieldsight-kobocat",
"id": "2260b27598260778f121b271eccc443235d383c2",
"size": "10598",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "onadata/apps/logger/models/instance.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "70153"
},
{
"name": "Dockerfile",
"bytes": "2462"
},
{
"name": "HTML",
"bytes": "1488442"
},
{
"name": "JavaScript",
"bytes": "674757"
},
{
"name": "Makefile",
"bytes": "2286"
},
{
"name": "Python",
"bytes": "5340355"
},
{
"name": "Shell",
"bytes": "16493"
}
],
"symlink_target": ""
} |
# Regression test: generate random point/cell attributes on a plane and
# visualize them with tensor glyphs (spheres) and vector glyphs (cones).
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()

# Create the RenderWindow, Renderer and interactive renderer
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
# make sure to have the same regression image on all platforms.
renWin.SetMultiSamples(0)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Force a starting random value
raMath = vtk.vtkMath()
raMath.RandomSeed(6)
# Generate random attributes on a plane
#
ps = vtk.vtkPlaneSource()
ps.SetXResolution(10)
ps.SetYResolution(10)
ag = vtk.vtkRandomAttributeGenerator()
ag.SetInputConnection(ps.GetOutputPort())
ag.GenerateAllDataOn()
# Tensor glyphs: a sphere deformed/scaled by the random tensor data.
ss = vtk.vtkSphereSource()
ss.SetPhiResolution(16)
ss.SetThetaResolution(32)
tg = vtk.vtkTensorGlyph()
tg.SetInputConnection(ag.GetOutputPort())
tg.SetSourceConnection(ss.GetOutputPort())
tg.SetScaleFactor(0.1)
tg.SetMaxScaleFactor(10)
tg.ClampScalingOn()
n = vtk.vtkPolyDataNormals()
n.SetInputConnection(tg.GetOutputPort())
# Vector glyphs: cones placed on the random data (not mapped below --
# see the commented-out mapper line).
cs = vtk.vtkConeSource()
cs.SetResolution(6)
glyph = vtk.vtkGlyph3D()
glyph.SetInputConnection(ag.GetOutputPort())
glyph.SetSourceConnection(cs.GetOutputPort())
glyph.SetScaleModeToDataScalingOff()
glyph.SetScaleFactor(0.05)
pdm = vtk.vtkPolyDataMapper()
pdm.SetInputConnection(n.GetOutputPort())
# pdm SetInputConnection [glyph GetOutputPort]
a = vtk.vtkActor()
a.SetMapper(pdm)
pm = vtk.vtkPolyDataMapper()
pm.SetInputConnection(ps.GetOutputPort())
pa = vtk.vtkActor()
pa.SetMapper(pm)
ren1.AddActor(a)
ren1.AddActor(pa)
ren1.SetBackground(0, 0, 0)
renWin.SetSize(300, 300)
renWin.Render()
#iren.Start()
| {
"content_hash": "8b4b23de0895db71b11f6539748bb43f",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 63,
"avg_line_length": 22.106666666666666,
"alnum_prop": 0.7876960193003619,
"repo_name": "collects/VTK",
"id": "f35093e6109db5dad2f36041f3a9d8aca48e74e9",
"size": "1680",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Filters/General/Testing/Python/TestRandomAttributeGenerator.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "37444"
},
{
"name": "C",
"bytes": "42998905"
},
{
"name": "C++",
"bytes": "60634172"
},
{
"name": "CSS",
"bytes": "7532"
},
{
"name": "Delphi",
"bytes": "3255"
},
{
"name": "Java",
"bytes": "170086"
},
{
"name": "Objective-C",
"bytes": "346001"
},
{
"name": "Perl",
"bytes": "176963"
},
{
"name": "Prolog",
"bytes": "4406"
},
{
"name": "Python",
"bytes": "2780705"
},
{
"name": "Shell",
"bytes": "40417"
},
{
"name": "Tcl",
"bytes": "1893930"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
# Apps nexus depends on; can be skipped via a settings flag for projects
# that wire authentication/sessions differently.
_reqs = ('django.contrib.auth', 'django.contrib.sessions')

if getattr(settings, 'NEXUS_SKIP_INSTALLED_APPS_REQUIREMENTS', False):
    _reqs = ()

for r in _reqs:
    if r in settings.INSTALLED_APPS:
        continue
    raise ImproperlyConfigured(
        "Put '%s' in your "
        "INSTALLED_APPS setting in order to use the nexus application." % r)
| {
"content_hash": "e733d66559cc60f66468bb5b7cbaa766",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 80,
"avg_line_length": 39,
"alnum_prop": 0.7132867132867133,
"repo_name": "disqus/nexus",
"id": "d6ad4c286f67c20f181618b411fdab66a04bacfd",
"size": "429",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nexus/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24685"
},
{
"name": "HTML",
"bytes": "28452"
},
{
"name": "JavaScript",
"bytes": "11029"
},
{
"name": "Python",
"bytes": "36539"
}
],
"symlink_target": ""
} |
'''test pysftp.Connection compression param - uses py.test'''
from __future__ import print_function
# pylint: disable = W0142
from common import *
def test_security_options(psftp):
'''test the security_options property has expected attributes and that
they are tuples'''
secopts = psftp.security_options
for attr in ['ciphers', 'compression', 'digests', 'kex', 'key_types']:
assert hasattr(secopts, attr)
assert isinstance(getattr(secopts, attr), tuple)
| {
"content_hash": "4ddf7f14caa5db7c53dcd8cfc8ef5cbe",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 74,
"avg_line_length": 34.92857142857143,
"alnum_prop": 0.6993865030674846,
"repo_name": "Clean-Cole/pysftp",
"id": "2c505d84eda9e6e3a4cb6de525c1e066d5454352",
"size": "489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_security_options.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "101823"
},
{
"name": "Shell",
"bytes": "1104"
}
],
"symlink_target": ""
} |
'''Unit tests for models and their factories.'''
import unittest
from nose.tools import * # PEP8 asserts
from framework.forms.utils import process_payload
from modularodm.exceptions import ValidationError
from modularodm import Q
from website.project.model import MetaSchema
from website.project.model import ensure_schemas
from website.project.metadata.schemas import OSF_META_SCHEMAS
from tests.base import OsfTestCase
class TestMetaData(OsfTestCase):
    """Tests for MetaSchema bootstrapping and form-payload processing."""

    def test_ensure_schemas(self):
        # Should be zero MetaSchema records to begin with
        assert_equal(
            MetaSchema.find().count(),
            0
        )
        ensure_schemas()
        # ensure_schemas() registers one record per known schema.
        assert_equal(
            MetaSchema.find().count(),
            len(OSF_META_SCHEMAS)
        )

    def test_metaschema_uniqueness_is_enforced_in_the_database(self):
        # A duplicate (name, schema_version) pair must be rejected on save.
        MetaSchema(name='foo', schema={'foo': 42}, schema_version=1).save()
        assert_raises(ValidationError, MetaSchema(name='foo', schema={'bar': 24}, schema_version=1).save)

    def test_metaschema_is_fine_with_same_name_but_different_version(self):
        # Same name is allowed as long as the version differs.
        MetaSchema(name='foo', schema={'foo': 42}, schema_version=1).save()
        MetaSchema(name='foo', schema={'foo': 42}, schema_version=2).save()
        assert_equal(MetaSchema.find(Q('name', 'eq', 'foo')).count(), 2)

    def test_process(self):
        # process_payload URL-quotes unsafe characters such as '&'.
        processed = process_payload({'foo': 'bar&baz'})
        assert_equal(processed['foo'], 'bar%26baz')

    def test_process_list(self):
        # Values inside lists are quoted element-wise.
        processed = process_payload({'foo': ['bar', 'baz&bob']})
        assert_equal(processed['foo'][1], 'baz%26bob')

    def test_process_whitespace(self):
        # Plain whitespace is left untouched.
        processed = process_payload({'foo': 'bar baz'})
        assert_equal(processed['foo'], 'bar baz')
# Allow running this test module directly with the stdlib runner.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "1a7d54b9856a53993b8c952f37c78cbe",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 105,
"avg_line_length": 32.945454545454545,
"alnum_prop": 0.6512141280353201,
"repo_name": "Nesiehr/osf.io",
"id": "20d533018769dbe7469229a2dd730741e3810992",
"size": "1836",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "tests/test_metadata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "144027"
},
{
"name": "HTML",
"bytes": "215077"
},
{
"name": "JavaScript",
"bytes": "1699002"
},
{
"name": "Mako",
"bytes": "650031"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "7928034"
}
],
"symlink_target": ""
} |
import pprint
#again idiom for reading in a file, relative path given
with open('../pres_on_trade.txt', 'r') as fp:
all_text = fp.read()
#str.split() will split groups of characters on any white space, easy... nice
#sorted built-in function will only sort alphbetically here
all_words = sorted(all_text.split())
#begin preparation of words for a reasonable word frequency count
#we need to change our words from str to unicode
#unicode_words = [unicode(word) for word in all_words if unicode(word)]
#list comprehensions won't work because we get errors,
#let's do a try: except: block
unicode_words = []
for word in all_words:
try:
unicode_words.append(unicode(word))
except UnicodeDecodeError:
pass
#awesome list comprehension, they take iterables and return lists
#this will clean our words of unwanted punctuation and change to all lowercase
all_words = [word.strip("?.\'-,().").lower() for word in unicode_words]
#print all_words
#help(''.strip)
#reminder on dictionary syntax - setting the key and value
#dict_name[key] = value
#word_freq_dc['word'] = 18
#using dict.get method to check for existence and build word_freq dictionary
word_freq_dc = {}
for word in all_words:
times = word_freq_dc.get(word, 0)
times += 1
word_freq_dc[word] = times
#the easy way :) if you knew about it or where to look
from collections import Counter
#help(Counter)
counter = Counter(all_words)
#can use slice method on a sequence, this gets first 40 of type list
#that is: Counter.most_common() returns a list, a list is considerd one kind of sequence
print(counter.most_common()[:40])
#end line character for clarity when printing
print '\n'
#to be sure
counter_for_dc = Counter(word_freq_dc)
counter_from_before = Counter(all_words)
print counter_for_dc == counter_from_before
#going further with a generator expression
non_small_words = (word for word in all_words
if len(word) > 4 and
word is not 'usa' and
word not in
['applause', 'laughter', 'there', 'these', 'those'])
recounter = Counter(non_small_words)
print(recounter.most_common()[:40])
#below is work we did to figure out the proper procedure to
#count words using a dictionary
#pprint.pprint(word_freq_dc)
#for k, v in word_freq_dc.iteritems():
# tupled_word_freq.append((k, v))
#tupled_word_freq = zip(word_freq_dc.itervalues(), word_freq_dc.iterkeys())
#print(tupled_word_freq)
#print sorted(tupled_word_freq)
#help(word_freq_dc.get)
| {
"content_hash": "81b519733673ed732527c331b212a1f5",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 88,
"avg_line_length": 28.617977528089888,
"alnum_prop": 0.6972909305064782,
"repo_name": "noisebridge/PythonClass",
"id": "73fc55554d03855a25852ef208233705e5a20b67",
"size": "2835",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "instructors/course-2015/functions_gens_and_ducks/examples/in_class/parsetext_trade_2015.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3255"
},
{
"name": "HTML",
"bytes": "524536"
},
{
"name": "Jupyter Notebook",
"bytes": "493067"
},
{
"name": "Mako",
"bytes": "824"
},
{
"name": "Perl",
"bytes": "34109"
},
{
"name": "Python",
"bytes": "474536"
},
{
"name": "Shell",
"bytes": "263"
}
],
"symlink_target": ""
} |
__version__=''' $Id: widgets.py 3086 2007-05-22 13:10:34Z rgbecker $ '''
__all__= (
'BarcodeI2of5',
'BarcodeCode128',
'BarcodeStandard93',
'BarcodeExtended93',
'BarcodeStandard39',
'BarcodeExtended39',
'BarcodeMSI',
'BarcodeCodabar',
'BarcodeCode11',
'BarcodeFIM',
'BarcodePOSTNET',
'BarcodeUSPS_4State',
)
from reportlab.lib.validators import isInt, isNumber, isColor, isString, isColorOrNone, OneOf, isBoolean, EitherOr, isNumberOrNone
from reportlab.lib.attrmap import AttrMap, AttrMapValue
from reportlab.lib.colors import black
from reportlab.graphics.shapes import Line, Rect, Group, NotImplementedError, String
from reportlab.graphics.charts.areas import PlotArea
'''
#snippet
#first make your Drawing
from reportlab.graphics.shapes import Drawing
d= Drawing(100,50)
#create and set up the widget
from reportlab.graphics.barcode.widgets import BarcodeStandard93
bc = BarcodeStandard93()
bc.value = 'RGB-123456'
#add to the drawing and save
d.add(bc)
# d.save(formats=['gif','pict'],fnRoot='bc_sample')
'''
class _BarcodeWidget(PlotArea):
    """Common base for barcode widgets.

    Bridges the low-level barcode classes (reportlab.graphics.barcode.*)
    into drawable graphics widgets: subclasses pass the concrete barcode
    class as ``BCC`` and a throwaway subclass mixing this widget with
    ``BCC`` is created at construction time, so one object answers to both
    APIs.
    """
    _attrMap = AttrMap(BASE=PlotArea,
        barStrokeColor = AttrMapValue(isColorOrNone, desc='Color of bar borders.'),
        barFillColor = AttrMapValue(isColorOrNone, desc='Color of bar interior areas.'),
        barStrokeWidth = AttrMapValue(isNumber, desc='Width of bar borders.'),
        value = AttrMapValue(EitherOr((isString,isNumber)), desc='Value.'),
        textColor = AttrMapValue(isColorOrNone, desc='Color of human readable text.'),
        valid = AttrMapValue(isBoolean),
        validated = AttrMapValue(isString,desc="validated form of input"),
        encoded = AttrMapValue(None,desc="encoded form of input"),
        decomposed = AttrMapValue(isString,desc="decomposed form of input"),
        canv = AttrMapValue(None,desc="temporarily used for internal methods"),
        gap = AttrMapValue(isNumberOrNone, desc='Width of inter character gaps.'),
        )
    barStrokeColor = barFillColor = textColor = black
    barStrokeWidth = 0
    _BCC = None
    def __init__(self,BCC=None,_value='',**kw):
        # Dynamically combine the widget class with the barcode class so
        # the instance exposes both interfaces.
        self._BCC = BCC
        class Combiner(self.__class__,BCC):
            __name__ = self.__class__.__name__
        self.__class__ = Combiner
        PlotArea.__init__(self)
        del self.width, self.height
        self.x = self.y = 0
        kw.setdefault('value',_value)
        BCC.__init__(self,**kw)

    def rect(self,x,y,w,h,**kw):
        # Canvas-like API used by the barcode classes during draw().
        self._Gadd(Rect(self.x+x,self.y+y,w,h,
            strokeColor=self.barStrokeColor,strokeWidth=self.barStrokeWidth, fillColor=self.barFillColor))

    def draw(self):
        # Act as our own "canvas" while the barcode class renders itself.
        if not self._BCC: raise NotImplementedError("Abstract class %s cannot be drawn" % self.__class__.__name__)
        self.canv = self
        G = Group()
        self._Gadd = G.add
        # Invisible bounding rect so the group carries the widget's extent.
        self._Gadd(Rect(self.x,self.y,self.width,self.height,fillColor=None,strokeColor=None,strokeWidth=0.0001))
        self._BCC.draw(self)
        del self.canv, self._Gadd
        return G

    def annotate(self,x,y,text,fontName,fontSize,anchor='middle'):
        # Canvas-like text API used for the human-readable caption.
        self._Gadd(String(self.x+x,self.y+y,text,fontName=fontName,fontSize=fontSize,
            textAnchor=anchor,fillColor=self.textColor))
class BarcodeI2of5(_BarcodeWidget):
    """Interleaved 2 of 5 is used in distribution and warehouse industries.

    It encodes an even-numbered sequence of numeric digits. There is an optional
    module 10 check digit; if including this, the total length must be odd so that
    it becomes even after including the check digit. Otherwise the length must be
    even. Since the check digit is optional, our library does not check it.
    """
    # Sample values exercised by the demo/regression driver below.
    _tests = [
        '12',
        '1234',
        '123456',
        '12345678',
        '1234567890'
        ]

    codeName = "I2of5"
    _attrMap = AttrMap(BASE=_BarcodeWidget,
        barWidth = AttrMapValue(isNumber,'''(float, default .0075):
            X-Dimension, or width of the smallest element
            Minumum is .0075 inch (7.5 mils).'''),
        ratio = AttrMapValue(isNumber,'''(float, default 2.2):
            The ratio of wide elements to narrow elements.
            Must be between 2.0 and 3.0 (or 2.2 and 3.0 if the
            barWidth is greater than 20 mils (.02 inch))'''),
        gap = AttrMapValue(isNumberOrNone,'''(float or None, default None):
            width of intercharacter gap. None means "use barWidth".'''),
        barHeight = AttrMapValue(isNumber,'''(float, see default below):
            Height of the symbol.  Default is the height of the two
            bearer bars (if they exist) plus the greater of .25 inch
            or .15 times the symbol's length.'''),
        checksum = AttrMapValue(isBoolean,'''(bool, default 1):
            Whether to compute and include the check digit'''),
        bearers = AttrMapValue(isNumber,'''(float, in units of barWidth. default 3.0):
            Height of bearer bars (horizontal bars along the top and
            bottom of the barcode). Default is 3 x-dimensions.
            Set to zero for no bearer bars. (Bearer bars help detect
            misscans, so it is suggested to leave them on).'''),
        quiet = AttrMapValue(isBoolean,'''(bool, default 1):
            Whether to include quiet zones in the symbol.'''),
        lquiet = AttrMapValue(isNumber,'''(float, see default below):
            Quiet zone size to left of code, if quiet is true.
            Default is the greater of .25 inch, or .15 times the symbol's
            length.'''),
        rquiet = AttrMapValue(isNumber,'''(float, defaults as above):
            Quiet zone size to right left of code, if quiet is true.'''),
        fontName = AttrMapValue(isString, desc='human readable font'),
        fontSize = AttrMapValue(isNumber, desc='human readable font size'),
        humanReadable = AttrMapValue(isBoolean, desc='if human readable'),
        stop = AttrMapValue(isBoolean, desc='if we use start/stop symbols (default 1)'),
        )
    _bcTransMap = {}

    def __init__(self,**kw):
        # Combine the widget with the I2of5 barcode class; 1234 is the
        # default sample value.
        from reportlab.graphics.barcode.common import I2of5
        _BarcodeWidget.__init__(self,I2of5,1234,**kw)
class BarcodeCode128(BarcodeI2of5):
    """Code 128 encodes any number of characters in the ASCII character set.
    """
    _tests = [
        'ReportLab Rocks!'
        ]

    codeName = "Code128"
    # Code 128 has no bearers/ratio/checksum/stop knobs; drop them.
    _attrMap = AttrMap(BASE=BarcodeI2of5,UNWANTED=('bearers','checksum','ratio','checksum','stop'))
    def __init__(self,**kw):
        from reportlab.graphics.barcode.code128 import Code128
        _BarcodeWidget.__init__(self,Code128,"AB-12345678",**kw)
class BarcodeStandard93(BarcodeCode128):
    """This is a compressed form of Code 39"""
    codeName = "Standard93"
    _attrMap = AttrMap(BASE=BarcodeCode128,
        stop = AttrMapValue(isBoolean, desc='if we use start/stop symbols (default 1)'),
        )
    def __init__(self,**kw):
        from reportlab.graphics.barcode.code93 import Standard93
        _BarcodeWidget.__init__(self,Standard93,"CODE 93",**kw)
class BarcodeExtended93(BarcodeStandard93):
    """This is a compressed form of Code 39, allowing the full ASCII charset"""
    codeName = "Extended93"
    def __init__(self,**kw):
        from reportlab.graphics.barcode.code93 import Extended93
        _BarcodeWidget.__init__(self,Extended93,"L@@K! Code 93 ;-)",**kw)
class BarcodeStandard39(BarcodeI2of5):
    """Code39 is widely used in non-retail, especially US defence and health.
    Allowed characters are 0-9, A-Z (caps only), space, and -.$/+%*.
    """
    codeName = "Standard39"
    def __init__(self,**kw):
        from reportlab.graphics.barcode.code39 import Standard39
        _BarcodeWidget.__init__(self,Standard39,"A012345B%R",**kw)
class BarcodeExtended39(BarcodeI2of5):
    """Extended 39 encodes the full ASCII character set by encoding
    characters as pairs of Code 39 characters; $, /, % and + are used as
    shift characters."""
    codeName = "Extended39"
    def __init__(self,**kw):
        from reportlab.graphics.barcode.code39 import Extended39
        _BarcodeWidget.__init__(self,Extended39,"A012345B}",**kw)
class BarcodeMSI(BarcodeI2of5):
    """MSI is used for inventory control in retail applications.

    There are several methods for calculating check digits so we
    do not implement one.
    """
    codeName = "MSI"
    def __init__(self,**kw):
        from reportlab.graphics.barcode.common import MSI
        _BarcodeWidget.__init__(self,MSI,1234,**kw)
class BarcodeCodabar(BarcodeI2of5):
    """Used in blood banks, photo labs and FedEx labels.
    Encodes 0-9, -$:/.+, and four start/stop characters A-D.
    """
    codeName = "Codabar"
    def __init__(self,**kw):
        from reportlab.graphics.barcode.common import Codabar
        _BarcodeWidget.__init__(self,Codabar,"A012345B",**kw)
class BarcodeCode11(BarcodeI2of5):
    """Used mostly for labelling telecommunications equipment.
    It encodes numeric digits.
    """
    codeName = "Code11"
    _attrMap = AttrMap(BASE=BarcodeI2of5,
        checksum = AttrMapValue(isInt,'''(integer, default 2):
            Whether to compute and include the check digit(s).
            (0 none, 1 1-digit, 2 2-digit, -1 auto, default -1):
            How many checksum digits to include. -1 ("auto") means
            1 if the number of digits is 10 or less, else 2.'''),
        )
    def __init__(self,**kw):
        from reportlab.graphics.barcode.common import Code11
        _BarcodeWidget.__init__(self,Code11,"01234545634563",**kw)
class BarcodeFIM(_BarcodeWidget):
    """
    FIM was developed as part of the POSTNET barcoding system. FIM (Face Identification Marking) is used by the cancelling machines to sort mail according to whether or not they have bar code and their postage requirements. There are four types of FIM called FIM A, FIM B, FIM C, and FIM D.

    The four FIM types have the following meanings:
        FIM A- Postage required pre-barcoded
        FIM B - Postage pre-paid, no bar code exists
        FIM C- Postage prepaid prebarcoded
        FIM D- Postage required, no bar code exists
    """
    codeName = "FIM"
    _attrMap = AttrMap(BASE=_BarcodeWidget,
        barWidth = AttrMapValue(isNumber,'''(float, default 1/32in): the bar width.'''),
        spaceWidth = AttrMapValue(isNumber,'''(float or None, default 1/16in):
            width of intercharacter gap. None means "use barWidth".'''),
        barHeight = AttrMapValue(isNumber,'''(float, default 5/8in): The bar height.'''),
        quiet = AttrMapValue(isBoolean,'''(bool, default 0):
            Whether to include quiet zones in the symbol.'''),
        lquiet = AttrMapValue(isNumber,'''(float, default: 15/32in):
            Quiet zone size to left of code, if quiet is true.'''),
        rquiet = AttrMapValue(isNumber,'''(float, default 1/4in):
            Quiet zone size to right left of code, if quiet is true.'''),
        fontName = AttrMapValue(isString, desc='human readable font'),
        fontSize = AttrMapValue(isNumber, desc='human readable font size'),
        humanReadable = AttrMapValue(isBoolean, desc='if human readable'),
        )
    def __init__(self,**kw):
        # "A" is the default FIM type rendered in demos.
        from reportlab.graphics.barcode.usps import FIM
        _BarcodeWidget.__init__(self,FIM,"A",**kw)
class BarcodePOSTNET(_BarcodeWidget):
    """POSTNET: the two-height USPS barcode encoding ZIP/ZIP+4 codes."""
    codeName = "POSTNET"
    _attrMap = AttrMap(BASE=_BarcodeWidget,
        barWidth = AttrMapValue(isNumber,'''(float, default 0.018*in): the bar width.'''),
        spaceWidth = AttrMapValue(isNumber,'''(float or None, default 0.0275in): width of intercharacter gap.'''),
        shortHeight = AttrMapValue(isNumber,'''(float, default 0.05in): The short bar height.'''),
        barHeight = AttrMapValue(isNumber,'''(float, default 0.125in): The full bar height.'''),
        fontName = AttrMapValue(isString, desc='human readable font'),
        fontSize = AttrMapValue(isNumber, desc='human readable font size'),
        humanReadable = AttrMapValue(isBoolean, desc='if human readable'),
        )
    def __init__(self,**kw):
        from reportlab.graphics.barcode.usps import POSTNET
        _BarcodeWidget.__init__(self,POSTNET,"78247-1043",**kw)
class BarcodeUSPS_4State(_BarcodeWidget):
    """USPS Intelligent Mail (4-state) barcode widget."""
    codeName = "USPS_4State"
    _attrMap = AttrMap(BASE=_BarcodeWidget,
        widthSize = AttrMapValue(isNumber,'''(float, default 1): the bar width size adjustment between 0 and 1.'''),
        heightSize = AttrMapValue(isNumber,'''(float, default 1): the bar height size adjustment between 0 and 1.'''),
        fontName = AttrMapValue(isString, desc='human readable font'),
        fontSize = AttrMapValue(isNumber, desc='human readable font size'),
        tracking = AttrMapValue(isString, desc='tracking data'),
        routing = AttrMapValue(isString, desc='routing data'),
        humanReadable = AttrMapValue(isBoolean, desc='if human readable'),
        )
    def __init__(self,**kw):
        from reportlab.graphics.barcode.usps4s import USPS_4State
        kw.setdefault('routing','01234567891')
        _BarcodeWidget.__init__(self,USPS_4State,'01234567094987654321',**kw)

    def annotate(self,x,y,text,fontName,fontSize,anchor='middle'):
        # USPS 4-state captions are left-anchored, unlike the base widget.
        _BarcodeWidget.annotate(self,x,y,text,fontName,fontSize,anchor='start')
if __name__=='__main__':
    # Demo/regression driver: render every widget class to out/<name>.gif
    # and build a simple index.html linking the images.
    import os, sys, glob
    from reportlab.graphics.shapes import Drawing
    os.chdir(os.path.dirname(sys.argv[0]))
    if not os.path.isdir('out'):
        os.mkdir('out')
    # Clear previous output (Python 2: map() runs eagerly here).
    map(os.remove,glob.glob(os.path.join('out','*')))
    html = ['<html><head></head><body>']
    a = html.append
    for C in (BarcodeI2of5,
            BarcodeCode128,
            BarcodeStandard93,
            BarcodeExtended93,
            BarcodeStandard39,
            BarcodeExtended39,
            BarcodeMSI,
            BarcodeCodabar,
            BarcodeCode11,
            BarcodeFIM,
            BarcodePOSTNET,
            BarcodeUSPS_4State,
            ):
        name = C.__name__
        i = C()
        D = Drawing(100,50)
        D.add(i)
        D.save(formats=['gif','pict'],outDir='out',fnRoot=name)
        a('<h2>%s</h2><img src="%s.gif"><br>' % (name, name))
    a('</body></html>')
    open(os.path.join('out','index.html'),'w').write('\n'.join(html))
| {
"content_hash": "597bab730eef733a9656d5867ce1991e",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 290,
"avg_line_length": 45.25,
"alnum_prop": 0.6277198008321397,
"repo_name": "jwheare/digest",
"id": "09d2ca0c73c16b942e3db9bbeb69d1e574ff88de",
"size": "14747",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/reportlab/graphics/barcode/widgets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "75169"
},
{
"name": "Python",
"bytes": "3874016"
}
],
"symlink_target": ""
} |
"""ProceSeq_16S setup"""
import codecs
import os
import re
import sys
NAME = "ProceSeq_16S"
def parse_requirements(filename):
    """Load requirement specifiers from a pip requirements file.

    Args:
        filename: path to the requirements file.

    Returns:
        tuple: stripped, non-empty lines that are not '#' comments.
    """
    # BUG FIX: open the file in a context manager so the handle is closed
    # deterministically (the original generator leaked it until GC).
    with open(filename) as requirements_file:
        stripped = (line.strip() for line in requirements_file)
        return tuple(line for line in stripped
                     if line and not line.startswith("#"))
# Fail fast with an actionable message when setuptools is unavailable;
# plain distutils cannot install this package.
try:
    from setuptools import setup, find_packages
except ImportError:
    raise RuntimeError("Python package setuptools hasn't been installed.\n"
                       "Please install setuptools before installing "
                       "{}.\n".format(NAME))
# The package targets Python 3.4+; refuse to install on older runtimes.
if sys.version_info < (3, 4, 0):
    raise RuntimeError("Python 3.4.0 or higher required.\n")
def read(*parts):
    """Read and return the text of a file addressed relative to this module.

    Args:
        *parts: path components joined under this file's directory.

    Returns:
        str: full contents of the file.
    """
    here = os.path.abspath(os.path.dirname(__file__))
    # BUG FIX: close the handle deterministically (the original
    # codecs.open(...).read() leaked it until GC).
    with codecs.open(os.path.join(here, *parts), "r") as handle:
        return handle.read()
def get_version(*file_paths):
    """Extract the ``__version__`` string from the file at *file_paths*.

    Raises:
        RuntimeError: when no version assignment is found.
    """
    contents = read(*file_paths)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                      contents, re.M)
    if not match:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
def get_long_description():
    """Return the contents of README.rst, or '' when it cannot be read."""
    try:
        with open("README.rst", "r") as handle:
            text = handle.read()
    except IOError:
        print("Cannot find README.rst file. Long description will be empty.\n")
        text = ""
    return text
def get_requirements():
    """Return the install requirements parsed from requirements.txt."""
    parsed = parse_requirements("requirements.txt")
    return tuple(str(item) for item in parsed)
def get_scripts():
    """Return the paths of the executable scripts shipped under bin/."""
    names = ("analyze_dada2.py",
             "dada2.R",
             "extract_taxonomy.py",
             "proceseq-16s")
    return tuple(os.path.join('bin', name) for name in names)
# Package metadata and build configuration; executed on install/sdist.
setup(name=NAME,
      # Version is read from proceseq_16s/__init__.py's __version__.
      version=get_version(NAME.lower(), "__init__.py"),
      description="A bioinformatics toolkit for 16S rRNA data processing.",
      long_description=get_long_description(),
      install_requires=get_requirements(),
      keywords="bioinformatics sequencing data processing 16S microbiome",
      url="https://github.com/vrbacky/proceseq_16s",
      author="Filip Vrbacky",
      author_email="vrbacky@fnhk.cz",
      maintainer="Filip Vrbacky",
      maintainer_email="vrbacky@fnhk.cz",
      license="MIT",
      zip_safe=False,
      packages=find_packages(exclude=["bin", "docs", "test"]),
      # Scripts are installed as executables from bin/ (see get_scripts).
      scripts=get_scripts(),
      test_suite="test",
      include_package_data=True,
      data_files=["proceseq_16s/images/proceseq_16s.gif"],
      classifiers=("Development Status :: 3 - Alpha",
                   "Environment :: Console",
                   "Intended Audience :: Science/Research",
                   "Natural Language :: English",
                   "Operating System :: OS Independent",
                   "Programming Language :: Python :: 3.5",
                   "Topic :: Scientific/Engineering :: Bio-Informatics",
                   )
      )
| {
"content_hash": "7fcfcbf3081518d109a7634934eadc3a",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 80,
"avg_line_length": 32.604395604395606,
"alnum_prop": 0.6059993259184361,
"repo_name": "vrbacky/proceseq_16s",
"id": "f13b302f8e66c68209b00de26b5a7a853129b5d6",
"size": "2990",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "226851"
},
{
"name": "R",
"bytes": "16737"
}
],
"symlink_target": ""
} |
"""Show the default configuration."""
import sys
import nikola.plugins.command.init
from nikola.plugin_categories import Command
from nikola.utils import get_logger
LOGGER = get_logger('default_config')
class CommandShowConfig(Command):
    """Print Nikola's default configuration to standard output."""

    name = "default_config"
    doc_usage = ""
    needs_config = False
    doc_purpose = "Print the default Nikola configuration."
    cmd_options = []

    def _execute(self, options=None, args=None):
        """Emit the default configuration, falling back to raw UTF-8 bytes."""
        build_conf = nikola.plugins.command.init.CommandInit.create_configuration_to_string
        try:
            print(build_conf())
        except Exception:
            # Encoding-hostile stdout (e.g. non-UTF-8 console): bypass
            # print() and write the encoded bytes directly.
            sys.stdout.buffer.write(build_conf().encode('utf-8'))
| {
"content_hash": "2bc9f2b5c7447ee5b5e5d391bf920775",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 125,
"avg_line_length": 28.321428571428573,
"alnum_prop": 0.694829760403531,
"repo_name": "getnikola/nikola",
"id": "fddda26bd872bff7ee27fe0465fd4eb6ad555558",
"size": "1935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nikola/plugins/command/default_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34036"
},
{
"name": "HTML",
"bytes": "239"
},
{
"name": "JavaScript",
"bytes": "2076"
},
{
"name": "Jupyter Notebook",
"bytes": "568"
},
{
"name": "Python",
"bytes": "1299776"
},
{
"name": "Shell",
"bytes": "9704"
},
{
"name": "XSLT",
"bytes": "3619"
}
],
"symlink_target": ""
} |
"""Additional help about types of credentials and authentication."""
from __future__ import absolute_import
from gslib.help_provider import HelpProvider
# User-visible help text rendered by "gsutil help creds". NOTE: this is a
# runtime string, not a docstring -- any edit changes the help output verbatim.
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW</B>
gsutil currently supports several types of credentials/authentication, as
well as the ability to access public data anonymously (see "gsutil help anon"
for more on anonymous access). Each of these type of credentials is discussed
in more detail below, along with information about configuring and using
credentials via either the Cloud SDK or standalone installations of gsutil.
<B>Configuring/Using Credentials via Cloud SDK Distribution of gsutil</B>
When gsutil is installed/used via the Cloud SDK ("gcloud"), credentials are
stored by Cloud SDK in a non-user-editable file located under
~/.config/gcloud (any manipulation of credentials should be done via the
gcloud auth command). If you need to set up multiple credentials (e.g., one
for an individual user account and a second for a service account), the
gcloud auth command manages the credentials for you, and you switch between
credentials using the gcloud auth command as well (for more details see
https://developers.google.com/cloud/sdk/gcloud/#gcloud.auth).
Once credentials have been configured via gcloud auth, those credentials will
be used regardless of whether the user has any boto configuration files (which
are located at ~/.boto unless a different path is specified in the BOTO_CONFIG
environment variable). However, gsutil will still look for credentials in the
boto config file if a type of credential is needed that's not stored in the
gcloud credential store (e.g., an HMAC credential for an S3 account).
<B>Configuring/Using Credentials via Standalone gsutil Distribution</B>
If you installed a standalone distribution of gsutil (downloaded from
https://pub.storage.googleapis.com/gsutil.tar.gz,
https://pub.storage.googleapis.com/gsutil.zip, or PyPi), credentials are
configured using the gsutil config command, and are stored in the
user-editable boto config file (located at ~/.boto unless a different path is
specified in the BOTO_CONFIG environment). In this case if you want to set up
multiple credentials (e.g., one for an individual user account and a second
for a service account), you run gsutil config once for each credential, and
save each of the generated boto config files (e.g., renaming one to
~/.boto_user_account and the second to ~/.boto_service_account), and you
switch between the credentials using the BOTO_CONFIG environment variable
(e.g., by running BOTO_CONFIG=~/.boto_user_account gsutil ls).
Note that when using the standalone version of gsutil with the JSON API you
can configure at most one of the following types of GCS credentials in a
single boto config file: OAuth2 User Account, OAuth2 Service Account. In
addition to these, you may also have S3 HMAC credentials (necessary for using
s3:// URLs) and GCE Internal Service Account credentials. GCE Internal Service
Account credentials are used only when OAuth2 credentials are not present.
<B>SUPPORTED CREDENTIAL TYPES</B>
gsutil supports several types of credentials (the specific subset depends on
which distribution of gsutil you are using; see above discussion).
OAuth2 User Account:
This is the preferred type of credentials for authenticating requests on
behalf of a specific user (which is probably the most common use of gsutil).
This is the default type of credential that will be created when you run
"gsutil config".
For more details about OAuth2 authentication, see:
https://developers.google.com/accounts/docs/OAuth2#scenarios
HMAC:
This type of credential can be used by programs that are implemented using
HMAC authentication, which is an authentication mechanism supported by
certain other cloud storage service providers. This type of credential can
also be used for interactive use when moving data to/from service providers
that support HMAC credentials. This is the type of credential that will be
created when you run "gsutil config -a".
Note that it's possible to set up HMAC credentials for both Google Cloud
Storage and another service provider; or to set up OAuth2 user account
credentials for Google Cloud Storage and HMAC credentials for another
service provider. To do so, after you run the gsutil config command, you
can edit the generated ~/.boto config file and look for comments for where
other credentials can be added.
For more details about HMAC authentication, see:
https://developers.google.com/storage/docs/reference/v1/getting-startedv1#keys
OAuth2 Service Account:
This is the preferred type of credential to use when authenticating on
behalf of a service or application (as opposed to a user). For example, if
you will run gsutil out of a nightly cron job to upload/download data,
using a service account allows the cron job not to depend on credentials of
an individual employee at your company. This is the type of credential that
will be configured when you run "gsutil config -e".
It is important to note that a service account is considered an Editor by
default for the purposes of API access, rather than an Owner. In particular,
the fact that Editors have OWNER access in the default object and
bucket ACLs, but the canned ACL options remove OWNER access from
Editors, can lead to unexpected results. The solution to this problem is to
ensure the service account is an Owner in the Permissions tab for your
project. To find the email address of your service account, visit the
`Google Developers Console <https://cloud.google.com/console#/project>`_,
click on the project you're using, click "APIs & auth", and click
"Credentials".
To create a service account, visit the Google Developers Console and then:
- Click "APIs & auth" in the left sidebar.
- Click "Credentials".
- Click "Create New Client ID".
- Select "Service Account" as your application type.
- Save the JSON private key or the .p12 private key and password
provided.
For further information about account roles, see:
https://developers.google.com/console/help/#DifferentRoles
For more details about OAuth2 service accounts, see:
https://developers.google.com/accounts/docs/OAuth2ServiceAccount
GCE Internal Service Account:
This is the type of service account used for accounts hosted by App Engine
or GCE. Such credentials are created automatically for you on GCE when you
run the gcutil addinstance command with the --service_account flag.
For more details about GCE service accounts, see:
https://developers.google.com/compute/docs/authentication;
For more details about App Engine service accounts, see:
https://developers.google.com/appengine/docs/python/appidentity/overview
""")
class CommandOptions(HelpProvider):
  """Additional help about types of credentials and authentication."""

  # Help specification. See help_provider.py for documentation.
  # Declarative only: registers the module-level _DETAILED_HELP_TEXT above
  # under the help topic name "creds" (plus aliases).
  help_spec = HelpProvider.HelpSpec(
      help_name='creds',
      help_name_aliases=['credentials', 'authentication', 'auth', 'gcloud'],
      help_type='additional_help',
      help_one_line_summary='Credential Types Supporting Various Use Cases',
      help_text=_DETAILED_HELP_TEXT,
      subcommand_help_text={},
  )
| {
"content_hash": "2f9100dade737508c631b0e2ae932f11",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 84,
"avg_line_length": 50.79054054054054,
"alnum_prop": 0.756950911267793,
"repo_name": "dimfeld/gsutil",
"id": "f7ac5fc1f764d04bbcdaa6c4b62e98040d353f05",
"size": "8137",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "gslib/addlhelp/creds.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2024158"
}
],
"symlink_target": ""
} |
import mock
from oslo_concurrency import processutils as putils
from cinder import context
from cinder import exception
from cinder.tests.unit.targets import targets_fixture as tf
from cinder import utils
from cinder.volume.targets import lio
class TestLioAdmDriver(tf.TargetDriverFixture):
    """Unit tests for the LIO iSCSI target driver (cinder-rtstool wrapper)."""

    def setUp(self):
        """Create a LioAdm target with rtstool verification mocked out."""
        super(TestLioAdmDriver, self).setUp()
        with mock.patch.object(lio.LioAdm, '_verify_rtstool'):
            self.target = lio.LioAdm(root_helper=utils.get_root_helper(),
                                     configuration=self.configuration)

    @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute)
    @mock.patch.object(lio.LioAdm, '_persist_configuration')
    @mock.patch('cinder.utils.execute')
    def test_get_target(self, mexecute, mpersist_cfg, mlock_exec):
        """_get_target should run 'cinder-rtstool get-targets' and not persist."""
        mexecute.return_value = (self.test_vol, None)
        self.assertEqual(self.test_vol, self.target._get_target(self.test_vol))
        self.assertFalse(mpersist_cfg.called)
        expected_args = ('cinder-rtstool', 'get-targets')
        mlock_exec.assert_called_once_with(*expected_args, run_as_root=True)
        mexecute.assert_called_once_with(*expected_args, run_as_root=True)

    def test_get_iscsi_target(self):
        """_get_iscsi_target always reports target id 0 for LIO."""
        ctxt = context.get_admin_context()
        expected = 0
        self.assertEqual(expected,
                         self.target._get_iscsi_target(ctxt,
                                                       self.testvol['id']))

    def test_get_target_and_lun(self):
        """_get_target_and_lun returns fixed (target, lun) == (0, 0)."""
        lun = 0
        iscsi_target = 0
        ctxt = context.get_admin_context()
        expected = (iscsi_target, lun)
        self.assertEqual(expected,
                         self.target._get_target_and_lun(ctxt, self.testvol))

    @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute)
    @mock.patch.object(lio.LioAdm, '_persist_configuration')
    @mock.patch('cinder.utils.execute')
    @mock.patch.object(lio.LioAdm, '_get_target')
    def test_create_iscsi_target(self, mget_target, mexecute, mpersist_cfg,
                                 mlock_exec):
        """Creating a target invokes rtstool 'create' and persists config."""
        mget_target.return_value = 1
        # create_iscsi_target sends volume_name instead of volume_id on error
        self.assertEqual(
            1,
            self.target.create_iscsi_target(
                self.test_vol,
                1,
                0,
                self.fake_volumes_dir))
        mpersist_cfg.assert_called_once_with(self.VOLUME_NAME)
        mexecute.assert_called_once_with(
            'cinder-rtstool',
            'create',
            self.fake_volumes_dir,
            self.test_vol,
            '',
            '',
            self.target.iscsi_protocol == 'iser',
            run_as_root=True)

    @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute)
    @mock.patch.object(lio.LioAdm, '_persist_configuration')
    @mock.patch.object(utils, 'execute')
    @mock.patch.object(lio.LioAdm, '_get_target', return_value=1)
    def test_create_iscsi_target_port_ip(self, mget_target, mexecute,
                                         mpersist_cfg, mlock_exec):
        """A single portal IP/port pair is forwarded as -p/-a flags."""
        ip = '10.0.0.15'
        port = 3261
        self.assertEqual(
            1,
            self.target.create_iscsi_target(
                name=self.test_vol,
                tid=1,
                lun=0,
                path=self.fake_volumes_dir,
                **{'portals_port': port, 'portals_ips': [ip]}))
        expected_args = (
            'cinder-rtstool',
            'create',
            self.fake_volumes_dir,
            self.test_vol,
            '',
            '',
            self.target.iscsi_protocol == 'iser',
            '-p%s' % port,
            '-a' + ip)
        mlock_exec.assert_any_call(*expected_args, run_as_root=True)
        mexecute.assert_any_call(*expected_args, run_as_root=True)
        mpersist_cfg.assert_called_once_with(self.VOLUME_NAME)

    @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute)
    @mock.patch.object(lio.LioAdm, '_persist_configuration')
    @mock.patch.object(utils, 'execute')
    @mock.patch.object(lio.LioAdm, '_get_target', return_value=1)
    def test_create_iscsi_target_port_ips(self, mget_target, mexecute,
                                          mpersist_cfg, mlock_exec):
        """Multiple portal IPs are joined with ',' in the -a flag."""
        test_vol = 'iqn.2010-10.org.openstack:' + self.VOLUME_NAME
        ips = ['10.0.0.15', '127.0.0.1']
        port = 3261
        self.assertEqual(
            1,
            self.target.create_iscsi_target(
                name=test_vol,
                tid=1,
                lun=0,
                path=self.fake_volumes_dir,
                **{'portals_port': port, 'portals_ips': ips}))
        expected_args = (
            'cinder-rtstool',
            'create',
            self.fake_volumes_dir,
            test_vol,
            '',
            '',
            self.target.iscsi_protocol == 'iser',
            '-p%s' % port,
            '-a' + ','.join(ips))
        mlock_exec.assert_any_call(*expected_args, run_as_root=True)
        mexecute.assert_any_call(*expected_args, run_as_root=True)
        mpersist_cfg.assert_called_once_with(self.VOLUME_NAME)

    @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute)
    @mock.patch.object(lio.LioAdm, '_persist_configuration')
    @mock.patch('cinder.utils.execute',
                side_effect=putils.ProcessExecutionError)
    @mock.patch.object(lio.LioAdm, '_get_target')
    def test_create_iscsi_target_already_exists(self, mget_target, mexecute,
                                                mpersist_cfg, mlock_exec):
        """An rtstool failure raises ISCSITargetCreateFailed and skips persist."""
        chap_auth = ('foo', 'bar')
        self.assertRaises(exception.ISCSITargetCreateFailed,
                          self.target.create_iscsi_target,
                          self.test_vol,
                          1,
                          0,
                          self.fake_volumes_dir,
                          chap_auth)
        self.assertFalse(mpersist_cfg.called)
        expected_args = ('cinder-rtstool', 'create', self.fake_volumes_dir,
                         self.test_vol, chap_auth[0], chap_auth[1], False)
        mlock_exec.assert_called_once_with(*expected_args, run_as_root=True)
        mexecute.assert_called_once_with(*expected_args, run_as_root=True)

    @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute)
    @mock.patch.object(lio.LioAdm, '_persist_configuration')
    @mock.patch('cinder.utils.execute')
    def test_remove_iscsi_target(self, mexecute, mpersist_cfg, mlock_exec):
        """Removal runs rtstool 'delete'; failure raises and skips persist."""
        # Test the normal case
        self.target.remove_iscsi_target(0,
                                        0,
                                        self.testvol['id'],
                                        self.testvol['name'])
        expected_args = ('cinder-rtstool', 'delete',
                         self.iscsi_target_prefix + self.testvol['name'])
        mlock_exec.assert_called_once_with(*expected_args, run_as_root=True)
        mexecute.assert_called_once_with(*expected_args, run_as_root=True)
        mpersist_cfg.assert_called_once_with(self.fake_volume_id)
        # Test the failure case: putils.ProcessExecutionError
        mlock_exec.reset_mock()
        mpersist_cfg.reset_mock()
        mexecute.side_effect = putils.ProcessExecutionError
        self.assertRaises(exception.ISCSITargetRemoveFailed,
                          self.target.remove_iscsi_target,
                          0,
                          0,
                          self.testvol['id'],
                          self.testvol['name'])
        mlock_exec.assert_called_once_with(*expected_args, run_as_root=True)
        # Ensure there have been no calls to persist configuration
        self.assertFalse(mpersist_cfg.called)

    @mock.patch.object(lio.LioAdm, '_get_targets')
    @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute)
    @mock.patch('cinder.utils.execute')
    def test_ensure_export(self, mock_exec, mock_execute, mock_get_targets):
        """With no existing target, ensure_export restores saved config."""
        ctxt = context.get_admin_context()
        mock_get_targets.return_value = None
        self.target.ensure_export(ctxt,
                                  self.testvol,
                                  self.fake_volumes_dir)
        expected_args = ('cinder-rtstool', 'restore')
        mock_exec.assert_called_once_with(*expected_args, run_as_root=True)

    @mock.patch.object(lio.LioAdm, '_get_targets')
    @mock.patch.object(lio.LioAdm, '_restore_configuration')
    def test_ensure_export_target_exist(self, mock_restore, mock_get_targets):
        """With an existing target, ensure_export must not restore config."""
        ctxt = context.get_admin_context()
        mock_get_targets.return_value = 'target'
        self.target.ensure_export(ctxt,
                                  self.testvol,
                                  self.fake_volumes_dir)
        self.assertFalse(mock_restore.called)

    @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute)
    @mock.patch.object(lio.LioAdm, '_persist_configuration')
    @mock.patch('cinder.utils.execute')
    @mock.patch.object(lio.LioAdm, '_get_iscsi_properties')
    def test_initialize_connection(self, mock_get_iscsi, mock_execute,
                                   mpersist_cfg, mlock_exec):
        """Attach registers the initiator; rtstool failure raises and skips persist."""
        target_id = self.iscsi_target_prefix + 'volume-' + self.fake_volume_id
        connector = {'initiator': 'fake_init'}
        # Test the normal case
        mock_get_iscsi.return_value = 'foo bar'
        expected_return = {'driver_volume_type': 'iscsi',
                           'data': 'foo bar'}
        self.assertEqual(expected_return,
                         self.target.initialize_connection(self.testvol,
                                                           connector))
        expected_args = ('cinder-rtstool', 'add-initiator', target_id,
                         self.expected_iscsi_properties['auth_username'],
                         '2FE0CQ8J196R', connector['initiator'])
        mlock_exec.assert_called_once_with(*expected_args, run_as_root=True)
        mock_execute.assert_called_once_with(*expected_args, run_as_root=True)
        mpersist_cfg.assert_called_once_with(self.fake_volume_id)
        # Test the failure case: putils.ProcessExecutionError
        mlock_exec.reset_mock()
        mpersist_cfg.reset_mock()
        mock_execute.side_effect = putils.ProcessExecutionError
        self.assertRaises(exception.ISCSITargetAttachFailed,
                          self.target.initialize_connection,
                          self.testvol,
                          connector)
        mlock_exec.assert_called_once_with(*expected_args, run_as_root=True)
        # Ensure there have been no calls to persist configuration
        self.assertFalse(mpersist_cfg.called)

    @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute)
    @mock.patch.object(lio.LioAdm, '_persist_configuration')
    @mock.patch('cinder.utils.execute')
    def test_terminate_connection(self, mock_execute, mpersist_cfg,
                                  mlock_exec):
        """Detach removes the initiator via rtstool and persists config."""
        target_id = self.iscsi_target_prefix + 'volume-' + self.fake_volume_id
        connector = {'initiator': 'fake_init'}
        self.target.terminate_connection(self.testvol,
                                         connector)
        expected_args = ('cinder-rtstool', 'delete-initiator', target_id,
                         connector['initiator'])
        mlock_exec.assert_called_once_with(*expected_args, run_as_root=True)
        mock_execute.assert_called_once_with(*expected_args, run_as_root=True)
        mpersist_cfg.assert_called_once_with(self.fake_volume_id)

    @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute)
    @mock.patch.object(lio.LioAdm, '_persist_configuration')
    @mock.patch('cinder.utils.execute')
    def test_terminate_connection_fail(self, mock_execute, mpersist_cfg,
                                       mlock_exec):
        """A failing detach raises ISCSITargetDetachFailed and skips persist."""
        target_id = self.iscsi_target_prefix + 'volume-' + self.fake_volume_id
        mock_execute.side_effect = putils.ProcessExecutionError
        connector = {'initiator': 'fake_init'}
        self.assertRaises(exception.ISCSITargetDetachFailed,
                          self.target.terminate_connection,
                          self.testvol,
                          connector)
        mlock_exec.assert_called_once_with('cinder-rtstool',
                                           'delete-initiator', target_id,
                                           connector['initiator'],
                                           run_as_root=True)
        self.assertFalse(mpersist_cfg.called)

    def test_iscsi_protocol(self):
        """The driver reports plain 'iscsi' as its protocol."""
        self.assertEqual('iscsi', self.target.iscsi_protocol)

    @mock.patch.object(lio.LioAdm, '_get_target_and_lun', return_value=(1, 2))
    @mock.patch.object(lio.LioAdm, 'create_iscsi_target', return_value=3)
    @mock.patch.object(lio.LioAdm, '_get_target_chap_auth',
                       return_value=(mock.sentinel.user, mock.sentinel.pwd))
    def test_create_export(self, mock_chap, mock_create, mock_get_target):
        """create_export builds the provider location and CHAP auth string."""
        ctxt = context.get_admin_context()
        result = self.target.create_export(ctxt, self.testvol_2,
                                           self.fake_volumes_dir)
        loc = (u'%(ip)s:%(port)d,3 %(prefix)s%(name)s 2' %
               {'ip': self.configuration.iscsi_ip_address,
                'port': self.configuration.iscsi_port,
                'prefix': self.iscsi_target_prefix,
                'name': self.testvol_2['name']})
        expected_result = {
            'location': loc,
            'auth': 'CHAP %s %s' % (mock.sentinel.user, mock.sentinel.pwd),
        }
        self.assertEqual(expected_result, result)
        mock_create.assert_called_once_with(
            self.iscsi_target_prefix + self.testvol_2['name'],
            1,
            2,
            self.fake_volumes_dir,
            (mock.sentinel.user, mock.sentinel.pwd),
            portals_ips=[self.configuration.iscsi_ip_address],
            portals_port=self.configuration.iscsi_port)
| {
"content_hash": "b811a621103c1ab7a7ec13003384944b",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 79,
"avg_line_length": 43.52923076923077,
"alnum_prop": 0.5719940623453735,
"repo_name": "Nexenta/cinder",
"id": "f582efae4a2ac67088c224f385228974908b3695",
"size": "14720",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/targets/test_lio_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18007018"
},
{
"name": "Shell",
"bytes": "13543"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import json
import os
from flask import Flask
from flask import request
from flask import make_response
# Flask application instance; created at module import time so that WSGI
# servers (and app.run below) can pick it up as "app".
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
    """Handle an API.AI webhook POST: parse JSON, process it, reply as JSON."""
    payload = request.get_json(silent=True, force=True)
    print("Request:")
    print(json.dumps(payload, indent=4))
    body = json.dumps(processRequest(payload), indent=4)
    response = make_response(body)
    response.headers['Content-Type'] = 'application/json'
    return response
def processRequest(req):
    """Dispatch an API.AI request to the flydubai flight-info API.

    Args:
        req (dict): parsed API.AI webhook payload.

    Returns:
        dict: webhook response built from the API answer.
    """
    if req.get("result").get("action") == "flightdetails":
        baseurl = "https://devapi.flydubai.com/ops/v3/flightinfo"
    # NOTE(review): for any other action, baseurl stays unbound and the
    # Request() call below raises NameError -- confirm the intended action set.
    yql_query = makeYqlQuery(req)
    print(yql_query)
    # BUG FIX: this module imports urlencode/Request by name
    # ("from urllib.parse import ...", "from urllib.request import ..."),
    # so the bare "urllib" package name was never bound and the original
    # urllib.parse.urlencode / urllib.request.Request calls raised NameError.
    data = urlencode(yql_query)
    data = data.encode('ascii')  # POST body must be bytes
    # Use a distinct local name; the original rebound "req" and shadowed
    # the function argument.
    api_request = Request(baseurl, data)
    result = urlopen(api_request).read()
    data = json.loads(result)
    res = makeWebhookResult(data)
    return res
def makeYqlQuery(req):
    """Build the flight-info request payload from API.AI parameters.

    Args:
        req (dict): parsed API.AI webhook payload; the flight number is read
            from req["result"]["parameters"]["flightNumber"].

    Returns:
        set: single-element set holding the request-body string.
            NOTE(review): a dict (for JSON encoding) was probably intended;
            kept as-is for backward compatibility with processRequest.
    """
    result = req.get("result")
    parameters = result.get("parameters")
    # BUG FIX: the original read "paramters.get(...)" (typo) and raised
    # NameError on every call.
    flightNumber = parameters.get("flightNumber")
    return {
        "{ 'carrierCode' : 'FZ', 'flightNumber': '"+flightNumber+"', 'flightDate': '20-10-2017', 'origin': 'DXB', 'destination':'DOH' }"
    }
def makeWebhookResult(data):
    """Build the API.AI webhook response from a flight-info API record.

    Args:
        data (dict): flight record, e.g. {"status": "FO", "gateNumber": 228,
            "terminal": "T2", ...}.

    Returns:
        dict: response with "speech"/"displayText" strings and "source".
    """
    print(data)
    # BUG FIX: gateNumber arrives as an int (e.g. 228, per the sample record
    # above), so the original str + int concatenation raised TypeError.
    speech = ("The flight is in " + data.get('status') +
              " status, and you can see it at Gate number " +
              str(data.get('gateNumber')) + " of the " +
              data.get('terminal') + " Terminal.")
    print("Response:")
    print(speech)
    return {
        "speech": speech,
        "displayText": speech,
        "source": "apiai-weather-webhook-sample"
    }
if __name__ == '__main__':
    # Development entry point: bind on all interfaces, honoring $PORT.
    listen_port = int(os.getenv('PORT', 5000))
    print("Starting app on port %d" % listen_port)
    app.run(debug=False, port=listen_port, host='0.0.0.0')
| {
"content_hash": "2f0c294b4f34810523c7e881a887504e",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 165,
"avg_line_length": 26.16788321167883,
"alnum_prop": 0.6041841004184101,
"repo_name": "mailmevj/apiai-flightassistant-webhook-sample",
"id": "dc19325e7ab00bc953f845983eb1d59d5d9796e5",
"size": "3608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3608"
}
],
"symlink_target": ""
} |
"""``clicmd_ons.py``
`Module for CLI specific functionality`
"""
import re
import os
import traceback
import time
import pytest
from . import clissh
from . import loggers
from .custom_exceptions import CLICMDException
def get_table_title(line):
    """Return the table name preceding the first ':' in *line*.

    Args:
        line(str): output line.

    Returns:
        str | None: title when *line* contains ':', otherwise None.
    """
    if ":" not in line:
        return None
    return line.split(":")[0]
def get_column_ranges(line):
    """Derive column [start, end) string indexes from a '--' delimiter row.

    Args:
        line(str): separator line made of dash groups (e.g. "---- ------").

    Returns:
        list[list[int]] | None: per-column index pairs, or None when *line*
        is not a delimiter row.
    """
    if "--" not in line:
        return None
    ranges = []
    cursor = 0
    for token in line.split():
        ranges.append([cursor, cursor + len(token.lstrip())])
        cursor += len(token) + 1
    return ranges
def get_column_names(table_data, column_ranges):
    """Extract column names from the header rows of a table.

    Multi-row headers are merged: cell fragments from each row are joined
    with single spaces to form one name per column.

    Args:
        table_data(str): header rows fetched from the SSH CLI.
        column_ranges(list): [start, end) string indexes for each column.

    Returns:
        list[str]: one name per column.
    """
    rows = []
    for line in table_data.split('\n'):
        rows.append([line[start:end].rstrip().lstrip()
                     for start, end in column_ranges])
    if len(rows) > 1:
        # Merge fragments of each column across header rows.
        merged = []
        for fragments in zip(*rows):
            merged.append(" ".join(fragments).rstrip())
        return merged
    return rows[0]
def get_dotted_table(table_lines):
    """Parse a dot-leader table ("name .... value") into name/value pairs.

    Args:
        table_lines(list): list of table rows.

    Returns:
        list[list]: list of [name, value] pairs; rows without '....' are
        skipped.
    """
    # Compile once and split once per line; the original recompiled the
    # pattern twice and re-ran the split for each captured field.
    separator = re.compile(r' \.{2,} ')
    table_list = []
    for line in table_lines:
        if "...." in line:
            parts = separator.split(line)
            table_list.append([parts[0], parts[1]])
    return table_list
def get_table_value(table_data, identifier=None, checker=None):
    """Gets necessary field value from the table.
    Args:
        table_data(str): console output data
        identifier(list[]): Column name and row number['column', 'value']
        checker(str): column name to check value
    Raises:
        CLICMDException: invalid row length
    Returns:
        str: Field value
    """
    table_value = ''
    # === parse received data for table header and type of table ==================
    i = 0
    raw_tables_dict = {}
    tables_dict = {}
    table_lines_list = []
    raw_lines_list = []
    # Strip carriage returns and the first/last lines (prompt/echo noise),
    # then group the remaining non-empty lines into per-table chunks keyed
    # by a running index (blank lines separate tables).
    for line in table_data.split("\n"):
        raw_lines_list.append(line.replace("\r", ""))
    raw_lines_list.pop(0)
    raw_lines_list.pop(-1)
    for line in raw_lines_list:
        if line.strip():
            table_lines_list.append(line)
        else:
            if len(table_lines_list) > 0:
                raw_tables_dict.update({i: table_lines_list})
                table_lines_list = []
                i += 1
    # Classify each chunk: drop a "Title:" header line, parse dot-leader
    # tables directly, otherwise locate the '--' delimiter row and slice
    # columnar data by the derived column ranges.
    for key in raw_tables_dict:
        # table_header = ''
        # check if table header present in line
        if ":" in raw_tables_dict[key][0].rstrip()[-1]:
            # table_header = raw_tables_dict[key][0].rstrip()
            raw_tables_dict[key].pop(0)
        elif ".." in raw_tables_dict[key][0]:
            tables_dict[key] = get_dotted_table(raw_tables_dict[key])
        else:
            j = 0
            for line in raw_tables_dict[key]:
                if '--' not in line:
                    j += 1
                else:
                    col_names_text = ''
                    column_ranges = get_column_ranges(line)
                    for idx in range(j):
                        col_names_text += '%s\n' % (raw_tables_dict[key][idx], )
                    column_names_list = get_column_names(col_names_text, column_ranges)
                    tables_dict[key] = []
                    # Each table is stored column-major: a list of columns,
                    # each column being [name, cell0, cell1, ...].
                    for table_column_idx, value in enumerate(column_names_list):
                        table_column = []
                        table_column.append(column_names_list[table_column_idx])
                        for table_row_idx in range(j + 1, len(raw_tables_dict[key])):
                            table_column.append(raw_tables_dict[key][table_row_idx][column_ranges[table_column_idx][0]:
                                                                                    column_ranges[table_column_idx][1]].rstrip().lstrip())
                        tables_dict[key].append(table_column)
    # ===== getting value from table ==================================================
    # in case we have multiple tables lets find out do they have similar columns
    row_index = None
    if len(list(tables_dict.keys())) > 1:
        flags_dict = {}
        # in case tables have equal length
        if len(tables_dict[list(tables_dict.keys())[0]]) == len(tables_dict[list(tables_dict.keys())[1]]):
            # take columns from first to last from first table from dictionary
            for col_idx in range(len(tables_dict[0])):
                for table_key in range(1, len(list(tables_dict.keys()))):
                    # and compare it with according column from next tables in tables dictionary
                    if tables_dict[0][col_idx][0] == tables_dict[table_key][col_idx][0]:
                        flags_dict[table_key] = 1
                        # append values to first table
                        for list_index in range(1, len(tables_dict[table_key][col_idx])):
                            tables_dict[0][col_idx].append(tables_dict[table_key][col_idx][list_index])
                    else:
                        flags_dict[table_key] = False
            # Drop tables that were merged into table 0.
            for table_key in range(1, len(list(tables_dict.keys()))):
                if flags_dict[table_key] == 1:
                    tables_dict.pop(table_key)
    # check if transfered symbols are present, check if empty element are present.
    trans_flag = False
    for key in tables_dict:
        for element_list in tables_dict[key]:
            for element in element_list:
                if not element:
                    trans_flag = True
    # lead all tables in one format (model will be firs row)
    # When a cell wrapped onto the next physical line, merge the spilled
    # fragment back into the preceding column so all rows match the header.
    if trans_flag:
        for key in tables_dict:
            lead_row = tables_dict[key][0]
            # check len
            for row in tables_dict[key][1:]:
                if len(lead_row) != len(row):
                    message = "Row length is invalid: {0} != {1}".format(lead_row, row)
                    raise CLICMDException(message)
                else:
                    for lead_elem, row_elem in zip(lead_row, row):
                        if bool(lead_elem) != bool(row_elem):
                            # transform row_elem in lead_format
                            # print "lead_elem, row_elem", lead_elem, row_elem
                            rowidx = tables_dict[key].index(row)
                            # print "tables_dict[key][rowidx]", tables_dict[key][rowidx]
                            row_elemidx = tables_dict[key][rowidx].index(row_elem)
                            # print "tables_dict[key][rowidx][row_elemidx]", tables_dict[key][rowidx][row_elemidx]
                            new_value = "{0}{1}".format(tables_dict[key][rowidx][row_elemidx - 1], tables_dict[key][rowidx][row_elemidx])
                            tables_dict[key][rowidx][row_elemidx - 1] = new_value
                            tables_dict[key][rowidx][row_elemidx] = ""
    # Locate the row where identifier's column equals identifier's value,
    # then return the checker column's cell from that same row.
    for key in tables_dict:
        for column in tables_dict[key]:
            if column[0] == identifier[0]:
                for value in column:
                    if value == identifier[1]:
                        row_index = column.index(value)
                        for column1 in tables_dict[key]:
                            if column1[0] == checker:
                                table_value = column1[row_index]
                                break
                        if table_value == '':
                            row_index = None
                break
        if table_value != '':
            break
    if table_value == '':
        table_value = "CLIException: Specified table row has not been found"
    return table_value
class CLICmd(object):
    """CLI-specific functionality for a single switch device.

    Wraps an SSH connection (``clissh.CLISSH``) and exposes helpers to run
    show/set commands on the switch's command line and parse the output.

    Args:
        config: environment config.
        switches: switches list.
    """

    suite_logger = loggers.ClassLogger()

    # TODO: add wait_until_value_is_changed method for CLI
    def __init__(self, ipaddr, port, login, passw, prompt, devtype, delay=None, build_path=None, img_path=None,
                 page_break="<?> - help.", xmlrpcport=None):
        """Initialize CLICmd class.

        Args:
            ipaddr(str): switch IP address or hostname.
            port(int): SSH port.
            login(str): CLI login name.
            passw(str): CLI password.
            prompt(str): expected CLI prompt.
            devtype(str): device type; 'lxc' means the CLI must be launched manually.
            delay: optional read delay forwarded to the SSH wrapper.
            build_path(str): base build path (used for 'lxc' devices only).
            img_path(str): CLI image path relative to build_path ('lxc' only).
            page_break(str): pager marker string emitted by the CLI.
            xmlrpcport: XML-RPC port used when starting the CLI on 'lxc'.
        """
        self.timeout = 9
        # find out login, password and command prompt for switches from config for defined user
        self.ipaddr = ipaddr
        self.port = port
        self.xmlrpcport = xmlrpcport
        self.login = login
        self.passw = passw
        self.prompt = prompt
        self.devtype = devtype
        self.build_path = build_path
        self.img_path = img_path
        self.is_shell = False
        self.page_break = page_break
        # create ssh connection to switches and store it in self.conn dictionary
        self.conn = clissh.CLISSH(self.ipaddr, port=self.port, page_break=page_break,
                                  prompt=self.prompt)
        if delay:
            self.conn.delay = delay

    def _connect_to_switch(self, prompt, timeout=20):
        """SSH connect to switch and wait until prompt string appeared.

        Args:
            prompt(str): expected CLI prompt (a single prompt or a list of them).
            timeout(int): connection timeout.

        Returns:
            None

        Examples::

            self._connect_to_switches(sw_keys=1, prompt="Switch ")
        """
        cli_start_path = ''
        self.suite_logger.debug("Login on switch with login: {0} and expected prompt is: {1}".format(self.login, prompt))
        self.conn.login(self.login, self.passw, timeout=self.timeout)
        self.suite_logger.debug("Create Shell")
        self.conn.open_shell(raw_output=True)
        self.conn.shell.settimeout(self.timeout)
        # lxc: run command "python main.py"
        if self.devtype == 'lxc':
            self.suite_logger.debug("Launched CLI on LXC")
            if os.path.exists(os.path.join(self.build_path, self.img_path, 'main.py')) is True:
                cli_start_path = os.path.join(self.build_path, self.img_path)
                self.conn.shell_command('cd %s && python main.py -a %s -p %s'
                                        % (cli_start_path, self.ipaddr, self.xmlrpcport), timeout=5, ret_code=False, quiet=True)
            else:
                # cli_start_path is still '' here, so the reported path is relative.
                self.suite_logger.error("Path to CLI image does not exist: %s" % (os.path.join(cli_start_path, 'main.py')))
                pytest.exit("Path to CLI image does not exist: %s" % (os.path.join(cli_start_path, 'main.py')))
        else:
            self.suite_logger.debug("Waiting for CLI on Real switch")
            alter = []
            # Add one or few expected prompt(s) and action(s) to alternatives list
            if isinstance(prompt, str):
                prompt = [prompt]
            for single_prompt in prompt:
                alter.append((single_prompt, None, True, False))
            # Poll up to iterations * 10 seconds for any expected prompt to show up.
            iterations = 12
            for i in range(iterations):
                time.sleep(10)
                login = self.conn.action_on_connect(self.conn.shell, alternatives=alter, timeout=timeout, is_shell=self.is_shell)
                if any(login.find(str(single_prompt)) != -1 for single_prompt in prompt):
                    break
                else:
                    self.suite_logger.debug("Waiting, current prompt is {0}".format(login))
                # NOTE(review): `i == iterations` can never be true inside
                # range(iterations) (max value is iterations - 1), so this
                # fallback is dead code; it also calls action_on_expect while
                # the loop uses action_on_connect -- confirm intended behavior.
                if i == iterations:
                    login = self.conn.action_on_expect(self.conn.shell, alternatives=alter, timeout=timeout, is_shell=self.is_shell)

    def cli_get(self, arguments_list, prompt=None, show=True, timeout=25):
        """Getting values by CLI.

        Args:
            arguments_list(list): list of arguments to get values.
            prompt(str): expected promt or message, takes from cli_set_result.
            show(bool): execute command with show prefix.
            timeout(int): command execution timeout.

        Raises:
            Exception: error on command execution.

        Returns:
            list: List of CLI-GET command results.

        Examples::

            env.switch[1].cli.cli_get(['enable, none 0 none', 'statistics, Port 1, RX Discards']])
        """
        if not prompt:
            prompt = self.prompt
        result = []
        try:
            self._connect_to_switch(prompt)
            # [["readOnly"]] is a sentinel meaning no CLI interaction is required.
            if arguments_list != [["readOnly"]]:
                for arguments in arguments_list:
                    args = arguments
                    # args[0]: table/command, args[1]: row identifier, args[2]: column to read.
                    table = args[0].strip()
                    identifier = []
                    if len(args[1].strip().split(' ')) > 2:
                        # " @" - delimiter between column name and value.
                        if "@" in args[1]:
                            identifier = args[1].split(" @")
                        else:
                            # No explicit delimiter: treat the last word as the value
                            # and everything before it as the column name.
                            str_val_temp = []
                            for str_val_idx in range(len(args[1].strip().split(' ')) - 1):
                                str_val_temp.append(args[1].strip().split(' ')[str_val_idx])
                            identifier.append(" ".join(str_val_temp))
                            identifier.append(args[1].strip().split(' ')[-1])
                    else:
                        identifier = args[1].strip().split(' ')
                    checker = args[2].strip()
                    if checker == 'none':
                        command = table
                    else:
                        if show:
                            command = 'show ' + table
                        else:
                            command = table
                    # Run cli command and get output "<?> - help."
                    alternatives = [("<?> - help.", " ", False, False), ]
                    data, err = self.conn.shell_command(command, alternatives=alternatives, timeout=timeout, ret_code=False, quiet=True, raw_output=True)
                    # Data validation
                    if len(data.split('\n')) == 5 and "....." not in data.split('\n')[-3]:
                        result.append([data.split('\n')[-3].strip()])
                    elif len(data.split('\n')) == 2:
                        result.append([data.split('\n')[-1].strip()])
                    else:
                        # Remove page break from data
                        data = data.replace("<?> - help.", "")
                        value = get_table_value(data, identifier=identifier, checker=checker)
                        result.append([value])
            else:
                result = [["readOnly"]]
        # Close SSH connections
        except Exception:
            self.suite_logger.debug("Cli_get. Exception traceback data: {0}".format(traceback.format_exc()))
            raise
        finally:
            if self.conn:
                self.conn.close()
        return result

    def cli_get_all(self, arguments_list, prompt=None, timeout=25, interval=0.1):
        """Getting values by CLI.

        Args:
            arguments_list(list): list of arguments to get values.
            prompt(str): expected promt or message, takes from cli_set_result.
            timeout(int): command execution timeout.
            interval(int): time interval between read attempts.

        Raises:
            Exception: error on command execution.

        Returns:
            list: List of CLI-GET command results.
        """
        result = []
        alternatives = []
        try:
            for command in arguments_list:
                # Run cli command and get output "<?> - help."
                # NOTE(review): alternatives is never reset between commands, so
                # duplicate pager entries accumulate -- presumably harmless, confirm.
                alternatives.append(("<?> - help.", " ", False, False))
                # Replace ' by " for CLI support.
                if "'" in command[0]:
                    command[0] = command[0].replace("'", '"')
                # "::::" separates the command text from a canned answer for the
                # interactive ('yes'/'no') confirmation prompt.
                if "::::" in command[0]:
                    command[0], answer = command[0].split("::::")
                    alternatives.append(("('yes'/'no'):", answer, False, False))
                data, err = self.conn.shell_command(command[0], alternatives=alternatives, timeout=timeout,
                                                    ret_code=False, quiet=True, raw_output=True, interval=interval)
                # Strip the pager marker and the echoed command from the output.
                data = data.replace("<?> - help.", "")
                result.append(data.replace(command[0], "").strip())
        except Exception:
            self.suite_logger.debug("Cli_get_all. Exception traceback data: {0}".format(traceback.format_exc()))
            raise
        return result

    def cli_set(self, commands_list, timeout=5, prompt=None, connect=True):
        """Setting values by CLI.

        Args:
            commands_list(list): list of commands.
            prompt(str): expected promt or message, takes from cli_set_result.
            timeout(int): command execution timeout.
            connect(bool): Flag if connection should be established before login procedure.

        Raises:
            Exception: error on command execution.

        Returns:
            list: List of CLI-SET command results.

        Examples::

            env.switch[1].cli.cli_set([["enable"], ["vlan-database"], ["vlan 10"]])
        """
        alternatives = []
        tabulation = False
        if not prompt:
            prompt = self.prompt
        result = []
        try:
            if connect:
                self._connect_to_switch(prompt)
                self.suite_logger.debug("Connection to switch established.")
            # [["readOnly"]] is a sentinel meaning no CLI interaction is required.
            if commands_list != [["readOnly"]]:
                for commands in commands_list:
                    command = commands[0]
                    # Optional second element: per-command timeout (int) or a
                    # tabulation marker string ("\t", "\t\t" or "\t\t\t").
                    if len(commands) == 2:
                        if isinstance(commands[1], int):
                            timeout = commands[1]
                        # NOTE(review): the second isinstance duplicates the first
                        # (likely a Python 2 `unicode` leftover) -- confirm.
                        elif isinstance(commands[1], str) or isinstance(commands[1], str):
                            if "\t" in commands[1]:
                                tabulation = commands[1]
                    # Replace ' by ", for cli support
                    if "'" in command:
                        command = command.replace("'", '"')
                    # "::::" separates the command text from a canned answer for
                    # the interactive ('yes'/'no') confirmation prompt.
                    if "::::" in command:
                        command, answer = command.split("::::")
                        alternatives = [("('yes'/'no'):", answer, False, False), ]
                    # Run CLI commands and get answer
                    data, err = self.conn.shell_command(command, alternatives=alternatives,
                                                        timeout=timeout, ret_code=False,
                                                        quiet=True, raw_output=True,
                                                        tabulation=tabulation)
                    # Return error message if it present in output data
                    if len(data.split('\n')) > 2 and ("Error!" in data or
                                                      "CLIException" in data or
                                                      "Invalid command has been entered" in data or
                                                      "Incomplete command has been" in data or
                                                      "Notice" in data):
                        data = data.replace("<?> - help.", "")
                        # Split multi error message if exist in one row
                        result.append([(("".join(data.split('\n')[2:-1])).strip()).replace("\r", " ")])
                    else:
                        # tabulation support processing:
                        # one tab case:
                        if tabulation == "\t":
                            res = []
                            for line in data.split('\n')[1:-1]:
                                for elem in line.split(' '):
                                    res.append(elem.strip())
                            result.append(res)
                        # double tab case:
                        elif tabulation == "\t\t":
                            res = []
                            split_data = []
                            for line in data.split('\n'):
                                split_data.append(line.strip())
                            # The last line repeats an earlier line; the command's
                            # output is everything between those two occurrences.
                            split_point = split_data[:-1].index(split_data[-1])
                            for line in split_data[split_point + 1:-1]:
                                res.append(line.strip())
                            result.append(res)
                        elif tabulation == "\t\t\t":
                            res = []
                            for line in data.split('\n')[1:-1]:
                                res.append(line.strip())
                            result.append(res)
                        else:
                            # Plain command: the last output line holds the result.
                            result.append([data.split('\n')[-1].strip()])
            else:
                result = [["readOnly"]]
        except Exception as err:
            self.suite_logger.error("Cli_set error: %s" % (err, ))
            self.suite_logger.error("Cli_set. Exception traceback data:\n%s" % (traceback.format_exc(), ))
            raise
        finally:
            # Only close the connection if this call opened it.
            if connect:
                if self.conn:
                    self.conn.close()
        return result

    def cli_connect(self, prompt=None):
        """SSH connect to switch and wait untill prompt string appeared.

        Args:
            prompt(str): expected CLI prompt.

        Returns:
            None

        Examples::

            env.switch[1].cli.cli_connect(prompt="Switch ")
        """
        if not prompt:
            prompt = self.prompt
        self._connect_to_switch(prompt)

    def cli_disconnect(self):
        """Close ssh connection to switch.

        Raises:
            CLICMDException: error on disconnect.

        Returns:
            None

        Examples::

            env.switch[1].cli.cli_disconnect()
        """
        try:
            if self.conn:
                self.conn.close()
        except Exception as err:
            raise CLICMDException(err)

    def update_prompt(self, prompt):
        """Updating prompt in both clissh and clicmd_ons objects.

        Args:
            prompt(str): Prompt to be updated.
        """
        self.prompt = prompt
        self.conn.prompt = prompt
| {
"content_hash": "cd6cad75506f620420a39f24166ffe11",
"timestamp": "",
"source": "github",
"line_count": 595,
"max_line_length": 153,
"avg_line_length": 38.90084033613445,
"alnum_prop": 0.49477231487082,
"repo_name": "orestkreminskyi/taf",
"id": "47f43e024f3c8b50fabc67d0e29259e4270c5c2f",
"size": "23741",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "taf/testlib/clicmd_ons.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6745"
},
{
"name": "JavaScript",
"bytes": "1771"
},
{
"name": "Python",
"bytes": "3869203"
},
{
"name": "Shell",
"bytes": "3146"
},
{
"name": "Tcl",
"bytes": "68098"
},
{
"name": "XSLT",
"bytes": "41538"
}
],
"symlink_target": ""
} |
"""
Command line interface module for OpenTMI client
License: MIT
"""
from __future__ import print_function
import sys
import json
import argparse
import logging
import pkg_resources # part of setuptools
from opentmi_client.api import OpenTmiClient
from opentmi_client.utils.exceptions import OpentmiException
# Process exit codes returned by the CLI entry point.
EXIT_CODE_SUCCESS = 0
EXIT_CODE_NOT_IMPLEMENTED = 1
EXIT_CODE_CONNECTION_ERROR = 60
EXIT_CODE_OPERATION_TIMEOUT = 61
EXIT_CODE_INVALID_PARAMETERS = 62
EXIT_CODE_OPERATION_FAILED = 63
def get_subparser(subparsers, name, func=None, **kwargs):
    """Create and return a sub-command parser.

    :param subparsers: the special action object returned by
        ``ArgumentParser.add_subparsers``.
    :param name: sub-command name.
    :param func: optional handler; when truthy it is stored as the parser's
        ``func`` default so dispatch code can later call ``args.func(args)``.
    :param kwargs: forwarded verbatim to ``add_parser`` (e.g. ``help=``).
    :return: the newly created sub-command parser.
    """
    sub_parser = subparsers.add_parser(name, **kwargs)
    if func:
        sub_parser.set_defaults(func=func)
    return sub_parser
class OpentTMIClientCLI(object):
    """
    OpenTMICLientCLI

    Parses command line arguments, configures logging verbosity and
    dispatches the selected subcommand to its handler method.
    """

    def __init__(self, args=None):
        """
        Constructor for CLI

        :param args: argument list to parse; defaults to sys.argv[1:].
        """
        self.console_handler = logging.StreamHandler()
        self.logger = logging.getLogger("opentmi")
        # Replace any inherited handlers so output goes only to our stream handler.
        self.logger.handlers = [self.console_handler]
        if args is None:
            args = sys.argv[1:]
        # argparser_setup also stores the parser itself in self.parser.
        self.args = self.argparser_setup(args)
        self.set_log_level_from_verbose()

    def execute(self):
        """
        Execute the subcommand selected during argument parsing.

        :return: process exit code (one of the EXIT_CODE_* constants).
        """
        if hasattr(self.args, "func") and callable(self.args.func):
            try:
                return self.args.func(self.args)
            except NotImplementedError as error:
                self.logger.error("Not implemented %s", str(error))
                return EXIT_CODE_NOT_IMPLEMENTED
        # No subcommand selected: print usage and report success.
        self.parser.print_usage()
        return EXIT_CODE_SUCCESS

    def argparser_setup(self, sysargs):
        """
        Configure CLI (Command Line Options) options

        :param sysargs: list of command line arguments to parse.
        :return: the parsed argparse.Namespace. Side effect: stores the
            parser in self.parser for later usage printing.
        """
        parser = argparse.ArgumentParser()
        # Global options shared by all subcommands.
        parser.add_argument('-v',
                            dest="verbose",
                            action="count",
                            help="verbose level... repeat up to three times.")
        parser.add_argument('-s', '--silent',
                            dest="silent", default=False,
                            action="store_true",
                            help="Silent - only errors will be printed")
        parser.add_argument('--host',
                            dest='host',
                            default='localhost',
                            help='OpenTMI host, default: localhost')
        parser.add_argument('--user',
                            dest='user',
                            default=None,
                            help='username')
        parser.add_argument('--password',
                            dest='password',
                            default=None,
                            help='password')
        parser.add_argument('--token',
                            dest='token',
                            default=None,
                            help='Authentication token')
        parser.add_argument('--token_service',
                            dest='token_service',
                            default=None,
                            help='Optional authentication service')
        parser.add_argument('-p', '--port',
                            dest='port',
                            type=int,
                            default=0,
                            help='OpenTMI port')
        subparsers = parser.add_subparsers(title='subcommand',
                                           help='sub-command help',
                                           metavar='<subcommand>')
        get_subparser(subparsers, 'version',
                      func=self.subcmd_version_handler,
                      help='Display version information')
        parser_list = get_subparser(subparsers, 'list',
                                    func=self.subcmd_list_handler,
                                    help='List something')
        parser_list.add_argument('--json',
                                 dest='json',
                                 default=False,
                                 action='store_true',
                                 help='results as json')
        parser_list.add_argument('--testcases',
                                 dest='testcases',
                                 action='store_true',
                                 default=None,
                                 help='Testcases')
        parser_list.add_argument('--campaigns',
                                 dest='campaigns',
                                 action='store_true',
                                 default=None,
                                 help='Campaigns')
        # NOTE(review): --builds is parsed but subcmd_list_handler never
        # handles it -- confirm whether builds listing is still planned.
        parser_list.add_argument('--builds',
                                 dest='builds',
                                 action='store_true',
                                 default=None,
                                 help='Builds')
        # NOTE(review): the bare 'store' parser gets no func default, so
        # running `store` without a sub-subcommand just prints usage;
        # subcmd_store_handler is never registered -- confirm intent.
        parser_store = get_subparser(subparsers, 'store',
                                     help='Create something')
        subsubparsers = parser_store.add_subparsers(title='subcommand',
                                                    help='sub-command help',
                                                    metavar='<subcommand>')
        parser_store_testcase = get_subparser(subsubparsers, 'testcase',
                                              func=self.subcmd_store_testcase,
                                              help='Store Testcase')
        parser_store_testcase.add_argument('--file',
                                           dest='file',
                                           default=None,
                                           help='Filename',
                                           type=self.read_json_file,
                                           required=True)
        parser_store_result = get_subparser(subsubparsers, 'result',
                                            func=self.subcmd_store_result,
                                            help='Store Test Result')
        parser_store_result.add_argument('--file',
                                         dest='file',
                                         default=None,
                                         help='Filename',
                                         type=self.read_json_file,
                                         required=True)
        parser_store_build = get_subparser(subsubparsers, 'build',
                                           func=self.subcmd_store_build,
                                           help='Store Build')
        parser_store_build.add_argument('--file',
                                        dest='file',
                                        default=None,
                                        help='Filename',
                                        type=self.read_json_file,
                                        required=True)
        args = parser.parse_args(args=sysargs)
        self.parser = parser
        return args

    def read_json_file(self, filename):
        """
        Load a JSON file (used as an argparse ``type=`` converter).

        :param filename: json filename to be read.
        :returns: Dict parsed from the file.
        :raises argparse.ArgumentTypeError: when the file cannot be opened,
            so argparse reports it as an invalid argument value.
        """
        try:
            with open(filename) as data_file:
                return json.load(data_file)
        except IOError as error:
            self.logger.error("Given file (%s) is not valid! %s", filename, error)
            raise argparse.ArgumentTypeError(error)

    def set_log_level_from_verbose(self):
        """
        Set the logging level from self.args: silent, or one of the
        verbose levels (-v WARNING, -vv INFO, -vvv DEBUG; default ERROR).
        """
        if self.args.silent or not self.args.verbose:
            self.console_handler.setLevel('ERROR')
            self.logger.setLevel('ERROR')
        elif self.args.verbose == 1:
            self.console_handler.setLevel('WARNING')
            self.logger.setLevel('WARNING')
        elif self.args.verbose == 2:
            self.console_handler.setLevel('INFO')
            self.logger.setLevel('INFO')
        elif self.args.verbose >= 3:
            self.console_handler.setLevel('DEBUG')
            self.logger.setLevel('DEBUG')

    def subcmd_version_handler(self, _args):
        """
        Print the installed opentmi_client version (all dependency versions
        when verbose).

        :param _args: parsed arguments (unused; verbosity read from self.args).
        :return: EXIT_CODE_SUCCESS
        """
        versions = pkg_resources.require("opentmi_client")
        if self.args.verbose:
            for ver in versions:
                print(ver)
        else:
            print(versions[0].version)
        return EXIT_CODE_SUCCESS

    def subcmd_store_handler(self, _args):
        """
        Placeholder handler for the bare 'store' subcommand.

        NOTE(review): this handler is never registered with any parser,
        so it is currently unreachable -- confirm whether it should be
        wired to parser_store.

        :raises NotImplementedError: always.
        """
        raise NotImplementedError('store')

    @staticmethod
    def create_client(args):
        """
        Create OpenTmiClient instance based on args

        :param args: arguments
        :return: OpenTmiClient instance
        :raises OpentmiException: when a user is given without a password.
        """
        client = OpenTmiClient(host=args.host, port=args.port)
        if args.user:
            if args.password:
                client.login(args.user, args.password)
            else:
                raise OpentmiException("password missing")
        elif args.token:
            # Token login; service defaults to github when not specified.
            service = args.token_service or "github"
            client.login_with_access_token(args.token, service)
        return client

    def subcmd_store_build(self, args):
        """
        Upload a build document (args.file, already parsed JSON).

        :param args: parsed arguments.
        :return: EXIT_CODE_SUCCESS
        """
        client = self.create_client(args)
        client.upload_build(args.file)
        return EXIT_CODE_SUCCESS

    def subcmd_store_testcase(self, args):
        """
        Update a testcase document (args.file, already parsed JSON).

        :param args: parsed arguments.
        :return: EXIT_CODE_SUCCESS
        """
        client = self.create_client(args)
        client.update_testcase(args.file)
        return EXIT_CODE_SUCCESS

    def subcmd_store_result(self, args):
        """
        Upload test results (args.file, already parsed JSON).

        :param args: parsed arguments.
        :return: EXIT_CODE_SUCCESS
        """
        client = self.create_client(args)
        client.upload_results(args.file)
        return EXIT_CODE_SUCCESS

    def subcmd_list_handler(self, args):
        """
        List testcases or campaigns, as plain text or JSON.

        :param args: parsed arguments.
        :return: 0
        """
        client = self.create_client(args)
        if args.testcases:
            testcases = client.get_testcases()
            if args.json:
                print(json.dumps(testcases))
            else:
                print("Test cases:")
                for test_case in testcases:
                    print(test_case['tcid'])
        elif args.campaigns:
            campaigns = client.get_campaign_names()
            if args.json:
                print(campaigns)
            else:
                for campaign in campaigns:
                    print(campaign)
        return 0
def opentmiclient_main():
    """
    Function used to drive CLI (command line interface) application.

    Builds the CLI object, runs the selected subcommand and terminates the
    process with the subcommand's exit code. An OpentmiException aborts with
    EXIT_CODE_OPERATION_FAILED after printing the error.
    """
    client_cli = OpentTMIClientCLI()
    try:
        exit_code = client_cli.execute()
    except OpentmiException as error:
        print(str(error))
        sys.exit(EXIT_CODE_OPERATION_FAILED)
    sys.exit(exit_code)


if __name__ == '__main__':
    opentmiclient_main()
| {
"content_hash": "c0bdfbe063786b250339f295e2a2bad8",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 82,
"avg_line_length": 33.701183431952664,
"alnum_prop": 0.4737951013958388,
"repo_name": "OpenTMI/opentmi-client-python",
"id": "dfe9bcf01f421644e5e626eb303b65082c16f663",
"size": "11414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opentmi_client/cli/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46281"
}
],
"symlink_target": ""
} |
"""A helper function for parsing and executing Dialogflow skills."""
import os
import logging
from voluptuous import Required
from opsdroid.const import DEFAULT_LANGUAGE
_LOGGER = logging.getLogger(__name__)
# Voluptuous schema for this parser's configuration: 'project-id' is required,
# 'min-score' optionally sets the minimum accepted intent confidence.
CONFIG_SCHEMA = {Required("project-id"): str, "min-score": float}
async def call_dialogflow(text, opsdroid, config):
    """Call Dialogflow to get intent from text.

    Dialogflow will return an object with a few restrictions, you can't
    iterate it and the only way to access each element is by using dot
    notation.

    Args:
        text (string): message.text this is the text obtained from the user.
        opsdroid (OpsDroid): An instance of opsdroid.core.
        config (dict): configuration settings from the file config.yaml.

    Return:
        A 'google.cloud.dialogflow_v2.types.DetectIntentResponse' object.

    Raises:
        Warning: if Google credentials are not found in environmental
            variables or 'project-id' is not in config.
    """
    try:
        # Imported lazily so opsdroid still works without the optional dependency.
        import dialogflow

        if os.environ.get("GOOGLE_APPLICATION_CREDENTIALS") and config.get(
            "project-id"
        ):
            session_client = dialogflow.SessionsClient()
            project_id = config.get("project-id")
            # Parser-level "lang" wins; fall back to the bot-wide setting.
            language = config.get("lang") or opsdroid.config.get(
                "lang", DEFAULT_LANGUAGE
            )
            session = session_client.session_path(project_id, "opsdroid")
            text_input = dialogflow.types.TextInput(text=text, language_code=language)
            query_input = dialogflow.types.QueryInput(text=text_input)
            response = session_client.detect_intent(
                session=session, query_input=query_input
            )
            return response
        else:
            raise Warning(
                _(
                    "Authentication file not found or 'project-id' not in configuration, dialogflow parser will not be available."
                )
            )
    except ImportError:
        _LOGGER.error(
            _(
                "Unable to find dialogflow dependency. Please install dialogflow with the command pip install dialogflow if you want to use this parser."
            )
        )
        # NOTE(review): assumes dialogflow is the first entry in the
        # configured parsers list -- confirm this index is guaranteed.
        opsdroid.config["parsers"][0]["enabled"] = False
async def parse_dialogflow(opsdroid, skills, message, config):
    """Parse a message against all Dialogflow skills.

    This function does a few things, first it will check if the
    intent confidence is higher than the minimum score set on config,
    then it will try to match an action or an intent to a matcher and
    add the proper skills to the skills list.

    At the moment a broad exception is being used due to the fact that
    dialogflow library doesn't have the best documentation yet and it's
    not possible to know what sort of exceptions the library will return.

    Args:
        opsdroid (OpsDroid): An instance of opsdroid.core.
        skills (list): A list containing all skills available.
        message(object): An instance of events.message.
        config (dict): configuration settings from the
            file config.yaml.

    Return:
        Either empty list or a list containing all matched skills.
        NOTE(review): when the broad except below triggers, the function
        implicitly returns None rather than a list -- confirm callers
        handle that.
    """
    try:
        result = await call_dialogflow(message.text, opsdroid, config)
        matched_skills = []
        # Reject matches below the configured confidence threshold.
        if (
            "min-score" in config
            and result.query_result.intent_detection_confidence < config["min-score"]
        ):
            _LOGGER.debug(_("Dialogflow confidence lower than min-score."))
            return matched_skills
        if result:
            for skill in skills:
                for matcher in skill.matchers:
                    if "dialogflow_action" in matcher or "dialogflow_intent" in matcher:
                        # A skill matches on either the detected action name or
                        # the detected intent's display name.
                        if (
                            matcher.get("dialogflow_action")
                            == result.query_result.action
                        ) or (
                            matcher.get("dialogflow_intent")
                            == result.query_result.intent.display_name
                        ):
                            # Expose the raw query result to the skill.
                            message.dialogflow = result.query_result
                            _LOGGER.debug(
                                _("Matched against skill %s"), skill.config["name"]
                            )
                            matched_skills.append(
                                {
                                    "score": result.query_result.intent_detection_confidence,
                                    "skill": skill,
                                    "config": skill.config,
                                    "message": message,
                                }
                            )
        return matched_skills
    except Exception as error:
        # TODO: Refactor broad exception
        _LOGGER.error(_("There was an error while parsing to dialogflow - %s."), error)
| {
"content_hash": "f299bfb11aca0e04479d52bdecf4fd57",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 153,
"avg_line_length": 37.70992366412214,
"alnum_prop": 0.573076923076923,
"repo_name": "jacobtomlinson/opsdroid",
"id": "3889f0aa248c2637867bce2fea5c33e605b89c63",
"size": "4940",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "opsdroid/parsers/dialogflow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1755"
},
{
"name": "Jinja",
"bytes": "2320"
},
{
"name": "Jupyter Notebook",
"bytes": "848"
},
{
"name": "Python",
"bytes": "1178799"
}
],
"symlink_target": ""
} |
"""The Python implementation of the GRPC helloworld.Greeter server."""
from concurrent import futures
import logging
import grpc
import helloworld_pb2
import helloworld_pb2_grpc
class Greeter(helloworld_pb2_grpc.GreeterServicer):
    """Implementation of the Greeter service from helloworld.proto."""

    def SayHello(self, request, context):
        """Return a HelloReply greeting the caller by name."""
        greeting = 'Hello, %s!' % request.name
        return helloworld_pb2.HelloReply(message=greeting)
def serve():
    """Start the gRPC Greeter server on port 50051 and block until terminated."""
    worker_pool = futures.ThreadPoolExecutor(max_workers=10)
    grpc_server = grpc.server(worker_pool)
    helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), grpc_server)
    grpc_server.add_insecure_port('[::]:50051')
    grpc_server.start()
    grpc_server.wait_for_termination()


if __name__ == '__main__':
    logging.basicConfig()
    serve()
| {
"content_hash": "9a34224e8d2b87dfbe61a194e54be206",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 77,
"avg_line_length": 25,
"alnum_prop": 0.717037037037037,
"repo_name": "ejona86/grpc",
"id": "acc5adbfe1553b1f6c58b327cc4fce710bba863c",
"size": "1252",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "examples/python/helloworld/greeter_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "5444"
},
{
"name": "Batchfile",
"bytes": "38831"
},
{
"name": "C",
"bytes": "1377708"
},
{
"name": "C#",
"bytes": "106367"
},
{
"name": "C++",
"bytes": "16353334"
},
{
"name": "CMake",
"bytes": "29311"
},
{
"name": "CSS",
"bytes": "1519"
},
{
"name": "Cython",
"bytes": "258768"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "Dockerfile",
"bytes": "179860"
},
{
"name": "Go",
"bytes": "34794"
},
{
"name": "HTML",
"bytes": "14"
},
{
"name": "Java",
"bytes": "13923"
},
{
"name": "JavaScript",
"bytes": "5572"
},
{
"name": "Objective-C",
"bytes": "724357"
},
{
"name": "Objective-C++",
"bytes": "79351"
},
{
"name": "PHP",
"bytes": "486781"
},
{
"name": "PowerShell",
"bytes": "4516"
},
{
"name": "Python",
"bytes": "3814860"
},
{
"name": "Ruby",
"bytes": "650063"
},
{
"name": "Shell",
"bytes": "766652"
},
{
"name": "Starlark",
"bytes": "805915"
},
{
"name": "Swift",
"bytes": "7487"
},
{
"name": "XSLT",
"bytes": "9846"
}
],
"symlink_target": ""
} |
import asyncio
import aiohttp
from aiohttp import web
import zmq
from zmq.asyncio import Context, ZMQEventLoop
from rpc import WSRPCHandler
class RPC(WSRPCHandler):
    """Application RPC. RPC methods should start with the `rpc_` prefix"""

    def __init__(self, loop=None):
        """Create the handler.

        :param loop: asyncio event loop; defaults to the current default loop.
        """
        if loop is None:
            loop = asyncio.get_event_loop()
        self._loop = loop
        # ZMQ asyncio context used to reach backend workers.
        self._context = Context()

    async def rpc_echo(self, ws, method, blob):
        """Echo *blob* straight back over the websocket."""
        ws.send_bytes(blob)

    async def rpc_echo_worker(self, ws, method, blob):
        """Round-trip *blob* through the ZMQ echo worker on port 5559.

        The worker echoes the payload and then streams a b'closing' frame;
        both frames are forwarded to the websocket client.
        """
        socket = self._context.socket(zmq.DEALER)
        socket.connect('tcp://localhost:5559')
        # DEALER sockets need an empty delimiter frame before the payload.
        await socket.send_multipart([b'', blob])
        message = await socket.recv_multipart()
        assert message[-1] == blob, '%s does not equal %s' % (
            message[-1], blob)
        ws.send_bytes(message[-1])
        # Echo worker streams `closing` after echoing
        message = await socket.recv_multipart()
        # Fixed: the failure message previously referenced the undefined name
        # `message1`, raising NameError instead of AssertionError on mismatch.
        assert message[-1] == b'closing', '%s does not equal %s' % (
            message[-1], 'closing')
        ws.send_bytes(message[-1])
# CLI
def init_function(argv):
    """aiohttp CLI entry point: build and return the web application.

    :param argv: command line arguments (unused).
    :returns: aiohttp.web.Application with the RPC websocket handler at '/'.
    """
    rpc_handler = RPC()
    application = web.Application()
    # Pass the websocket handler to the router, not the rpc method...
    application.router.add_route('GET', '/', rpc_handler.websocket_handler)
    return application
async def init(loop):
    """Create and start the aiohttp server on 127.0.0.1:8080.

    :param loop: event loop used for both the RPC handler and the server.
    :returns: tuple of (server, protocol handler) for later shutdown.
    """
    rpc_handler = RPC(loop=loop)
    application = web.Application(loop=loop)
    application.router.add_route('GET', '/', rpc_handler.websocket_handler)
    protocol_handler = application.make_handler()
    server = await loop.create_server(protocol_handler, '127.0.0.1', 8080)
    print('Server started at http://127.0.0.1:8080')
    return server, protocol_handler
if __name__ == '__main__':
    # Use a ZMQ-aware event loop so zmq.asyncio sockets integrate with asyncio.
    loop = ZMQEventLoop()
    asyncio.set_event_loop(loop)
    srv, handler = loop.run_until_complete(init(loop))
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        # Graceful shutdown: let in-flight websocket connections finish.
        loop.run_until_complete(handler.finish_connections())
| {
"content_hash": "8a2f8320376c13ca7454ccd632bc748c",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 74,
"avg_line_length": 28.70149253731343,
"alnum_prop": 0.6229849193967759,
"repo_name": "davebshow/aiogoblin",
"id": "f453d01210c792fb53bbb0b693663d811aaff5c4",
"size": "1923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aiogoblin/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9163"
}
],
"symlink_target": ""
} |
""" Entry point for the different contexts supported by the application: customer (general customer configurations),
user (user specific data, always associated to a customer context) and session (request information, can be
associated to a customer context for general environment batch calls or to a user context in the case of user
requests)
"""
import copy
from barbante import config
from barbante.context.customer_context import CustomerContext
from barbante.context.session_context import SessionContext
from barbante.data.MongoDBProxy import MongoDBProxy
import barbante.utils.logging as barbante_logging
log = barbante_logging.get_logger(__name__)
DEFAULT_DB_PROXY_CLASS = MongoDBProxy
_customer_contexts_by_env = {}
""" Dict of lazily loaded customer contexts.
Each context is responsible for keeping its own data proxy with its own pool of connections to the database.
In a production environment, a Barbante instance will hold one customer context for each customer served.
That context will be used by all open SessionContext instances in all Tornado workers handling connections
in Reel for that environment.
A single customer context should be able to serve as many connections as there are tornado workers times the
number of concurrent recommendation algorithms running on each hybrid recommender instance.
"""
""" Configuration Constants
"""
BEFORE_SCORING = 'BEFORE_SCORING'
AFTER_SCORING = 'AFTER_SCORING'
class _DatabaseConfig(object):
""" Private module level class used to hold database configuration settings.
"""
def __init__(self, host, host_raw, name, name_raw, pool_size, read_preference, replica_set, replica_set_raw):
self.host = host
""" The database hostname or ip addresses for recommendation-specific collections (Barbante)
"""
self.host_raw = host_raw
""" The database hostname or ip addresses for raw data collections (API)
"""
self.name = name
""" The database name for recommendation-specific collections (Barbante)
"""
self.name_raw = name_raw
""" The database name for raw data collections (API)
"""
self.pool_size = pool_size
""" The size of the database pool
"""
self.read_preference = read_preference
""" The read preference when working in a cluster, defaults to primary
"""
self.replica_set = replica_set
""" The name of the replica set.
When connecting to a Mongo Replica Set Cluster, this is a required parameter.
"""
self.replica_set_raw = replica_set_raw
""" The name of the replica set for raw data connections.
When connecting to a Mongo Replica Set Cluster, this is a required parameter.
"""
class _CacheConfig(object):
""" Private module level class used to hold cache configuration settings.
"""
def __init__(self, env, hosts):
self.environment = env
""" The environment name, used
"""
self.hosts = hosts
""" The cache hostnames (or ip addresses) and ports
"""
class ConfigException(Exception):
    """ Raised when exceptions are found in configuration files or when the
        configuration of a non-existing environment is requested.
    """
def create_customer_context(env, data_proxy=None):
    """ Creates a new CustomerContext instance referencing the envinroment ``env``.
        Should only be called directly when testing. For production code, see get_preloaded_customer_context().
        :param env: The environment name.
        :param data_proxy: A data proxy instance to reuse instead of creating a new one.
        :returns: A CostumerContext instance, or None when the environment
            configuration is missing or malformed (the error is only logged).
    """
    try:
        env_config = config.database[env]
        customer = env_config['customer']
        env_default_settings = env_config['sessions']['default']
        db_options = env_default_settings['options']
        hosts = env_default_settings['hosts']
        hosts_raw = env_default_settings['hosts_raw']
        database = env_default_settings['database']
        database_raw = env_default_settings['database_raw']
        pool_size = db_options.get('pool_size')
        read_preferences = db_options['read']
        # NOTE(review): db_options is already dereferenced above, so these
        # "if db_options" guards can never be False at this point.
        replica_set = db_options.get('replica_set') if db_options else None
        replica_set_raw = db_options.get('replica_set_raw') if db_options else None
        # Cache settings are optional; only build a _CacheConfig when hosts exist.
        cache_settings = env_default_settings.get('cache')
        cache_hosts = cache_settings.get('hosts') if cache_settings else None
        context = CustomerContext(customer, data_proxy if data_proxy else DEFAULT_DB_PROXY_CLASS,
                                  _DatabaseConfig(hosts, hosts_raw, database, database_raw,
                                                  pool_size, read_preferences, replica_set, replica_set_raw),
                                  _CacheConfig(env, cache_hosts) if cache_hosts else None)
        log.info('Environment "{0}" configurations loaded successfully'.format(env))
        return context
    except (AttributeError, KeyError) as err:
        # NOTE(review): configuration errors are swallowed and None is returned
        # implicitly; ConfigException exists for this case but is never raised
        # -- confirm whether callers rely on the None return.
        log.error('Error loading environment "{0}" configurations: {1}'.format(env, err))
def clone_preloaded_customer_context(env, data_proxy=None, make_new_data_proxy=False):
    """ Clones an existing customer context.
        :param env: environment whose customer context will be cloned.
        :param data_proxy: sets an optional custom data_proxy.
        :param make_new_data_proxy: when True and no data_proxy is given, the clone
            receives a fresh DEFAULT_DB_PROXY_CLASS instead of sharing the original's proxy.
        :returns: The CustomerContext clone.
    """
    source_context = get_preloaded_customer_context(env)
    source_proxy = source_context.data_proxy
    # deepcopy cannot traverse the live data proxy, so detach it while copying
    # and restore it right after.
    source_context.data_proxy = None
    cloned_context = copy.deepcopy(source_context)
    source_context.data_proxy = source_proxy
    if data_proxy is None:
        data_proxy = DEFAULT_DB_PROXY_CLASS if make_new_data_proxy else source_proxy
    cloned_context.set_data_proxy(data_proxy)
    return cloned_context
def get_preloaded_customer_context(env):
    """ Gets the singleton customer context corresponding to the environment ``env``.
        Creates and caches the context on first access.

        :param env: The environment name
        :returns: A CostumerContext instance
    """
    # EAFP: the common path is a cache hit, so try the lookup first.
    try:
        return _customer_contexts_by_env[env]
    except KeyError:
        context = create_customer_context(env)
        _customer_contexts_by_env[env] = context
        return context
def init_session(environment=None, user_id=None, context_filter_string=None, customer_ctx=None, algorithm=None):
    """ Initializes a session.

        :param environment: Session environment. Mandatory if customer_ctx is None, otherwise optional.
        :param user_id: External user id. If None, UserContext won't be created.
        :param context_filter_string: A ContextFilter instance.
        :param customer_ctx: A CustomerContext instance. Mandatory if environment is None, otherwise optional.
            Used only in tests; should be None for production.
        :returns: A new SessionContext
    """
    # Fall back to the environment's singleton customer context when none is injected.
    context = customer_ctx
    if context is None:
        context = get_preloaded_customer_context(environment)
    return SessionContext(context, user_id=user_id,
                          context_filter_string=context_filter_string,
                          algorithm=algorithm)
| {
"content_hash": "892f47cfafd217ba1d9a60768401cc57",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 116,
"avg_line_length": 40.11764705882353,
"alnum_prop": 0.680218608371101,
"repo_name": "hypermindr/barbante",
"id": "ac3600206c54e093cbf7952677aa2205d2212381",
"size": "7502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "barbante/context/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "728872"
}
],
"symlink_target": ""
} |
import opentuner
from opentuner import ConfigurationManipulator
from opentuner import IntegerParameter
from opentuner import MeasurementInterface
from opentuner import Result
import json
import time
import sys
import re
class TransposeTune(MeasurementInterface):
    """OpenTuner interface that searches for the best transpose tile size (M, N)."""

    def manipulator(self):
        """Define the search space by creating a ConfigurationManipulator.

        Tile sizes are expressed in units of ``self.granularity`` and bounded
        below by ``self.mintilesize``.
        """
        self.mintilesize = 2
        self.granularity = 1
        assert 0 < self.granularity
        # Use floor division so the bounds stay integral on Python 3 as well
        # (true division would hand floats to IntegerParameter).
        minsize = max(self.mintilesize // self.granularity, 1)
        maxsize = minsize + self.granularity
        m_max = max(min(self.args.maxm, self.args.end), maxsize)
        n_max = max(min(self.args.maxn, self.args.end), maxsize)
        # Round up to the next multiple of the granularity.
        m_max = (m_max + self.granularity - 1) // self.granularity
        n_max = (n_max + self.granularity - 1) // self.granularity
        manipulator = ConfigurationManipulator()
        manipulator.add_parameter(IntegerParameter("M", minsize, m_max))
        manipulator.add_parameter(IntegerParameter("N", minsize, n_max))
        return manipulator

    def seed_configurations(self):
        """Seed the search with the tile size given on the command line, if any."""
        # If only one of m/n was given, mirror it into the other dimension.
        m_seed = [self.args.n, self.args.m][0 != self.args.m]
        n_seed = [self.args.m, self.args.n][0 != self.args.n]
        if 0 == m_seed or 0 == n_seed:
            return []
        return [{"M": max(m_seed, self.mintilesize),
                 "N": max(n_seed, self.mintilesize)}]

    def objective(self):
        """Maximize throughput (accuracy) while preferring smaller kernels."""
        return opentuner.search.objective.MaximizeAccuracyMinimizeSize()

    def run(self, desired_result, input, limit):
        """Compile and run a given configuration, then return performance.

        :raises RuntimeError: when the benchmark command fails.
        """
        cfg = desired_result.configuration.data
        nruns = max(self.args.nruns, 1)
        begin = max(self.args.begin, self.mintilesize)
        end = max(self.args.end, self.mintilesize)
        run_cmd = (
            "CHECK=-1"  # repeatable runs
            " LIBXSMM_TCOPY_M=" + str(self.granularity * cfg["M"]) +
            " LIBXSMM_TCOPY_N=" + str(self.granularity * cfg["N"]) +
            " ./transpose.sh o" + " " + str(end) + " " + str(end) +
            " " + str(end) + " " + str(end) + " " + str(nruns) +
            " -" + str(begin))
        run_result = self.call_program(run_cmd)
        # Guard clause: bail out early on a failed benchmark run.
        if 0 != run_result["returncode"]:
            sys.tracebacklimit = 0
            raise RuntimeError("Execution failed for \"" + run_cmd + "\"!")
        match = re.search(
            "\\s*duration:\\s+([0-9]+(\\.[0-9]*)*)",
            str(run_result["stdout"]))
        assert match is not None
        mseconds = float(match.group(1)) / nruns
        assert 0 < mseconds
        frequency = 1000.0 / mseconds
        kernelsize = (self.granularity ** 2) * cfg["M"] * cfg["N"]
        return Result(time=mseconds, accuracy=frequency, size=kernelsize)

    def save_final_config(self, configuration):
        """Called at the end of tuning; persists the optimal block size as JSON."""
        filename = (
            "transpose-" + str(max(self.args.begin, 1)) +
            "_" + str(max(self.args.end, 1)) +
            "_" + str(max(self.args.nruns, 1)) +
            time.strftime("-%Y%m%d-%H%M%S") + ".json")
        print("Optimal block size written to " + filename +
              ": ", configuration.data)
        with open(filename, 'w') as fd:
            json.dump(configuration.data, fd)
if __name__ == "__main__":
    argparser = opentuner.default_argparser()
    # (name, default, help); a None default marks a mandatory positional.
    arg_specs = [
        ("begin", None, "Begin of the range (min. M and N)"),
        ("end",   None, "End of the range (max. M and N)"),
        ("nruns", 100,  "Number of experiments per epoch"),
        ("m",     0,    "Initial tile size (M)"),
        ("n",     0,    "Initial tile size (N)"),
        ("maxm",  160,  "Max. tile size (M)"),
        ("maxn",  160,  "Max. tile size (N)"),
    ]
    for arg_name, arg_default, arg_help in arg_specs:
        if arg_default is None:
            argparser.add_argument(arg_name, type=int, help=arg_help)
        else:
            argparser.add_argument(arg_name, type=int, default=arg_default,
                                   nargs='?', help=arg_help)
    TransposeTune.main(argparser.parse_args())
| {
"content_hash": "c7d872b03c58a2d49d38d5e06c1e998a",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 77,
"avg_line_length": 38.4051724137931,
"alnum_prop": 0.567003367003367,
"repo_name": "hfp/libxsmm",
"id": "1200e9bf62b85086f127ed1e7334d0c02fe5fcc1",
"size": "5321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/transpose/transpose_opentuner.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3115"
},
{
"name": "C",
"bytes": "8335143"
},
{
"name": "C++",
"bytes": "84416"
},
{
"name": "CSS",
"bytes": "242"
},
{
"name": "Fortran",
"bytes": "102021"
},
{
"name": "HTML",
"bytes": "390"
},
{
"name": "JavaScript",
"bytes": "1062"
},
{
"name": "Makefile",
"bytes": "158870"
},
{
"name": "Python",
"bytes": "36612"
},
{
"name": "Shell",
"bytes": "84205"
},
{
"name": "Starlark",
"bytes": "882"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Alter userprofile.avatar: ImageField with a bundled default image."""

    # Must run after the preceding auto-generated migration in the core app.
    dependencies = [
        ('core', '0021_auto_20151223_1756'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='avatar',
            # New avatars are uploaded under core/media/images/avatars;
            # profiles without an upload fall back to defaultavatar.png.
            field=models.ImageField(default=b'core/media/images/defaultavatar.png', upload_to=b'core/media/images/avatars'),
        ),
    ]
| {
"content_hash": "247a1756bad7ef57dbba9b6284638a78",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 124,
"avg_line_length": 25.22222222222222,
"alnum_prop": 0.6277533039647577,
"repo_name": "tfiers/arenberg-online",
"id": "17697d52b64f71ec558bb88edeb0181a49de8e38",
"size": "478",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/migrations/0022_auto_20151223_1802.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "62"
},
{
"name": "CSS",
"bytes": "31305"
},
{
"name": "HTML",
"bytes": "230598"
},
{
"name": "JavaScript",
"bytes": "96170"
},
{
"name": "Python",
"bytes": "178246"
}
],
"symlink_target": ""
} |
from django.contrib import admin

from .models import Location

# Expose Location in the Django admin with the default ModelAdmin options.
admin.site.register(Location)
| {
"content_hash": "f3fba3f1f2149a5562552ae60b5c6e62",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 32,
"avg_line_length": 18.8,
"alnum_prop": 0.8191489361702128,
"repo_name": "pugpe/pugpe",
"id": "144f963dd3940c981dd88137eea7ff4a427a0d34",
"size": "118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/geo/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8063"
},
{
"name": "Dockerfile",
"bytes": "80"
},
{
"name": "HTML",
"bytes": "26786"
},
{
"name": "Makefile",
"bytes": "615"
},
{
"name": "Python",
"bytes": "192729"
}
],
"symlink_target": ""
} |
from cogbot.cogs.robo_mod.robo_mod_action import RoboModAction
from cogbot.cogs.robo_mod.robo_mod_trigger import RoboModTrigger
class ReplyToAuthorAction(RoboModAction):
    """RoboMod action that replies in the trigger's channel, mentioning the author."""

    def __init__(self):
        # Reply text; populated by update() from the rule's configuration data.
        self.content: str = None

    async def update(self, state: "RoboModServerState", data: dict):
        self.content = data["content"]

    async def apply(self, trigger: RoboModTrigger):
        # Prefix the configured text with a mention of the triggering author.
        message = f"{trigger.author.mention} {self.content}"
        await trigger.bot.send_message(trigger.channel, content=message)
| {
"content_hash": "b2b92f38f79f595cf719437ac89f8393",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 72,
"avg_line_length": 37.5,
"alnum_prop": 0.7161904761904762,
"repo_name": "Arcensoth/cogbot",
"id": "b9002db3b994d82f288f5515ee0b8da84cc3655c",
"size": "525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cogbot/cogs/robo_mod/actions/reply_to_author.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "193760"
}
],
"symlink_target": ""
} |
import logging
# From http://stackoverflow.com/questions/384076/how-can-i-color-python-logging-output
# ANSI color indices: foreground codes are 30 + index, background 40 + index.
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)

# Escape sequences used to decorate log output.
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"


def formatter_message(message, use_color=True):
    """Expand the $RESET/$BOLD placeholders in *message*.

    When *use_color* is false the placeholders are stripped instead.
    """
    reset, bold = (RESET_SEQ, BOLD_SEQ) if use_color else ("", "")
    return message.replace("$RESET", reset).replace("$BOLD", bold)
# Foreground color applied to each log level's name by ColoredFormatter.
COLORS = {
    'WARNING': YELLOW,
    'INFO': WHITE,
    'DEBUG': BLUE,
    'CRITICAL': YELLOW,
    'ERROR': RED
}
class ColoredFormatter(logging.Formatter):
    """Formatter that wraps the record's levelname in ANSI color codes.

    :param msg: format string passed straight through to ``logging.Formatter``
    :param use_color: when False, behaves exactly like the base formatter
    """

    def __init__(self, msg, use_color=True):
        logging.Formatter.__init__(self, msg)
        self.use_color = use_color

    def format(self, record):
        # Colorize the levelname only for the duration of this call and then
        # restore it: LogRecord instances are shared between handlers, so the
        # previous implementation leaked color codes into other formatters.
        original_levelname = record.levelname
        if self.use_color and original_levelname in COLORS:
            record.levelname = (COLOR_SEQ % (30 + COLORS[original_levelname])
                                + original_levelname + RESET_SEQ)
        try:
            return logging.Formatter.format(self, record)
        finally:
            record.levelname = original_levelname
# Custom logger class with multiple destinations
class ColoredLogger(logging.Logger):
FORMAT = "[%(asctime)-15s][$BOLD%(name)-20s$RESET][%(levelname)-18s] %(message)s ($BOLD%(filename)s$RESET:%(lineno)d)"
COLOR_FORMAT = formatter_message(FORMAT, True)
def __init__(self, name):
logging.Logger.__init__(self, name, logging.DEBUG)
color_formatter = ColoredFormatter(self.COLOR_FORMAT)
console = logging.StreamHandler()
console.setFormatter(color_formatter)
self.addHandler(console)
return
| {
"content_hash": "5c4036d55bc81452825fe1983786c9a2",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 123,
"avg_line_length": 30.220338983050848,
"alnum_prop": 0.6561974200785193,
"repo_name": "huqa/pyfibot",
"id": "e219cb7a9aceb563b3314a6a734b5b81e0542ac2",
"size": "1783",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyfibot/colorlogger.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "319679"
},
{
"name": "Shell",
"bytes": "6833"
}
],
"symlink_target": ""
} |
__usage__ = \
"""A simple script to automatically produce sitemaps for a webserver,
in the Google Sitemap Protocol (GSP).
Usage: python sitemap_gen.py --config=config.xml [--help] [--testing]
--config=config.xml, specifies config file location
--help, displays usage message
--testing, specified when user is experimenting
"""
# Please be careful that all syntax used in this file can be parsed on
# Python 1.5 -- this version check is not evaluated until after the
# entire file has been parsed.
import sys
if sys.hexversion < 0x02020000:
  # 0x02020000 encodes version 2.2.0; refuse to run on anything older.
  print 'This script requires Python 2.2 or later.'
  print 'Currently run with version: %s' % sys.version
  sys.exit(1)
import fnmatch
import glob
import gzip
import md5
import os
import re
import stat
import time
import types
import urllib
import urlparse
import xml.sax
# True and False were introduced in Python2.2.2
# Probe for the builtin; on older interpreters define integer stand-ins.
try:
  testTrue=True
  del testTrue
except NameError:
  True=1
  False=0
# Text encodings
ENC_ASCII = 'ASCII'
ENC_UTF8  = 'UTF-8'
ENC_IDNA  = 'IDNA'
# Names that all denote 7-bit ASCII; compared case-insensitively via upper().
# Fixed: a missing comma after 'ISO646-US' made Python concatenate it with
# the next literal into one bogus entry ('ISO646-USISO_646.IRV:1991').
ENC_ASCII_LIST = ['ASCII', 'US-ASCII', 'US', 'IBM367', 'CP367', 'ISO646-US',
                  'ISO_646.IRV:1991', 'ISO-IR-6', 'ANSI_X3.4-1968',
                  'ANSI_X3.4-1986', 'CPASCII' ]
# Fallback encodings tried when nothing better is known (European defaults).
ENC_DEFAULT_LIST = ['ISO-8859-1', 'ISO-8859-2', 'ISO-8859-5']
# Available Sitemap types
SITEMAP_TYPES = ['web', 'mobile', 'news']

# General Sitemap tags (allowed on every URL entry)
GENERAL_SITEMAP_TAGS = ['loc', 'changefreq', 'priority', 'lastmod']

# News specific tags (emitted inside a <news:news> element)
NEWS_SPECIFIC_TAGS = ['keywords', 'publication_date', 'stock_tickers']

# News Sitemap tags
NEWS_SITEMAP_TAGS = GENERAL_SITEMAP_TAGS + NEWS_SPECIFIC_TAGS

# Maximum number of urls in each sitemap, before next Sitemap is created
MAXURLS_PER_SITEMAP = 50000

# Suffix on a Sitemap index file
SITEINDEX_SUFFIX = '_index.xml'
# Regular expressions tried for extracting URLs from access logs.
# Captures the method and path of any request answered with HTTP 200.
ACCESSLOG_CLF_PATTERN = re.compile(
  r'.+\s+"([^\s]+)\s+([^\s]+)\s+HTTP/\d+\.\d+"\s+200\s+.*'
  )

# Match patterns for lastmod attributes
# (the ISO8601 forms accepted by the Sitemap protocol, coarse to precise)
DATE_PATTERNS = map(re.compile, [
  r'^\d\d\d\d$',
  r'^\d\d\d\d-\d\d$',
  r'^\d\d\d\d-\d\d-\d\d$',
  r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\dZ$',
  r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d[+-]\d\d:\d\d$',
  r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?Z$',
  r'^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d+)?[+-]\d\d:\d\d$',
  ])

# Match patterns for changefreq attributes
CHANGEFREQ_PATTERNS = [
  'always', 'hourly', 'daily', 'weekly', 'monthly', 'yearly', 'never'
  ]
# XML formats
GENERAL_SITEINDEX_HEADER = \
'<?xml version="1.0" encoding="UTF-8"?>\n' \
'<sitemapindex\n' \
' xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"\n' \
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
' xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9\n' \
' http://www.sitemaps.org/schemas/sitemap/0.9/' \
'siteindex.xsd">\n'
NEWS_SITEINDEX_HEADER = \
'<?xml version="1.0" encoding="UTF-8"?>\n' \
'<sitemapindex\n' \
' xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"\n' \
' xmlns:news="http://www.google.com/schemas/sitemap-news/0.9"\n' \
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
' xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9\n' \
' http://www.sitemaps.org/schemas/sitemap/0.9/' \
'siteindex.xsd">\n'
SITEINDEX_FOOTER = '</sitemapindex>\n'
SITEINDEX_ENTRY = \
' <sitemap>\n' \
' <loc>%(loc)s</loc>\n' \
' <lastmod>%(lastmod)s</lastmod>\n' \
' </sitemap>\n'
GENERAL_SITEMAP_HEADER = \
'<?xml version="1.0" encoding="UTF-8"?>\n' \
'<urlset\n' \
' xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"\n' \
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
' xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9\n' \
' http://www.sitemaps.org/schemas/sitemap/0.9/' \
'sitemap.xsd">\n'
NEWS_SITEMAP_HEADER = \
'<?xml version="1.0" encoding="UTF-8"?>\n' \
'<urlset\n' \
' xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"\n' \
' xmlns:news="http://www.google.com/schemas/sitemap-news/0.9"\n' \
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n' \
' xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9\n' \
' http://www.sitemaps.org/schemas/sitemap/0.9/' \
'sitemap.xsd">\n'
SITEMAP_FOOTER = '</urlset>\n'
SITEURL_XML_PREFIX = ' <url>\n'
SITEURL_XML_SUFFIX = ' </url>\n'
NEWS_TAG_XML_PREFIX = ' <news:news>\n'
NEWS_TAG_XML_SUFFIX = ' </news:news>\n'
# Search engines to notify with the updated sitemaps
#
# This list is very non-obvious in what's going on.  Here's the gist:
# Each item in the list is a 6-tuple of items.  The first 5 are "almost"
# the same as the input arguments to urlparse.urlunsplit():
#   0 - schema
#   1 - netloc
#   2 - path
#   3 - query    <-- EXCEPTION: specify a query map rather than a string
#   4 - fragment
# Additionally, add item 5:
#   5 - query attribute that should be set to the new Sitemap URL
# Clear as mud, I know.
NOTIFICATION_SITES = [
  ('http', 'www.google.com', 'webmasters/sitemaps/ping', {}, '', 'sitemap'),
  ]
class Error(Exception):
  """Root of this module's exception hierarchy.

  Custom exception types are used sparingly here, but they are essential
  for signalling failures out of SAX-driven XML parsing.
  """
  pass
#end class Error
class SchemaError(Error):
  """Raised when an XML file does not match the schema we expect."""
  pass
#end class SchemaError
class Encoder:
  """
  Manages wide-character/narrow-character conversions for just about all
  text that flows into or out of the script.

  You should always use this class for string coercion, as opposed to
  letting Python handle coercions automatically.  Reason: Python
  usually assumes ASCII (7-bit) as a default narrow character encoding,
  which is not the kind of data we generally deal with.

  General high-level methodologies used in sitemap_gen:

  [PATHS]
  File system paths may be wide or narrow, depending on platform.
  This works fine, just be aware of it and be very careful to not
  mix them.  That is, if you have to pass several file path arguments
  into a library call, make sure they are all narrow or all wide.
  This class has MaybeNarrowPath() which should be called on every
  file system path you deal with.

  [URLS]
  URL locations are stored in Narrow form, already escaped.  This has the
  benefit of keeping escaping and encoding as close as possible to the format
  we read them in.  The downside is we may end up with URLs that have
  intermingled encodings -- the root path may be encoded in one way
  while the filename is encoded in another.  This is obviously wrong, but
  it should hopefully be an issue hit by very few users.  The workaround
  from the user level (assuming they notice) is to specify a default_encoding
  parameter in their config file.

  [OTHER]
  Other text, such as attributes of the URL class, configuration options,
  etc, are generally stored in Unicode for simplicity.
  """

  def __init__(self):
    self._user      = None         # User-specified default encoding
    self._learned   = []           # Learned default encodings
    self._widefiles = False        # File system can be wide

    # Can the file system be Unicode?
    try:
      self._widefiles = os.path.supports_unicode_filenames
    except AttributeError:
      try:
        # NOTE(review): this compares the whole getwindowsversion() result to
        # a platform constant; it looks like it should inspect the platform
        # field of that tuple -- confirm against the upstream script.
        self._widefiles = sys.getwindowsversion() == os.VER_PLATFORM_WIN32_NT
      except AttributeError:
        pass

    # Try to guess a working default
    try:
      encoding = sys.getfilesystemencoding()
      if encoding and not (encoding.upper() in ENC_ASCII_LIST):
        self._learned = [ encoding ]
    except AttributeError:
      pass

    # Fall back to the interpreter's default encoding, unless it is ASCII.
    if not self._learned:
      encoding = sys.getdefaultencoding()
      if encoding and not (encoding.upper() in ENC_ASCII_LIST):
        self._learned = [ encoding ]

    # If we had no guesses, start with some European defaults
    if not self._learned:
      self._learned = ENC_DEFAULT_LIST
  #end def __init__

  def SetUserEncoding(self, encoding):
    """ Record the encoding the user specified in the config file. """
    self._user = encoding
  #end def SetUserEncoding

  def NarrowText(self, text, encoding):
    """ Narrow a piece of arbitrary text """
    if type(text) != types.UnicodeType:
      return text

    # Try the passed in preference
    if encoding:
      try:
        result = text.encode(encoding)
        # Remember encodings that worked, for later fallback use.
        if not encoding in self._learned:
          self._learned.append(encoding)
        return result
      except UnicodeError:
        pass
      except LookupError:
        output.Warn('Unknown encoding: %s' % encoding)

    # Try the user preference
    if self._user:
      try:
        return text.encode(self._user)
      except UnicodeError:
        pass
      except LookupError:
        # Bad user encoding: warn once, then forget it.
        temp = self._user
        self._user = None
        output.Warn('Unknown default_encoding: %s' % temp)

    # Look through learned defaults, knock any failing ones out of the list
    while self._learned:
      try:
        return text.encode(self._learned[0])
      except:
        del self._learned[0]

    # When all other defaults are exhausted, use UTF-8
    try:
      return text.encode(ENC_UTF8)
    except UnicodeError:
      pass

    # Something is seriously wrong if we get to here
    return text.encode(ENC_ASCII, 'ignore')
  #end def NarrowText

  def MaybeNarrowPath(self, text):
    """ Paths may be allowed to stay wide """
    if self._widefiles:
      return text
    return self.NarrowText(text, None)
  #end def MaybeNarrowPath

  def WidenText(self, text, encoding):
    """ Widen a piece of arbitrary text """
    if type(text) != types.StringType:
      return text

    # Try the passed in preference
    if encoding:
      try:
        result = unicode(text, encoding)
        # Remember encodings that worked, for later fallback use.
        if not encoding in self._learned:
          self._learned.append(encoding)
        return result
      except UnicodeError:
        pass
      except LookupError:
        output.Warn('Unknown encoding: %s' % encoding)

    # Try the user preference
    if self._user:
      try:
        return unicode(text, self._user)
      except UnicodeError:
        pass
      except LookupError:
        # Bad user encoding: warn once, then forget it.
        temp = self._user
        self._user = None
        output.Warn('Unknown default_encoding: %s' % temp)

    # Look through learned defaults, knock any failing ones out of the list
    while self._learned:
      try:
        return unicode(text, self._learned[0])
      except:
        del self._learned[0]

    # When all other defaults are exhausted, use UTF-8
    try:
      return unicode(text, ENC_UTF8)
    except UnicodeError:
      pass

    # Getting here means it wasn't UTF-8 and we had no working default.
    # We really don't have anything "right" we can do anymore.
    output.Warn('Unrecognized encoding in text: %s' % text)
    if not self._user:
      output.Warn('You may need to set a default_encoding in your '
                  'configuration file.')
    return text.decode(ENC_ASCII, 'ignore')
  #end def WidenText
#end class Encoder
# Module-wide singleton used for all text coercions.
encoder = Encoder()
class Output:
  """
  Exposes logging functionality, and tracks how many errors
  we have thus output.

  Logging levels should be used as thus:
    Fatal     -- extremely sparingly
    Error     -- config errors, entire blocks of user 'intention' lost
    Warn      -- individual URLs lost
    Log(,0)   -- Un-suppressable text that's not an error
    Log(,1)   -- touched files, major actions
    Log(,2)   -- parsing notes, filtered or duplicated URLs
    Log(,3)   -- each accepted URL
  """

  def __init__(self):
    self.num_errors    = 0     # Count of errors
    self.num_warns     = 0     # Count of warnings
    self._errors_shown = {}    # Shown errors (keyed by md5 of message text)
    self._warns_shown  = {}    # Shown warnings (keyed by md5 of message text)
    self._verbose      = 0     # Level of verbosity
  #end def __init__

  def Log(self, text, level):
    """ Output a blurb of diagnostic text, if the verbose level allows it """
    if text:
      text = encoder.NarrowText(text, None)
      if self._verbose >= level:
        print text
  #end def Log

  def Warn(self, text):
    """ Output and count a warning.  Suppress duplicate warnings. """
    if text:
      text = encoder.NarrowText(text, None)
      # Hash the message so repeats are shown only once.
      hash = md5.new(text).digest()
      if not self._warns_shown.has_key(hash):
        self._warns_shown[hash] = 1
        print '[WARNING] ' + text
      else:
        self.Log('(suppressed) [WARNING] ' + text, 3)
      self.num_warns = self.num_warns + 1
  #end def Warn

  def Error(self, text):
    """ Output and count an error.  Suppress duplicate errors. """
    if text:
      text = encoder.NarrowText(text, None)
      # Hash the message so repeats are shown only once.
      hash = md5.new(text).digest()
      if not self._errors_shown.has_key(hash):
        self._errors_shown[hash] = 1
        print '[ERROR] ' + text
      else:
        self.Log('(suppressed) [ERROR] ' + text, 3)
      self.num_errors = self.num_errors + 1
  #end def Error

  def Fatal(self, text):
    """ Output an error and terminate the program. """
    if text:
      text = encoder.NarrowText(text, None)
      print '[FATAL] ' + text
    else:
      print 'Fatal error.'
    sys.exit(1)
  #end def Fatal

  def SetVerbose(self, level):
    """ Sets the verbose level. """
    try:
      if type(level) != types.IntType:
        level = int(level)
      if (level >= 0) and (level <= 3):
        self._verbose = level
        return
    except ValueError:
      pass
    self.Error('Verbose level (%s) must be between 0 and 3 inclusive.' % level)
  #end def SetVerbose
#end class Output
# Module-wide singleton used for all diagnostic output.
output = Output()
class URL(object):
  """ URL is a smart structure grouping together the properties we
  care about for a single web reference. """
  __slots__ = 'loc', 'lastmod', 'changefreq', 'priority'

  def __init__(self):
    self.loc        = None     # URL -- in Narrow characters
    self.lastmod    = None     # ISO8601 timestamp of last modify
    self.changefreq = None     # Text term for update frequency
    self.priority   = None     # Float between 0 and 1 (inc)
  #end def __init__

  def __cmp__(self, other):
    # Order URLs by location only.
    if self.loc < other.loc:
      return -1
    if self.loc > other.loc:
      return 1
    return 0
  #end def __cmp__

  def TrySetAttribute(self, attribute, value):
    """ Attempt to set the attribute to the value, with a pretty try
    block around it.  """
    if attribute == 'loc':
      # Locations get escaped/canonicalized; everything else is stored as-is.
      self.loc = self.Canonicalize(value)
    else:
      try:
        setattr(self, attribute, value)
      except AttributeError:
        output.Warn('Unknown URL attribute: %s' % attribute)
  #end def TrySetAttribute

  def IsAbsolute(loc):
    """ Decide if the URL is absolute or not """
    if not loc:
      return False
    narrow = encoder.NarrowText(loc, None)
    (scheme, netloc, path, query, frag) = urlparse.urlsplit(narrow)
    # A URL counts as absolute only when both scheme and host are present.
    if (not scheme) or (not netloc):
      return False
    return True
  #end def IsAbsolute
  IsAbsolute = staticmethod(IsAbsolute)

  def Canonicalize(loc):
    """ Do encoding and canonicalization on a URL string """
    if not loc:
      return loc

    # Let the encoder try to narrow it
    narrow = encoder.NarrowText(loc, None)

    # Escape components individually
    # (unr = RFC 3986 unreserved marks, sub = sub-delimiters kept verbatim)
    (scheme, netloc, path, query, frag) = urlparse.urlsplit(narrow)
    unr    = '-._~'
    sub    = '!$&\'()*+,;='
    netloc = urllib.quote(netloc, unr + sub + '%:@/[]')
    path   = urllib.quote(path,   unr + sub + '%:@/')
    query  = urllib.quote(query,  unr + sub + '%:@/?')
    frag   = urllib.quote(frag,   unr + sub + '%:@/?')

    # Try built-in IDNA encoding on the netloc
    try:
      (ignore, widenetloc, ignore, ignore, ignore) = urlparse.urlsplit(loc)
      for c in widenetloc:
        if c >= unichr(128):
          # Non-ASCII host name: re-encode it with IDNA and re-quote.
          netloc = widenetloc.encode(ENC_IDNA)
          netloc = urllib.quote(netloc, unr + sub + '%:@/[]')
          break
    except UnicodeError:
      # urlsplit must have failed, based on implementation differences in the
      # library.  There is not much we can do here, except ignore it.
      pass
    except LookupError:
      output.Warn('An International Domain Name (IDN) is being used, but this '
                  'version of Python does not have support for IDNA encoding. '
                  ' (IDNA support was introduced in Python 2.3)  The encoding '
                  'we have used instead is wrong and will probably not yield '
                  'valid URLs.')
    bad_netloc = False
    if '%' in netloc:
      bad_netloc = True

    # Put it all back together
    narrow = urlparse.urlunsplit((scheme, netloc, path, query, frag))

    # I let '%' through.  Fix any that aren't pre-existing escapes.
    HEXDIG = '0123456789abcdefABCDEF'
    list   = narrow.split('%')
    narrow = list[0]
    del list[0]
    for item in list:
      if (len(item) >= 2) and (item[0] in HEXDIG) and (item[1] in HEXDIG):
        narrow = narrow + '%' + item
      else:
        # A bare '%' that is not part of a valid escape: escape it itself.
        narrow = narrow + '%25' + item

    # Issue a warning if this is a bad URL
    if bad_netloc:
      output.Warn('Invalid characters in the host or domain portion of a URL: '
                  + narrow)

    return narrow
  #end def Canonicalize
  Canonicalize = staticmethod(Canonicalize)

  def VerifyDate(self, date, metatag):
    """Verify the date format is valid"""
    match = False
    if date:
      date = date.upper()
      # Accept any of the ISO8601 shapes in DATE_PATTERNS.
      for pattern in DATE_PATTERNS:
        match = pattern.match(date)
        if match:
          return True
      if not match:
        output.Warn('The value for %s does not appear to be in ISO8601 '
                    'format on URL: %s' % (metatag, self.loc))
        return False
  #end of VerifyDate

  def Validate(self, base_url, allow_fragment):
    """ Verify the data in this URL is well-formed, and override if not. """
    assert type(base_url) == types.StringType

    # Test (and normalize) the ref
    if not self.loc:
      output.Warn('Empty URL')
      return False
    if allow_fragment:
      self.loc = urlparse.urljoin(base_url, self.loc)
    if not self.loc.startswith(base_url):
      output.Warn('Discarded URL for not starting with the base_url: %s' %
                  self.loc)
      self.loc = None
      return False

    # Test the lastmod
    if self.lastmod:
      if not self.VerifyDate(self.lastmod, "lastmod"):
        self.lastmod = None

    # Test the changefreq
    if self.changefreq:
      match = False
      self.changefreq = self.changefreq.lower()
      for pattern in CHANGEFREQ_PATTERNS:
        if self.changefreq == pattern:
          match = True
          break
      if not match:
        output.Warn('Changefreq "%s" is not a valid change frequency on URL '
                    ': %s' % (self.changefreq, self.loc))
        self.changefreq = None

    # Test the priority
    if self.priority:
      priority = -1.0
      try:
        priority = float(self.priority)
      except ValueError:
        pass
      if (priority < 0.0) or (priority > 1.0):
        output.Warn('Priority "%s" is not a number between 0 and 1 inclusive '
                    'on URL: %s' % (self.priority, self.loc))
        self.priority = None

    return True
  #end def Validate

  def MakeHash(self):
    """ Provides a uniform way of hashing URLs """
    if not self.loc:
      return None
    # Strip a trailing slash so 'http://x/' and 'http://x' hash the same.
    if self.loc.endswith('/'):
      return md5.new(self.loc[:-1]).digest()
    return md5.new(self.loc).digest()
  #end def MakeHash

  def Log(self, prefix='URL', level=3):
    """ Dump the contents, empty or not, to the log. """
    out = prefix + ':'
    for attribute in self.__slots__:
      value = getattr(self, attribute)
      if not value:
        value = ''
      out = out + ('  %s=[%s]' % (attribute, value))
    output.Log('%s' % encoder.NarrowText(out, None), level)
  #end def Log

  def WriteXML(self, file):
    """ Dump non-empty contents to the output file, in XML format. """
    if not self.loc:
      return
    out = SITEURL_XML_PREFIX
    for attribute in self.__slots__:
      value = getattr(self, attribute)
      if value:
        # Narrow and XML-escape every value before emission.
        if type(value) == types.UnicodeType:
          value = encoder.NarrowText(value, None)
        elif type(value) != types.StringType:
          value = str(value)
        value = xml.sax.saxutils.escape(value)
        out = out + ('  <%s>%s</%s>\n' % (attribute, value, attribute))
    out = out + SITEURL_XML_SUFFIX
    file.write(out)
  #end def WriteXML
#end class URL
class NewsURL(URL):
  """ NewsURL is a subclass of URL with News-Sitemap specific properties. """
  __slots__ = 'loc', 'lastmod', 'changefreq', 'priority', 'publication_date', \
              'keywords', 'stock_tickers'

  def __init__(self):
    URL.__init__(self)
    self.publication_date = None   # ISO8601 timestamp of publication date
    self.keywords         = None   # Text keywords
    self.stock_tickers    = None   # Text stock
  #end def __init__

  def Validate(self, base_url, allow_fragment):
    """ Verify the data in this News URL is well-formed, and override if not. """
    assert type(base_url) == types.StringType

    # Run the general URL checks first; bail out if they fail.
    if not URL.Validate(self, base_url, allow_fragment):
      return False

    if not URL.VerifyDate(self, self.publication_date, "publication_date"):
      self.publication_date = None

    return True
  #end def Validate

  def WriteXML(self, file):
    """ Dump non-empty contents to the output file, in XML format. """
    if not self.loc:
      return
    out = SITEURL_XML_PREFIX

    # printed_news_tag indicates if news-specific metatags are present
    printed_news_tag = False
    for attribute in self.__slots__:
      value = getattr(self, attribute)
      if value:
        # Narrow and XML-escape every value before emission.
        if type(value) == types.UnicodeType:
          value = encoder.NarrowText(value, None)
        elif type(value) != types.StringType:
          value = str(value)
        value = xml.sax.saxutils.escape(value)
        if attribute in NEWS_SPECIFIC_TAGS:
          # Open the <news:news> wrapper lazily, on the first news tag.
          if not printed_news_tag:
            printed_news_tag = True
            out = out + NEWS_TAG_XML_PREFIX
          out = out + ('   <news:%s>%s</news:%s>\n' % (attribute, value, attribute))
        else:
          out = out + ('  <%s>%s</%s>\n' % (attribute, value, attribute))

    if printed_news_tag:
      out = out + NEWS_TAG_XML_SUFFIX
    out = out + SITEURL_XML_SUFFIX
    file.write(out)
  #end def WriteXML
#end class NewsURL
class Filter:
  """
  A filter on the stream of URLs we find.  A filter is, in essence,
  a wildcard applied to the stream.  You can think of this as an
  operator that returns a tri-state when given a URL:

    True  -- this URL is to be included in the sitemap
    None  -- this URL is undecided
    False -- this URL is to be dropped from the sitemap
  """

  def __init__(self, attributes):
    self._wildcard  = None     # Pattern for wildcard match
    self._regexp    = None     # Pattern for regexp match
    self._pass      = False    # "Drop" filter vs. "Pass" filter

    if not ValidateAttributes('FILTER', attributes,
                              ('pattern', 'type', 'action')):
      return

    # Check error count on the way in
    num_errors = output.num_errors

    # Fetch the attributes
    # NOTE(review): local 'type' shadows the builtin for the rest of __init__.
    pattern = attributes.get('pattern')
    type    = attributes.get('type', 'wildcard')
    action  = attributes.get('action', 'drop')
    if type:
      type = type.lower()
    if action:
      action = action.lower()

    # Verify the attributes
    if not pattern:
      output.Error('On a filter you must specify a "pattern" to match')
    elif (not type) or ((type != 'wildcard') and (type != 'regexp')):
      output.Error('On a filter you must specify either \'type="wildcard"\' '
                   'or \'type="regexp"\'')
    elif (action != 'pass') and (action != 'drop'):
      output.Error('If you specify a filter action, it must be either '
                   '\'action="pass"\' or \'action="drop"\'')

    # Set the rule
    if action == 'drop':
      self._pass = False
    elif action == 'pass':
      self._pass = True

    if type == 'wildcard':
      self._wildcard = pattern
    elif type == 'regexp':
      try:
        self._regexp = re.compile(pattern)
      except re.error:
        output.Error('Bad regular expression: %s' % pattern)

    # Log the final results iff we didn't add any errors
    if num_errors == output.num_errors:
      output.Log('Filter: %s any URL that matches %s "%s"' %
                 (action, type, pattern), 2)
  #end def __init__

  def Apply(self, url):
    """ Process the URL, as above. """
    if (not url) or (not url.loc):
      return None

    if self._wildcard:
      if fnmatch.fnmatchcase(url.loc, self._wildcard):
        return self._pass
      return None

    if self._regexp:
      if self._regexp.search(url.loc):
        return self._pass
      return None

    # A fully-constructed filter always has one of the two patterns set.
    assert False # unreachable
  #end def Apply
#end class Filter
class InputURL:
  """
  Each Input class knows how to yield a set of URLs from a data source.
  This one handles a single URL, manually specified in the config file.
  """
  def __init__(self, attributes):
    """ Capture the one URL described by the config attributes. """
    self._url = None # The lonely URL
    if not ValidateAttributes('URL', attributes,
                              ('href', 'lastmod', 'changefreq', 'priority')):
      return
    url = URL()
    for (key, value) in attributes.items():
      # The config calls it 'href'; the URL object calls it 'loc'.
      if key == 'href':
        key = 'loc'
      url.TrySetAttribute(key, value)
    if url.loc:
      self._url = url
      output.Log('Input: From URL "%s"' % url.loc, 2)
    else:
      output.Error('Url entries must have an href attribute.')
  #end def __init__

  def ProduceURLs(self, consumer):
    """ Produces URLs from our data source, hands them in to the consumer. """
    if self._url is not None:
      consumer(self._url, True)
  #end def ProduceURLs
#end class InputURL
class InputURLList:
  """
  Each Input class knows how to yield a set of URLs from a data source.
  This one handles a text file with a list of URLs: one URL per line,
  with optional extra space-separated "name=value" attribute columns.
  """
  def __init__(self, attributes):
    """ Validate 'path' and 'encoding' config attributes and remember
    the file to read; errors leave self._path as None. """
    self._path = None # The file path
    self._encoding = None # Encoding of that file
    if not ValidateAttributes('URLLIST', attributes, ('path', 'encoding')):
      return
    self._path = attributes.get('path')
    self._encoding = attributes.get('encoding', ENC_UTF8)
    if self._path:
      self._path = encoder.MaybeNarrowPath(self._path)
      if os.path.isfile(self._path):
        output.Log('Input: From URLLIST "%s"' % self._path, 2)
      else:
        output.Error('Can not locate file: %s' % self._path)
        self._path = None
    else:
      output.Error('Urllist entries must have a "path" attribute.')
  #end def __init__

  def ProduceURLs(self, consumer):
    """ Produces URLs from our data source, hands them in to the consumer. """
    # Open the file (transparently handles .gz)
    (frame, file) = OpenFileForRead(self._path, 'URLLIST')
    if not file:
      return
    # Iterate lines
    linenum = 0
    for line in file.readlines():
      linenum = linenum + 1
      # Strip comments and empty lines
      if self._encoding:
        line = encoder.WidenText(line, self._encoding)
      line = line.strip()
      if (not line) or line[0] == '#':
        continue
      # Split the line on space; column 0 is the URL itself
      url = URL()
      cols = line.split(' ')
      for i in range(0,len(cols)):
        cols[i] = cols[i].strip()
      url.TrySetAttribute('loc', cols[0])
      # Extract attributes from the other columns ("name=value" pairs)
      for i in range(1,len(cols)):
        if cols[i]:
          try:
            (attr_name, attr_val) = cols[i].split('=', 1)
            url.TrySetAttribute(attr_name, attr_val)
          except ValueError:
            output.Warn('Line %d: Unable to parse attribute: %s' %
                        (linenum, cols[i]))
      # Pass it on
      consumer(url, False)
    file.close()
    if frame:
      frame.close()
  #end def ProduceURLs
#end class InputURLList
class InputNewsURLList:
  """
  Each Input class knows how to yield a set of URLs from a data source.
  This one handles a text file with a list of News URLs and their metadata:
  one URL per line, tab-separated columns, with column meanings supplied by
  the config's comma-separated "tag_order" attribute.
  """
  def __init__(self, attributes):
    """ Validate 'path', 'encoding' and 'tag_order'; parse tag_order into
    a list of lowercase metadata tag names (which must include 'loc'). """
    self._path = None # The file path
    self._encoding = None # Encoding of that file
    self._tag_order = [] # Order of URL metadata
    if not ValidateAttributes('URLLIST', attributes, ('path', 'encoding', \
                  'tag_order')):
      return
    self._path = attributes.get('path')
    self._encoding = attributes.get('encoding', ENC_UTF8)
    self._tag_order = attributes.get('tag_order')
    if self._path:
      self._path = encoder.MaybeNarrowPath(self._path)
      if os.path.isfile(self._path):
        output.Log('Input: From URLLIST "%s"' % self._path, 2)
      else:
        output.Error('Can not locate file: %s' % self._path)
        self._path = None
    else:
      output.Error('Urllist entries must have a "path" attribute.')
    # parse tag_order into an array
    # tag_order_ascii created for more readable logging
    tag_order_ascii = []
    if self._tag_order:
      self._tag_order = self._tag_order.split(",")
      for i in range(0, len(self._tag_order)):
        element = self._tag_order[i].strip().lower()
        self._tag_order[i]= element
        tag_order_ascii.append(element.encode('ascii'))
      output.Log('Input: From URLLIST tag order is "%s"' % tag_order_ascii, 0)
    else:
      output.Error('News Urllist configuration file must contain tag_order '
                   'to define Sitemap metatags.')
    # verify all tag_order inputs are valid News Sitemap tags
    tag_order_dict = {}
    for tag in self._tag_order:
      tag_order_dict[tag] = ""
    if not ValidateAttributes('URLLIST', tag_order_dict, \
                  NEWS_SITEMAP_TAGS):
      return
    # loc tag must be present -- without it no URL can be produced
    loc_tag = False
    for tag in self._tag_order:
      if tag == 'loc':
        loc_tag = True
        break
    if not loc_tag:
      output.Error('News Urllist tag_order in configuration file '
                   'does not contain "loc" value: %s' % tag_order_ascii)
  #end def __init__

  def ProduceURLs(self, consumer):
    """ Produces URLs from our data source, hands them in to the consumer. """
    # Open the file (transparently handles .gz)
    (frame, file) = OpenFileForRead(self._path, 'URLLIST')
    if not file:
      return
    # Iterate lines
    linenum = 0
    for line in file.readlines():
      linenum = linenum + 1
      # Strip comments and empty lines
      if self._encoding:
        line = encoder.WidenText(line, self._encoding)
      line = line.strip()
      if (not line) or line[0] == '#':
        continue
      # Split the line on tabs; column i carries metadata tag_order[i].
      # Columns beyond len(tag_order) are silently ignored.
      url = NewsURL()
      cols = line.split('\t')
      for i in range(0,len(cols)):
        cols[i] = cols[i].strip()
      for i in range(0,len(cols)):
        if cols[i]:
          attr_value = cols[i]
          if i < len(self._tag_order):
            attr_name = self._tag_order[i]
            try:
              url.TrySetAttribute(attr_name, attr_value)
            except ValueError:
              output.Warn('Line %d: Unable to parse attribute: %s' %
                          (linenum, cols[i]))
      # Pass it on
      consumer(url, False)
    file.close()
    if frame:
      frame.close()
  #end def ProduceURLs
#end class InputNewsURLList
class InputDirectory:
  """
  Each Input class knows how to yield a set of URLs from a data source.
  This one handles a directory that acts as base for walking the filesystem.
  """
  def __init__(self, attributes, base_url):
    """ Validate and normalize config attributes 'path', 'url',
    'default_file' and 'remove_empty_directories'.  'path' and 'url' are
    required and are normalized to end in a separator; any problem is
    reported via output.Error and leaves this input disabled
    (self._path stays None). """
    self._path = None # The directory
    self._url = None # The URL equivalent
    self._default_file = None
    self._remove_empty_directories = False
    if not ValidateAttributes('DIRECTORY', attributes, ('path', 'url',
                              'default_file', 'remove_empty_directories')):
      return
    # Prep the path -- it MUST end in a sep
    path = attributes.get('path')
    if not path:
      output.Error('Directory entries must have both "path" and "url" '
                   'attributes')
      return
    path = encoder.MaybeNarrowPath(path)
    if not path.endswith(os.sep):
      path = path + os.sep
    if not os.path.isdir(path):
      output.Error('Can not locate directory: %s' % path)
      return
    # Prep the URL -- it MUST end in a sep and live under base_url
    url = attributes.get('url')
    if not url:
      output.Error('Directory entries must have both "path" and "url" '
                   'attributes')
      return
    url = URL.Canonicalize(url)
    if not url.endswith('/'):
      url = url + '/'
    if not url.startswith(base_url):
      url = urlparse.urljoin(base_url, url)
      if not url.startswith(base_url):
        output.Error('The directory URL "%s" is not relative to the '
                     'base_url: %s' % (url, base_url))
        return
    # Prep the default file -- it MUST be just a filename
    file = attributes.get('default_file')
    if file:
      file = encoder.MaybeNarrowPath(file)
      if os.sep in file:
        output.Error('The default_file "%s" can not include path information.'
                     % file)
        file = None
    # Prep the remove_empty_directories flag -- parse '1'/'true' and
    # '0'/'false' into a real boolean; anything else is a config error.
    remove_empty_directories = attributes.get('remove_empty_directories')
    if remove_empty_directories:
      if (remove_empty_directories == '1') or \
         (remove_empty_directories.lower() == 'true'):
        remove_empty_directories = True
      elif (remove_empty_directories == '0') or \
           (remove_empty_directories.lower() == 'false'):
        remove_empty_directories = False
      # otherwise the user set a non-default value
      else:
        output.Error('Configuration file remove_empty_directories '
                     'value is not recognized. Value must be true or false.')
        return
    else:
      remove_empty_directories = False
    self._path = path
    self._url = url
    self._default_file = file
    self._remove_empty_directories = remove_empty_directories
    if file:
      output.Log('Input: From DIRECTORY "%s" (%s) with default file "%s"'
                 % (path, url, file), 2)
    else:
      output.Log('Input: From DIRECTORY "%s" (%s) with no default file'
                 % (path, url), 2)
  #end def __init__

  def ProduceURLs(self, consumer):
    """ Produces URLs from our data source, hands them in to the consumer. """
    if not self._path:
      return
    root_path = self._path
    root_URL = self._url
    root_file = self._default_file
    remove_empty_directories = self._remove_empty_directories

    def HasReadPermissions(path):
      """ Verifies a given path has read permissions. """
      stat_info = os.stat(path)
      mode = stat_info[stat.ST_MODE]
      if mode & stat.S_IREAD:
        return True
      else:
        return None

    def PerFile(dirpath, name):
      """
      Called once per file.
      Note that 'name' will occasionally be None -- for a directory itself
      """
      # Pull a timestamp
      url = URL()
      isdir = False
      try:
        if name:
          path = os.path.join(dirpath, name)
        else:
          path = dirpath
        isdir = os.path.isdir(path)
        time = None
        if isdir and root_file:
          # A directory inherits its default file's timestamp when possible
          file = os.path.join(path, root_file)
          try:
            time = os.stat(file)[stat.ST_MTIME]
          except OSError:
            pass
        if not time:
          time = os.stat(path)[stat.ST_MTIME]
        url.lastmod = TimestampISO8601(time)
      except OSError:
        pass
      except ValueError:
        pass
      # Build a URL from the path relative to the configured root
      middle = dirpath[len(root_path):]
      if os.sep != '/':
        middle = middle.replace(os.sep, '/')
      if middle:
        middle = middle + '/'
      if name:
        middle = middle + name
        if isdir:
          middle = middle + '/'
      url.TrySetAttribute('loc', root_URL + encoder.WidenText(middle, None))
      # Suppress default files. (All the way down here so we can log it.)
      if name and (root_file == name):
        url.Log(prefix='IGNORED (default file)', level=2)
        return
      # Suppress directories when remove_empty_directories="true"
      try:
        if isdir:
          if HasReadPermissions(path):
            # BUG FIX: this previously compared the boolean flag against
            # the string 'true' (never equal), so empty directories were
            # never actually suppressed.
            if remove_empty_directories and \
               len(os.listdir(path)) == 0:
              output.Log('IGNORED empty directory %s' % str(path), level=1)
              return
          elif path == self._path:
            output.Error('IGNORED configuration file directory input %s due '
                         'to file permissions' % self._path)
          else:
            output.Log('IGNORED files within directory %s due to file '
                       'permissions' % str(path), level=0)
      except OSError:
        pass
      except ValueError:
        pass
      consumer(url, False)
    #end def PerFile

    def PerDirectory(ignore, dirpath, namelist):
      """
      Called once per directory with a list of all the contained files/dirs.
      """
      ignore = ignore # Avoid warnings of an unused parameter
      if not dirpath.startswith(root_path):
        output.Warn('Unable to decide what the root path is for directory: '
                    '%s' % dirpath)
        return
      for name in namelist:
        PerFile(dirpath, name)
    #end def PerDirectory

    output.Log('Walking DIRECTORY "%s"' % self._path, 1)
    PerFile(self._path, None)
    os.path.walk(self._path, PerDirectory, None)
  #end def ProduceURLs
#end class InputDirectory
class InputAccessLog:
  """
  Each Input class knows how to yield a set of URLs from a data source.
  This one handles access logs. It's non-trivial in that we want to
  auto-detect log files in the Common Logfile Format (as used by Apache,
  for instance) and the Extended Log File Format (as used by IIS, for
  instance).
  """
  def __init__(self, attributes):
    """ Validate 'path' and 'encoding'; the log format itself is
    auto-detected later, line by line, in ProduceURLs. """
    self._path = None # The file path
    self._encoding = None # Encoding of that file
    self._is_elf = False # Extended Log File Format?
    self._is_clf = False # Common Logfile Format?
    # Column indices discovered from the ELF '#Fields:' directive;
    # -1 means the field is absent.
    self._elf_status = -1 # ELF field: '200'
    self._elf_method = -1 # ELF field: 'HEAD'
    self._elf_uri = -1 # ELF field: '/foo?bar=1'
    self._elf_urifrag1 = -1 # ELF field: '/foo'
    self._elf_urifrag2 = -1 # ELF field: 'bar=1'
    if not ValidateAttributes('ACCESSLOG', attributes, ('path', 'encoding')):
      return
    self._path = attributes.get('path')
    self._encoding = attributes.get('encoding', ENC_UTF8)
    if self._path:
      self._path = encoder.MaybeNarrowPath(self._path)
      if os.path.isfile(self._path):
        output.Log('Input: From ACCESSLOG "%s"' % self._path, 2)
      else:
        output.Error('Can not locate file: %s' % self._path)
        self._path = None
    else:
      output.Error('Accesslog entries must have a "path" attribute.')
  #end def __init__

  def RecognizeELFLine(self, line):
    """ Recognize the Fields directive that heads an ELF file """
    if not line.startswith('#Fields:'):
      return False
    fields = line.split(' ')
    del fields[0]
    # Remember the column position of every field we care about
    for i in range(0, len(fields)):
      field = fields[i].strip()
      if field == 'sc-status':
        self._elf_status = i
      elif field == 'cs-method':
        self._elf_method = i
      elif field == 'cs-uri':
        self._elf_uri = i
      elif field == 'cs-uri-stem':
        self._elf_urifrag1 = i
      elif field == 'cs-uri-query':
        self._elf_urifrag2 = i
    output.Log('Recognized an Extended Log File Format file.', 2)
    return True
  #end def RecognizeELFLine

  def GetELFLine(self, line):
    """ Fetch the requested URL from an ELF line, or None if the line
    is not a successful HEAD/GET request. """
    fields = line.split(' ')
    count = len(fields)
    # Verify status was Ok
    if self._elf_status >= 0:
      if self._elf_status >= count:
        return None
      if not fields[self._elf_status].strip() == '200':
        return None
    # Verify method was HEAD or GET
    if self._elf_method >= 0:
      if self._elf_method >= count:
        return None
      if not fields[self._elf_method].strip() in ('HEAD', 'GET'):
        return None
    # Pull the full URL if we can
    if self._elf_uri >= 0:
      if self._elf_uri >= count:
        return None
      url = fields[self._elf_uri].strip()
      if url != '-':
        return url
    # Put together a fragmentary URL from stem ('-' means absent) + query
    if self._elf_urifrag1 >= 0:
      if self._elf_urifrag1 >= count or self._elf_urifrag2 >= count:
        return None
      urlfrag1 = fields[self._elf_urifrag1].strip()
      urlfrag2 = None
      if self._elf_urifrag2 >= 0:
        urlfrag2 = fields[self._elf_urifrag2]
      if urlfrag1 and (urlfrag1 != '-'):
        if urlfrag2 and (urlfrag2 != '-'):
          urlfrag1 = urlfrag1 + '?' + urlfrag2
        return urlfrag1
    return None
  #end def GetELFLine

  def RecognizeCLFLine(self, line):
    """ Try to tokenize a logfile line according to CLF pattern and see if
    it works. """
    match = ACCESSLOG_CLF_PATTERN.match(line)
    recognize = match and (match.group(1) in ('HEAD', 'GET'))
    if recognize:
      output.Log('Recognized a Common Logfile Format file.', 2)
    return recognize
  #end def RecognizeCLFLine

  def GetCLFLine(self, line):
    """ Fetch the requested URL from a CLF line, or None. """
    match = ACCESSLOG_CLF_PATTERN.match(line)
    if match:
      request = match.group(1)
      if request in ('HEAD', 'GET'):
        return match.group(2)
    return None
  #end def GetCLFLine

  def ProduceURLs(self, consumer):
    """ Produces URLs from our data source, hands them in to the consumer. """
    # Open the file (transparently handles .gz)
    (frame, file) = OpenFileForRead(self._path, 'ACCESSLOG')
    if not file:
      return
    # Iterate lines
    for line in file.readlines():
      if self._encoding:
        line = encoder.WidenText(line, self._encoding)
      line = line.strip()
      # If we don't know the format yet, try them both
      if (not self._is_clf) and (not self._is_elf):
        self._is_elf = self.RecognizeELFLine(line)
        self._is_clf = self.RecognizeCLFLine(line)
      # Digest the line
      match = None
      if self._is_elf:
        match = self.GetELFLine(line)
      elif self._is_clf:
        match = self.GetCLFLine(line)
      if not match:
        continue
      # Pass it on
      url = URL()
      url.TrySetAttribute('loc', match)
      consumer(url, True)
    file.close()
    if frame:
      frame.close()
  #end def ProduceURLs
#end class InputAccessLog
class FilePathGenerator:
  """
  This class generates filenames in a series, upon request.
  You can request any iteration number at any time, you don't
  have to go in order.
  Example of iterations for '/path/foo.xml.gz':
  0 --> /path/foo.xml.gz
  1 --> /path/foo1.xml.gz
  2 --> /path/foo2.xml.gz
  _index.xml --> /path/foo_index.xml
  """
  def __init__(self):
    self.is_gzip = False # Is this a GZIP file?
    self._path = None # '/path/'
    self._prefix = None # 'foo'
    self._suffix = None # '.xml.gz'
  #end def __init__

  def Preload(self, path):
    """ Splits up a path into forms ready for recombination.  Returns
    True on success, False (after logging an error) otherwise. """
    path = encoder.MaybeNarrowPath(path)
    # Get down to a base name
    path = os.path.normpath(path)
    base = os.path.basename(path).lower()
    if not base:
      output.Error('Couldn\'t parse the file path: %s' % path)
      return False
    lenbase = len(base)
    # Recognize extension
    lensuffix = 0
    compare_suffix = ['.xml', '.xml.gz', '.gz']
    for suffix in compare_suffix:
      if base.endswith(suffix):
        lensuffix = len(suffix)
        break
    if not lensuffix:
      output.Error('The path "%s" doesn\'t end in a supported file '
                   'extension.' % path)
      return False
    # NOTE: 'suffix' deliberately keeps its value from the loop above;
    # lensuffix != 0 guarantees the loop exited via break.
    self.is_gzip = suffix.endswith('.gz')
    # Split the original path into directory / stem / extension
    lenpath = len(path)
    self._path = path[:lenpath-lenbase]
    self._prefix = path[lenpath-lenbase:lenpath-lensuffix]
    self._suffix = path[lenpath-lensuffix:]
    return True
  #end def Preload

  def GeneratePath(self, instance):
    """ Generates the iterations, as described above.  'instance' is an
    integer (0 yields the unnumbered base name) or a string suffix such
    as '_index.xml'. """
    prefix = self._path + self._prefix
    if type(instance) == types.IntType:
      if instance:
        return '%s%d%s' % (prefix, instance, self._suffix)
      return prefix + self._suffix
    return prefix + instance
  #end def GeneratePath

  def GenerateURL(self, instance, root_url):
    """ Generates iterations, but as a URL instead of a path. """
    prefix = root_url + self._prefix
    retval = None
    if type(instance) == types.IntType:
      if instance:
        retval = '%s%d%s' % (prefix, instance, self._suffix)
      else:
        retval = prefix + self._suffix
    else:
      retval = prefix + instance
    return URL.Canonicalize(retval)
  #end def GenerateURL

  def GenerateWildURL(self, root_url):
    """ Generates a wildcard that should match all our iterations """
    prefix = URL.Canonicalize(root_url + self._prefix)
    temp = URL.Canonicalize(prefix + self._suffix)
    suffix = temp[len(prefix):]
    return prefix + '*' + suffix
  #end def GenerateWildURL
#end class FilePathGenerator
class PerURLStatistics:
  """ Keep track of some simple per-URL statistics, like file extension. """
  def __init__(self):
    self._extensions = {} # Maps extension string --> count of instances
  #end def __init__

  def Consume(self, url):
    """ Log some stats for the URL. At the moment, that means extension. """
    if url and url.loc:
      (scheme, netloc, path, query, frag) = urlparse.urlsplit(url.loc)
      if not path:
        return
      # Recognize directories
      if path.endswith('/'):
        # dict.get idiom replaces the old has_key/else upsert pattern
        self._extensions['/'] = self._extensions.get('/', 0) + 1
        return
      # Strip to a filename
      i = path.rfind('/')
      if i >= 0:
        assert i < len(path)
        path = path[i:]
      # Find extension
      i = path.rfind('.')
      if i > 0:
        assert i < len(path)
        ext = path[i:].lower()
        self._extensions[ext] = self._extensions.get(ext, 0) + 1
      else:
        self._extensions['(no extension)'] = self._extensions.get(
          '(no extension)', 0) + 1
  #end def Consume

  def Log(self):
    """ Dump out stats to the output. """
    if len(self._extensions):
      output.Log('Count of file extensions on URLs:', 1)
      # BUG FIX: the sorted key list was previously named 'set', shadowing
      # the builtin of the same name.
      exts = self._extensions.keys()
      exts.sort()
      for ext in exts:
        output.Log(' %7d %s' % (self._extensions[ext], ext), 1)
  #end def Log
#end class PerURLStatistics
class Sitemap(xml.sax.handler.ContentHandler):
"""
This is the big workhorse class that processes your inputs and spits
out sitemap files. It is built as a SAX handler for set up purposes.
That is, it processes an XML stream to bring itself up.
"""
def __init__(self, suppress_notify):
xml.sax.handler.ContentHandler.__init__(self)
self._filters = [] # Filter objects
self._inputs = [] # Input objects
self._urls = {} # Maps URLs to count of dups
self._set = [] # Current set of URLs
self._filegen = None # Path generator for output files
self._wildurl1 = None # Sitemap URLs to filter out
self._wildurl2 = None # Sitemap URLs to filter out
self._sitemaps = 0 # Number of output files
# We init _dup_max to 2 so the default priority is 0.5 instead of 1.0
self._dup_max = 2 # Max number of duplicate URLs
self._stat = PerURLStatistics() # Some simple stats
self._in_site = False # SAX: are we in a Site node?
self._in_Site_ever = False # SAX: were we ever in a Site?
self._default_enc = None # Best encoding to try on URLs
self._base_url = None # Prefix to all valid URLs
self._store_into = None # Output filepath
self._sitemap_type = None # Sitemap type (web, mobile or news)
self._suppress = suppress_notify # Suppress notify of servers
#end def __init__
def ValidateBasicConfig(self):
""" Verifies (and cleans up) the basic user-configurable options. """
all_good = True
if self._default_enc:
encoder.SetUserEncoding(self._default_enc)
# Canonicalize the base_url
if all_good and not self._base_url:
output.Error('A site needs a "base_url" attribute.')
all_good = False
if all_good and not URL.IsAbsolute(self._base_url):
output.Error('The "base_url" must be absolute, not relative: %s' %
self._base_url)
all_good = False
if all_good:
self._base_url = URL.Canonicalize(self._base_url)
if not self._base_url.endswith('/'):
self._base_url = self._base_url + '/'
output.Log('BaseURL is set to: %s' % self._base_url, 2)
# Load store_into into a generator
if all_good:
if self._store_into:
self._filegen = FilePathGenerator()
if not self._filegen.Preload(self._store_into):
all_good = False
else:
output.Error('A site needs a "store_into" attribute.')
all_good = False
# Ask the generator for patterns on what its output will look like
if all_good:
self._wildurl1 = self._filegen.GenerateWildURL(self._base_url)
self._wildurl2 = self._filegen.GenerateURL(SITEINDEX_SUFFIX,
self._base_url)
# Unify various forms of False
if all_good:
if self._suppress:
if (type(self._suppress) == types.StringType) or (type(self._suppress)
== types.UnicodeType):
if (self._suppress == '0') or (self._suppress.lower() == 'false'):
self._suppress = False
# Clean up the sitemap_type
if all_good:
match = False
# If sitemap_type is not specified, default to web sitemap
if not self._sitemap_type:
self._sitemap_type = 'web'
else:
self._sitemap_type = self._sitemap_type.lower()
for pattern in SITEMAP_TYPES:
if self._sitemap_type == pattern:
match = True
break
if not match:
output.Error('The "sitemap_type" value must be "web", "mobile" '
'or "news": %s' % self._sitemap_type)
all_good = False
output.Log('The Sitemap type is %s Sitemap.' % \
self._sitemap_type.upper(), 0)
# Done
if not all_good:
output.Log('See "example_config.xml" for more information.', 0)
return all_good
#end def ValidateBasicConfig
def Generate(self):
""" Run over all the Inputs and ask them to Produce """
# Run the inputs
for input in self._inputs:
input.ProduceURLs(self.ConsumeURL)
# Do last flushes
if len(self._set):
self.FlushSet()
if not self._sitemaps:
output.Warn('No URLs were recorded, writing an empty sitemap.')
self.FlushSet()
# Write an index as needed
if self._sitemaps > 1:
self.WriteIndex()
# Notify
self.NotifySearch()
# Dump stats
self._stat.Log()
#end def Generate
def ConsumeURL(self, url, allow_fragment):
"""
All per-URL processing comes together here, regardless of Input.
Here we run filters, remove duplicates, spill to disk as needed, etc.
"""
if not url:
return
# Validate
if not url.Validate(self._base_url, allow_fragment):
return
# Run filters
accept = None
for filter in self._filters:
accept = filter.Apply(url)
if accept != None:
break
if not (accept or (accept == None)):
url.Log(prefix='FILTERED', level=2)
return
# Ignore our out output URLs
if fnmatch.fnmatchcase(url.loc, self._wildurl1) or fnmatch.fnmatchcase(
url.loc, self._wildurl2):
url.Log(prefix='IGNORED (output file)', level=2)
return
# Note the sighting
hash = url.MakeHash()
if self._urls.has_key(hash):
dup = self._urls[hash]
if dup > 0:
dup = dup + 1
self._urls[hash] = dup
if self._dup_max < dup:
self._dup_max = dup
url.Log(prefix='DUPLICATE')
return
# Acceptance -- add to set
self._urls[hash] = 1
self._set.append(url)
self._stat.Consume(url)
url.Log()
# Flush the set if needed
if len(self._set) >= MAXURLS_PER_SITEMAP:
self.FlushSet()
#end def ConsumeURL
def FlushSet(self):
"""
Flush the current set of URLs to the output. This is a little
slow because we like to sort them all and normalize the priorities
before dumping.
"""
# Determine what Sitemap header to use (News or General)
if self._sitemap_type == 'news':
sitemap_header = NEWS_SITEMAP_HEADER
else:
sitemap_header = GENERAL_SITEMAP_HEADER
# Sort and normalize
output.Log('Sorting and normalizing collected URLs.', 1)
self._set.sort()
for url in self._set:
hash = url.MakeHash()
dup = self._urls[hash]
if dup > 0:
self._urls[hash] = -1
if not url.priority:
url.priority = '%.4f' % (float(dup) / float(self._dup_max))
# Get the filename we're going to write to
filename = self._filegen.GeneratePath(self._sitemaps)
if not filename:
output.Fatal('Unexpected: Couldn\'t generate output filename.')
self._sitemaps = self._sitemaps + 1
output.Log('Writing Sitemap file "%s" with %d URLs' %
(filename, len(self._set)), 1)
# Write to it
frame = None
file = None
try:
if self._filegen.is_gzip:
basename = os.path.basename(filename);
frame = open(filename, 'wb')
file = gzip.GzipFile(fileobj=frame, filename=basename, mode='wt')
else:
file = open(filename, 'wt')
file.write(sitemap_header)
for url in self._set:
url.WriteXML(file)
file.write(SITEMAP_FOOTER)
file.close()
if frame:
frame.close()
frame = None
file = None
except IOError:
output.Fatal('Couldn\'t write out to file: %s' % filename)
os.chmod(filename, 0644)
# Flush
self._set = []
#end def FlushSet
def WriteIndex(self):
""" Write the master index of all Sitemap files """
# Make a filename
filename = self._filegen.GeneratePath(SITEINDEX_SUFFIX)
if not filename:
output.Fatal('Unexpected: Couldn\'t generate output index filename.')
output.Log('Writing index file "%s" with %d Sitemaps' %
(filename, self._sitemaps), 1)
# Determine what Sitemap index header to use (News or General)
if self._sitemap_type == 'news':
sitemap_index_header = NEWS_SITEMAP_HEADER
else:
sitemap__index_header = GENERAL_SITEMAP_HEADER
# Make a lastmod time
lastmod = TimestampISO8601(time.time())
# Write to it
try:
fd = open(filename, 'wt')
fd.write(sitemap_index_header)
for mapnumber in range(0,self._sitemaps):
# Write the entry
mapurl = self._filegen.GenerateURL(mapnumber, self._base_url)
mapattributes = { 'loc' : mapurl, 'lastmod' : lastmod }
fd.write(SITEINDEX_ENTRY % mapattributes)
fd.write(SITEINDEX_FOOTER)
fd.close()
fd = None
except IOError:
output.Fatal('Couldn\'t write out to file: %s' % filename)
os.chmod(filename, 0644)
#end def WriteIndex
def NotifySearch(self):
""" Send notification of the new Sitemap(s) to the search engines. """
if self._suppress:
output.Log('Search engine notification is suppressed.', 1)
return
output.Log('Notifying search engines.', 1)
# Override the urllib's opener class with one that doesn't ignore 404s
class ExceptionURLopener(urllib.FancyURLopener):
def http_error_default(self, url, fp, errcode, errmsg, headers):
output.Log('HTTP error %d: %s' % (errcode, errmsg), 2)
raise IOError
#end def http_error_default
#end class ExceptionURLOpener
old_opener = urllib._urlopener
urllib._urlopener = ExceptionURLopener()
# Build the URL we want to send in
if self._sitemaps > 1:
url = self._filegen.GenerateURL(SITEINDEX_SUFFIX, self._base_url)
else:
url = self._filegen.GenerateURL(0, self._base_url)
# Test if we can hit it ourselves
try:
u = urllib.urlopen(url)
u.close()
except IOError:
output.Error('When attempting to access our generated Sitemap at the '
'following URL:\n %s\n we failed to read it. Please '
'verify the store_into path you specified in\n'
' your configuration file is web-accessable. Consult '
'the FAQ for more\n information.' % url)
output.Warn('Proceeding to notify with an unverifyable URL.')
# Cycle through notifications
# To understand this, see the comment near the NOTIFICATION_SITES comment
for ping in NOTIFICATION_SITES:
query_map = ping[3]
query_attr = ping[5]
query_map[query_attr] = url
query = urllib.urlencode(query_map)
notify = urlparse.urlunsplit((ping[0], ping[1], ping[2], query, ping[4]))
# Send the notification
output.Log('Notifying: %s' % ping[1], 0)
output.Log('Notification URL: %s' % notify, 2)
try:
u = urllib.urlopen(notify)
u.read()
u.close()
except IOError:
output.Warn('Cannot contact: %s' % ping[1])
if old_opener:
urllib._urlopener = old_opener
#end def NotifySearch
def startElement(self, tag, attributes):
""" SAX processing, called per node in the config stream. """
if tag == 'site':
if self._in_site:
output.Error('Can not nest Site entries in the configuration.')
else:
self._in_site = True
if not ValidateAttributes('SITE', attributes,
('verbose', 'default_encoding', 'base_url', 'store_into',
'suppress_search_engine_notify', 'sitemap_type')):
return
verbose = attributes.get('verbose', 0)
if verbose:
output.SetVerbose(verbose)
self._default_enc = attributes.get('default_encoding')
self._base_url = attributes.get('base_url')
self._store_into = attributes.get('store_into')
self._sitemap_type= attributes.get('sitemap_type')
if not self._suppress:
self._suppress = attributes.get('suppress_search_engine_notify',
False)
self.ValidateBasicConfig()
elif tag == 'filter':
self._filters.append(Filter(attributes))
elif tag == 'url':
print type(attributes)
self._inputs.append(InputURL(attributes))
elif tag == 'urllist':
for attributeset in ExpandPathAttribute(attributes, 'path'):
if self._sitemap_type == 'news':
self._inputs.append(InputNewsURLList(attributeset))
else:
self._inputs.append(InputURLList(attributeset))
elif tag == 'directory':
self._inputs.append(InputDirectory(attributes, self._base_url))
elif tag == 'accesslog':
for attributeset in ExpandPathAttribute(attributes, 'path'):
self._inputs.append(InputAccessLog(attributeset))
else:
output.Error('Unrecognized tag in the configuration: %s' % tag)
#end def startElement
def endElement(self, tag):
""" SAX processing, called per node in the config stream. """
if tag == 'site':
assert self._in_site
self._in_site = False
self._in_site_ever = True
#end def endElement
def endDocument(self):
""" End of SAX, verify we can proceed. """
if not self._in_site_ever:
output.Error('The configuration must specify a "site" element.')
else:
if not self._inputs:
output.Warn('There were no inputs to generate a sitemap from.')
#end def endDocument
#end class Sitemap
def ValidateAttributes(tag, attributes, goodattributes):
  """ Makes sure 'attributes' does not contain any attribute not
  listed in 'goodattributes'.  Returns True when all attributes are
  recognized; otherwise reports each unknown one and returns False. """
  unknown = [attr for attr in attributes.keys() if attr not in goodattributes]
  for attr in unknown:
    output.Error('Unknown %s attribute: %s' % (tag, attr))
  return not unknown
#end def ValidateAttributes
def ExpandPathAttribute(src, attrib):
  """ Given a dictionary of attributes, return a list of dictionaries
  with all the same attributes except for the one named attrib.
  That one, we treat as a file path and expand into all its possible
  variations. """
  # Do the path expansion. On any error, just return the source dictionary.
  path = src.get(attrib)
  if not path:
    return [src]
  path = encoder.MaybeNarrowPath(path);
  pathlist = glob.glob(path)
  if not pathlist:
    return [src]
  # If this isn't actually a dictionary (e.g. a SAX attributes object),
  # copy it into one so that .copy() below is available.
  if type(src) != types.DictionaryType:
    tmp = {}
    for key in src.keys():
      tmp[key] = src[key]
    src = tmp
  # Create N new dictionaries, one per glob match
  retval = []
  for path in pathlist:
    dst = src.copy()
    dst[attrib] = path
    retval.append(dst)
  return retval
#end def ExpandPathAttribute
def OpenFileForRead(path, logtext):
  """ Opens a text file, be it GZip or plain """
  frame = None
  file = None
  if not path:
    return (frame, file)
  try:
    if path.endswith('.gz'):
      # GZip: keep the raw stream around so callers can close both layers
      frame = open(path, 'rb')
      file = gzip.GzipFile(fileobj=frame, mode='rt')
    else:
      file = open(path, 'rt')
    if logtext:
      message = 'Opened %s file: %s' % (logtext, path)
    else:
      message = 'Opened file: %s' % path
    output.Log(message, 1)
  except IOError:
    output.Error('Can not open file: %s' % path)
  return (frame, file)
#end def OpenFileForRead
def TimestampISO8601(t):
  """Seconds since epoch (1970-01-01) --> ISO 8601 time string."""
  utc_tuple = time.gmtime(t)
  return time.strftime('%Y-%m-%dT%H:%M:%SZ', utc_tuple)
#end def TimestampISO8601
def CreateSitemapFromFile(configpath, suppress_notify):
  """ Sets up a new Sitemap object from the specified configuration file.
  Returns the Sitemap on success, or None if parsing added any errors. """
  # Remember error count on the way in
  num_errors = output.num_errors
  # Rev up SAX to parse the config
  sitemap = Sitemap(suppress_notify)
  try:
    output.Log('Reading configuration file: %s' % configpath, 0)
    xml.sax.parse(configpath, sitemap)
  except IOError:
    output.Error('Cannot read configuration file: %s' % configpath)
  except xml.sax._exceptions.SAXParseException, e:
    output.Error('XML error in the config file (line %d, column %d): %s' %
                 (e._linenum, e._colnum, e.getMessage()))
  except xml.sax._exceptions.SAXReaderNotAvailable:
    output.Error('Some installs of Python 2.2 did not include complete support'
                 ' for XML.\n Please try upgrading your version of Python'
                 ' and re-running the script.')
  # If we added any errors, return no sitemap
  if num_errors == output.num_errors:
    return sitemap
  return None
#end def CreateSitemapFromFile
def ProcessCommandFlags(args):
  """
  Parse command line flags per specified usage, pick off key, value pairs
  All flags of type "--key=value" will be processed as __flags[key] = value,
  "--option" will be processed as __flags[option] = option

  Returns the dict of parsed flags, or None as soon as an argument does
  not look like a flag at all.
  """
  flags = {}
  # Raw strings keep the regex escapes (\S) literal.
  rkeyval = r'--(?P<key>\S*)[=](?P<value>\S*)' # --key=val
  roption = r'--(?P<option>\S*)'               # --key
  r = '(' + rkeyval + ')|(' + roption + ')'
  rc = re.compile(r)
  for a in args:
    match = rc.search(a)
    if match is None:
      # Not a recognizable flag; previous code reached this via the
      # AttributeError raised by None.groupdict().
      return None
    rcg = match.groupdict()
    # groupdict() always contains every named group (value None when the
    # group did not participate in the match), so the values must be
    # tested for None.  The old has_key() checks always succeeded and
    # inserted a bogus {None: None} entry for "--option" style flags.
    if rcg['key'] is not None:
      flags[rcg['key']] = rcg['value']
    if rcg['option'] is not None:
      flags[rcg['option']] = rcg['option']
  return flags
#end def ProcessCommandFlags
#
# __main__
#
if __name__ == '__main__':
  # Parse flags; show usage when --config is missing or --help was given.
  flags = ProcessCommandFlags(sys.argv[1:])
  if not flags or not flags.has_key('config') or flags.has_key('help'):
    output.Log(__usage__, 0)
  else:
    # --testing suppresses the search-engine notification step.
    suppress_notify = flags.has_key('testing')
    sitemap = CreateSitemapFromFile(flags['config'], suppress_notify)
    if not sitemap:
      output.Log('Configuration file errors -- exiting.', 0)
    else:
      sitemap.Generate()
      output.Log('Number of errors: %d' % output.num_errors, 1)
      output.Log('Number of warnings: %d' % output.num_warns, 1)
| {
"content_hash": "dac65af163854ceeedc833fe3263e09a",
"timestamp": "",
"source": "github",
"line_count": 2052,
"max_line_length": 81,
"avg_line_length": 31.841130604288498,
"alnum_prop": 0.6016560041629679,
"repo_name": "openprocurement/restkit",
"id": "e270a39d9cc75b78b6404dcba57f9e6bccd3d0b7",
"size": "67145",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "doc/sitemap_gen.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "187249"
}
],
"symlink_target": ""
} |
"""Test various command line arguments and configuration file parameters."""
import os
from test_framework.test_framework import BitcoinTestFramework
class ConfArgsTest(BitcoinTestFramework):
    """Exercises command-line argument and configuration-file handling.

    Each sub-test (re)starts the single node with crafted arguments or
    config files and asserts on the exact error/log output.
    """
    def set_test_params(self):
        # A single fresh node is enough; every case restarts it with
        # different arguments or configuration files.
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.supports_cli = False
    def test_config_file_parser(self):
        """Check error reporting for malformed configuration files."""
        # Assume node is stopped
        inc_conf_file_path = os.path.join(self.nodes[0].datadir, 'include.conf')
        with open(os.path.join(self.nodes[0].datadir, 'dash.conf'), 'a', encoding='utf-8') as conf:
            conf.write('includeconf={}\n'.format(inc_conf_file_path))
        # Unknown options on the command line are fatal...
        self.nodes[0].assert_start_raises_init_error(
            expected_msg='Error: Error parsing command line arguments: Invalid parameter -dash_cli',
            extra_args=['-dash_cli=1'],
        )
        # ...but unknown options in the config file only produce a warning.
        with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
            conf.write('dash_conf=1\n')
        with self.nodes[0].assert_debug_log(expected_msgs=['Ignoring unknown configuration value dash_conf']):
            self.start_node(0)
        self.stop_node(0)
        # Options in config files must not carry a leading '-'.
        with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
            conf.write('-dash=1\n')
        self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 1: -dash=1, options in configuration file must be specified without leading -')
        if self.is_wallet_compiled():
            with open(inc_conf_file_path, 'w', encoding='utf8') as conf:
                conf.write("wallet=foo\n")
            self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Config setting for -wallet only applied on %s network when in [%s] section.' % (self.chain, self.chain))
        with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
            conf.write('regtest=0\n') # mainnet
            conf.write('acceptnonstdtxn=1\n')
        self.nodes[0].assert_start_raises_init_error(expected_msg='Error: acceptnonstdtxn is not currently supported for main chain')
        with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
            conf.write('nono\n')
        self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 1: nono, if you intended to specify a negated option, use nono=1 instead')
        # '#' inside an rpcpassword value is ambiguous and rejected, in any
        # of the plain / network-prefixed / sectioned spellings.
        with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
            conf.write('server=1\nrpcuser=someuser\nrpcpassword=some#pass')
        self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 3, using # in rpcpassword can be ambiguous and should be avoided')
        with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
            conf.write('server=1\nrpcuser=someuser\nmain.rpcpassword=some#pass')
        self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 3, using # in rpcpassword can be ambiguous and should be avoided')
        with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
            conf.write('server=1\nrpcuser=someuser\n[main]\nrpcpassword=some#pass')
        self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Error reading configuration file: parse error on line 4, using # in rpcpassword can be ambiguous and should be avoided')
        # Unrecognized sections warn (one warning per include file).
        inc_conf_file2_path = os.path.join(self.nodes[0].datadir, 'include2.conf')
        with open(os.path.join(self.nodes[0].datadir, 'dash.conf'), 'a', encoding='utf-8') as conf:
            conf.write('includeconf={}\n'.format(inc_conf_file2_path))
        with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
            conf.write('testnot.datadir=1\n')
        with open(inc_conf_file2_path, 'w', encoding='utf-8') as conf:
            conf.write('[testnet]\n')
        self.restart_node(0)
        self.nodes[0].stop_node(expected_stderr='Warning: ' + inc_conf_file_path + ':1 Section [testnot] is not recognized.' + os.linesep + inc_conf_file2_path + ':1 Section [testnet] is not recognized.')
        with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
            conf.write('') # clear
        with open(inc_conf_file2_path, 'w', encoding='utf-8') as conf:
            conf.write('') # clear
    def test_log_buffer(self):
        # Early startup log lines are buffered and must still be emitted.
        with self.nodes[0].assert_debug_log(expected_msgs=['Warning: parsed potentially confusing double-negative -connect=0\n']):
            self.start_node(0, extra_args=['-noconnect=0'])
        self.stop_node(0)
    def test_args_log(self):
        """Sensitive argument values must be masked in the debug log."""
        self.log.info('Test config args logging')
        with self.nodes[0].assert_debug_log(
                expected_msgs=[
                    'Command-line arg: addnode="some.node"',
                    'Command-line arg: rpcauth=****',
                    'Command-line arg: rpcbind=****',
                    'Command-line arg: rpcpassword=****',
                    'Command-line arg: rpcuser=****',
                    'Command-line arg: torpassword=****',
                    'Config file arg: %s="1"' % self.chain,
                    'Config file arg: [%s] server="1"' % self.chain,
                ],
                unexpected_msgs=[
                    'alice:f7efda5c189b999524f151318c0c86$d5b51b3beffbc0',
                    '127.1.1.1',
                    'secret-rpcuser',
                    'secret-torpassword',
                ]):
            self.start_node(0, extra_args=[
                '-addnode=some.node',
                '-rpcauth=alice:f7efda5c189b999524f151318c0c86$d5b51b3beffbc0',
                '-rpcbind=127.1.1.1',
                '-rpcpassword=',
                '-rpcuser=secret-rpcuser',
                '-torpassword=secret-torpassword',
            ])
        self.stop_node(0)
    def run_test(self):
        self.stop_node(0)
        self.test_log_buffer()
        self.test_args_log()
        self.test_config_file_parser()
        # Remove the -datadir argument so it doesn't override the config file
        self.nodes[0].args = [arg for arg in self.nodes[0].args if not arg.startswith("-datadir")]
        default_data_dir = self.nodes[0].datadir
        new_data_dir = os.path.join(default_data_dir, 'newdatadir')
        new_data_dir_2 = os.path.join(default_data_dir, 'newdatadir2')
        # Check that using -datadir argument on non-existent directory fails
        self.nodes[0].datadir = new_data_dir
        self.nodes[0].assert_start_raises_init_error(['-datadir=' + new_data_dir], 'Error: Specified data directory "' + new_data_dir + '" does not exist.')
        # Check that using non-existent datadir in conf file fails
        conf_file = os.path.join(default_data_dir, "dash.conf")
        # datadir needs to be set before [chain] section
        conf_file_contents = open(conf_file, encoding='utf8').read()
        with open(conf_file, 'w', encoding='utf8') as f:
            f.write("datadir=" + new_data_dir + "\n")
            f.write(conf_file_contents)
        self.nodes[0].assert_start_raises_init_error(['-conf=' + conf_file], 'Error: Error reading configuration file: specified data directory "' + new_data_dir + '" does not exist.')
        # Create the directory and ensure the config file now works
        os.mkdir(new_data_dir)
        # Temporarily disabled, because this test would access the user's home dir (~/.bitcoin)
        # NOTE(review): despite the comment above, the call below is active --
        # confirm whether it should be commented out.
        self.start_node(0, ['-conf='+conf_file, '-wallet=w1'])
        self.stop_node(0)
        assert os.path.exists(os.path.join(new_data_dir, self.chain, 'blocks'))
        if self.is_wallet_compiled():
            assert os.path.exists(os.path.join(new_data_dir, self.chain, 'wallets', 'w1'))
        # Ensure command line argument overrides datadir in conf
        os.mkdir(new_data_dir_2)
        self.nodes[0].datadir = new_data_dir_2
        self.start_node(0, ['-datadir='+new_data_dir_2, '-conf='+conf_file, '-wallet=w2'])
        assert os.path.exists(os.path.join(new_data_dir_2, self.chain, 'blocks'))
        if self.is_wallet_compiled():
            assert os.path.exists(os.path.join(new_data_dir_2, self.chain, 'wallets', 'w2'))
# Entry point: run the functional test when executed directly.
if __name__ == '__main__':
    ConfArgsTest().main()
| {
"content_hash": "3cf489b376b723a040ccf61e9d78f5f0",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 207,
"avg_line_length": 50.99382716049383,
"alnum_prop": 0.6140903038373078,
"repo_name": "dashpay/dash",
"id": "aea8a81e92c2864c3bd32bb15c0254cbd238d4b0",
"size": "8470",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/functional/feature_config_args.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28178"
},
{
"name": "C",
"bytes": "1866352"
},
{
"name": "C++",
"bytes": "9729795"
},
{
"name": "CMake",
"bytes": "32255"
},
{
"name": "CSS",
"bytes": "113028"
},
{
"name": "Dockerfile",
"bytes": "6344"
},
{
"name": "GDB",
"bytes": "444"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "M4",
"bytes": "235904"
},
{
"name": "Makefile",
"bytes": "128711"
},
{
"name": "Objective-C++",
"bytes": "5478"
},
{
"name": "Python",
"bytes": "1899906"
},
{
"name": "QMake",
"bytes": "1389"
},
{
"name": "Sage",
"bytes": "39795"
},
{
"name": "Shell",
"bytes": "134642"
}
],
"symlink_target": ""
} |
from traits.api import HasTraits, File
from traitsui.api import FileEditor, Item, View
class FileEditorTest(HasTraits):
    # Demonstrates three FileEditor variants side by side: simple text-entry,
    # custom (embedded browser) and a simple editor with file-type filters.
    filename_simple = File
    filename_custom = File
    filename_filtered = File
    view = View( Item('filename_simple', style='simple'),
                 Item('filename_custom', style='custom'),
                 # The filter argument restricts the dialog's file choices.
                 Item('filename_filtered', editor=FileEditor(filter=['*', '*.dat', '*.gr']),
                      style='simple'),
               )
# Demo entry point: instantiate the model and open the Traits UI dialog.
fed = FileEditorTest()
fed.configure_traits()
| {
"content_hash": "4a9475ec8315cbfb62ef58752ef3f6dd",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 92,
"avg_line_length": 33.6875,
"alnum_prop": 0.5825602968460112,
"repo_name": "marshallmcdonnell/interactive_plotting",
"id": "b43cbbf10255266370ebfc0b0ea0df75693ba9ca",
"size": "562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TraitsUI/manual/file_editor.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "210205"
}
],
"symlink_target": ""
} |
import pyaf.Bench.YahooStocks as ys
import pyaf.Bench.TS_datasets as tsds
# Build a tester over the bundled "my_test" Yahoo stock-price dataset and
# forecast every signal with a 12-step horizon.
tester7 = ys.cYahoo_Tester(tsds.load_yahoo_stock_prices("my_test") , "YAHOO_my_test");
tester7.testAllSignals(12);
# tester7.run_multiprocessed(18);
| {
"content_hash": "f11ab669fdfb2ab611240f1a7da08671",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 86,
"avg_line_length": 37.333333333333336,
"alnum_prop": 0.7678571428571429,
"repo_name": "antoinecarme/pyaf",
"id": "6d2e79f1eb7561ff7e49408c4ac643c8b2f173ae",
"size": "224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/bench/test_yahoo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
import re
from validators import KeywordValidator, Validator
# Array of validator actions, consulted for each incoming message.
validators = []
########################################
# Define Validators
########################################
#############
stop_validator = KeywordValidator('stop')
validators.append(stop_validator)
@stop_validator.set('action')
def validator_action(message):
print 'STOP messaging for {}'.format(message.contact)
message.contact.set_status('stopped','Participant sent stop keyword')
message.text += ' - participant withdrew'
message.contact.send_automated_message(
send_base='stop',
send_offset=0,
group='one-way',
hiv_messaging=False,
control=True
)
return False
###############
validation_validator = Validator('validation')
validators.append(validation_validator)
@validation_validator.set('check')
def validator_action(message):
if re.match('^\d{5}$',message.text) and not message.contact.is_validated:
message.topic = 'validation'
message.is_related = True
message.is_viewed = True
if message.contact.validation_key == message.text.strip():
message.text = 'Validation Code Correct: ' + message.text
return True
else:
message.text = 'Validation Code Incorrect: ' + message.text
return False
return False
@validation_validator.set('action')
def validator_action(message):
# print 'VALIDATION ACTION for {}'.format(contact)
message.contact.is_validated = True
message.contact.save()
return False # Don't continue validation check s
###############
study_group_validator = Validator('study_group')
validators.append(study_group_validator)
@study_group_validator.set('check')
def validator_action(message):
if message.contact.study_group in ('one-way','control'):
message.text = "WARNGING: {} {}".format(message.contact.study_group.upper(), message.text)
message.is_viewed = True
return True
return False
@study_group_validator.set('action')
def validator_action(message):
# Send contact bounce message
message.contact.send_automated_message(send_base='bounce',send_offset=0,hiv_messaging=False,control=True)
| {
"content_hash": "20f5cc323997ca9168732f1c5eb8c221",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 109,
"avg_line_length": 32.42028985507246,
"alnum_prop": 0.6548949485918641,
"repo_name": "I-TECH-UW/mwachx",
"id": "20bf81f5e9d745590a2714ad0d612164ab2fbe67",
"size": "2237",
"binary": false,
"copies": "2",
"ref": "refs/heads/neo",
"path": "transports/validation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "14250"
},
{
"name": "HTML",
"bytes": "56837"
},
{
"name": "JavaScript",
"bytes": "43890"
},
{
"name": "Python",
"bytes": "358250"
},
{
"name": "Shell",
"bytes": "2976"
}
],
"symlink_target": ""
} |
from contextlib import contextmanager
@contextmanager
def multi_file_manager(files, mode='r', newline=None, encoding=None):
    """Open several files at once and guarantee they are all closed on exit.

    Based on https://stackoverflow.com/a/21683192

    :param files: iterable of file paths to open.
    :param mode: mode passed to open() for every file.
    :param newline: newline argument forwarded to open().
    :param encoding: text encoding forwarded to open().
    """
    # NOTE: open()'s third positional parameter is *buffering*, so newline
    # and encoding must be passed by keyword -- the previous version passed
    # them positionally, sending `newline` into `buffering` (a TypeError
    # for the common newline=None case).
    handles = [open(file, mode, newline=newline, encoding=encoding)
               for file in files]
    try:
        yield handles
    finally:
        # Close even when the with-body raises.
        for file in handles:
            file.close()
| {
"content_hash": "8956c2f2d5ae10e57083fd204c65e4c9",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 67,
"avg_line_length": 23.75,
"alnum_prop": 0.7017543859649122,
"repo_name": "Holovin/D_GrabDemo",
"id": "e260ed745e02586ca37d2c367a23657adc29d3c3",
"size": "285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helpers/multi_file_manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36271"
}
],
"symlink_target": ""
} |
from django.conf import settings
from cms.models import Title
from minitrue.base import replacer
from minitrue.contrib.django_cms.utils import plugin_get_url
def title_get_url(obj):
    """Resolve the public URL of the page that owns this title."""
    page = obj.page
    return page.get_absolute_url()
# Page titles: index every visible/SEO text field; URLs resolve via the page.
replacer.register(Title, fields=['title', 'page_title', 'menu_title', 'redirect', 'meta_description', 'meta_keywords'],
                  urlgetter=title_get_url, select_related=['page'])
# Each stock CMS plugin is registered only when its app is installed.
if 'cms.plugins.text' in settings.INSTALLED_APPS:
    from cms.plugins.text.models import Text
    replacer.register(Text, fields=['body'], urlgetter=plugin_get_url,
                      select_related=['placeholder__page'])
if 'cms.plugins.snippet' in settings.INSTALLED_APPS:
    from cms.plugins.snippet.models import Snippet
    replacer.register(Snippet, fields=['html'], select_related=['placeholder__page'])
if 'cms.plugins.file' in settings.INSTALLED_APPS:
    from cms.plugins.file.models import File
    replacer.register(File, fields=['title'],
                      urlgetter=plugin_get_url,
                      select_related=['placeholder__page'],
                      )
if 'cms.plugins.link' in settings.INSTALLED_APPS:
    from cms.plugins.link.models import Link
    replacer.register(Link, fields=['name'], urlgetter=plugin_get_url,
                      select_related=['placeholder__page']
                      )
if 'cms.plugins.picture' in settings.INSTALLED_APPS:
    from cms.plugins.picture.models import Picture
    replacer.register(Picture, fields=['alt', 'longdesc'],
                      urlgetter=plugin_get_url,
                      select_related=['placeholder__page']
                      )
if 'cms.plugins.teaser' in settings.INSTALLED_APPS:
    from cms.plugins.teaser.models import Teaser
    replacer.register(Teaser, fields=['title', 'description'],
                      urlgetter=plugin_get_url,
                      select_related=['placeholder__page']
                      )
if 'cms.plugins.twitter' in settings.INSTALLED_APPS:
    from cms.plugins.twitter.models import TwitterRecentEntries, TwitterSearch
    replacer.register(TwitterRecentEntries, fields=['title',],
                      urlgetter=plugin_get_url,
                      select_related=['placeholder__page']
                      )
    replacer.register(TwitterSearch, fields=['title',],
                      urlgetter=plugin_get_url,
                      select_related=['placeholder__page']
                      )
"content_hash": "940540122991e911fb33374ded117a6a",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 119,
"avg_line_length": 36.76271186440678,
"alnum_prop": 0.7076994006454588,
"repo_name": "piquadrat/django-minitrue",
"id": "a7c37104f5cb40cbfe45de7795d9b25be901cade",
"size": "2169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "minitrue/contrib/django_cms/searchreplace.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "3679"
},
{
"name": "Python",
"bytes": "31519"
}
],
"symlink_target": ""
} |
import asyncio
import logging
import os
import ssl
import threading
import traceback
from urllib.parse import urlparse
from typing import Any, Dict, List, Optional, Tuple
# TODO(bashi): Remove import check suppressions once aioquic dependency is resolved.
from aioquic.buffer import Buffer # type: ignore
from aioquic.asyncio import QuicConnectionProtocol, serve # type: ignore
from aioquic.asyncio.client import connect # type: ignore
from aioquic.h3.connection import H3_ALPN, FrameType, H3Connection, ProtocolError, Setting # type: ignore
from aioquic.h3.events import H3Event, HeadersReceived, WebTransportStreamDataReceived, DatagramReceived, DataReceived # type: ignore
from aioquic.quic.configuration import QuicConfiguration # type: ignore
from aioquic.quic.connection import logger as quic_connection_logger # type: ignore
from aioquic.quic.connection import stream_is_unidirectional
from aioquic.quic.events import QuicEvent, ProtocolNegotiated, ConnectionTerminated, StreamReset # type: ignore
from aioquic.tls import SessionTicket # type: ignore
from tools.wptserve.wptserve import stash # type: ignore
from .capsule import H3Capsule, H3CapsuleDecoder, CapsuleType
"""
A WebTransport over HTTP/3 server for testing.
The server interprets the underlying protocols (WebTransport, HTTP/3 and QUIC)
and passes events to a particular webtransport handler. From the standpoint of
test authors, a webtransport handler is a Python script which contains some
callback functions. See handler.py for available callbacks.
"""
SERVER_NAME = 'webtransport-h3-server'
_logger: logging.Logger = logging.getLogger(__name__)
_doc_root: str = ""
# Set aioquic's log level to WARNING to suppress some INFO logs which are
# recorded every connection close.
quic_connection_logger.setLevel(logging.WARNING)
class H3ConnectionWithDatagram04(H3Connection):
    """
    A H3Connection subclass, to make it work with the latest
    HTTP Datagram protocol.
    """
    # SETTINGS identifier advertised for the draft-04 HTTP Datagram scheme.
    H3_DATAGRAM_04 = 0xffd277
    # https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-h3-websockets-00#section-5
    ENABLE_CONNECT_PROTOCOL = 0x08
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # Becomes True once the peer advertises the draft-04 setting.
        self._supports_h3_datagram_04 = False
    def _validate_settings(self, settings: Dict[int, int]) -> None:
        # If the peer sent the draft-04 setting, mirror it into the base
        # class's H3_DATAGRAM setting so datagram support is enabled.
        H3_DATAGRAM_04 = H3ConnectionWithDatagram04.H3_DATAGRAM_04
        if H3_DATAGRAM_04 in settings and settings[H3_DATAGRAM_04] == 1:
            settings[Setting.H3_DATAGRAM] = 1
            self._supports_h3_datagram_04 = True
        return super()._validate_settings(settings)
    def _get_local_settings(self) -> Dict[int, int]:
        # Advertise draft-04 datagram support and the extended CONNECT
        # protocol in our own SETTINGS frame.
        H3_DATAGRAM_04 = H3ConnectionWithDatagram04.H3_DATAGRAM_04
        settings = super()._get_local_settings()
        settings[H3_DATAGRAM_04] = 1
        settings[H3ConnectionWithDatagram04.ENABLE_CONNECT_PROTOCOL] = 1
        return settings
    @property
    def supports_h3_datagram_04(self) -> bool:
        """
        True if the client supports the latest HTTP Datagram protocol.
        """
        return self._supports_h3_datagram_04
class WebTransportH3Protocol(QuicConnectionProtocol):
    """
    Per-connection protocol object: interprets HTTP/3 on top of QUIC,
    performs the WebTransport CONNECT handshake and forwards session
    events to the handler script selected by the request's `:path`.
    """
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self._handler: Optional[Any] = None
        self._http: Optional[H3ConnectionWithDatagram04] = None
        # Stream carrying the CONNECT request; also the session's capsule
        # stream.
        self._session_stream_id: Optional[int] = None
        # (code, reason) from a CLOSE_WEBTRANSPORT_SESSION capsule, if any.
        self._close_info: Optional[Tuple[int, bytes]] = None
        self._capsule_decoder_for_session_stream: H3CapsuleDecoder =\
            H3CapsuleDecoder()
        self._allow_calling_session_closed = True
        self._allow_datagrams = False
    def quic_event_received(self, event: QuicEvent) -> None:
        # Entry point for raw QUIC events delivered by aioquic.
        if isinstance(event, ProtocolNegotiated):
            self._http = H3ConnectionWithDatagram04(
                self._quic, enable_webtransport=True)
            # Without draft-04 there is no explicit datagram registration,
            # so datagrams are allowed immediately.
            if not self._http.supports_h3_datagram_04:
                self._allow_datagrams = True
        if self._http is not None:
            for http_event in self._http.handle_event(event):
                self._h3_event_received(http_event)
        if isinstance(event, ConnectionTerminated):
            self._call_session_closed(close_info=None, abruptly=True)
        if isinstance(event, StreamReset):
            if self._handler:
                self._handler.stream_reset(event.stream_id, event.error_code)
    def _h3_event_received(self, event: H3Event) -> None:
        # Dispatch one HTTP/3-level event.
        if isinstance(event, HeadersReceived):
            # Convert from List[Tuple[bytes, bytes]] to Dict[bytes, bytes].
            # Only the last header will be kept when there are duplicate
            # headers.
            headers = {}
            for header, value in event.headers:
                headers[header] = value
            method = headers.get(b":method")
            protocol = headers.get(b":protocol")
            if method == b"CONNECT" and protocol == b"webtransport":
                self._session_stream_id = event.stream_id
                self._handshake_webtransport(event, headers)
            else:
                # Anything other than a WebTransport CONNECT is rejected.
                self._send_error_response(event.stream_id, 400)
        if isinstance(event, DataReceived) and\
           self._session_stream_id == event.stream_id:
            # Pre-draft-04 peers must not send data on the session stream.
            if self._http and not self._http.supports_h3_datagram_04 and\
               len(event.data) > 0:
                raise ProtocolError('Unexpected data on the session stream')
            self._receive_data_on_session_stream(
                event.data, event.stream_ended)
        elif self._handler is not None:
            if isinstance(event, WebTransportStreamDataReceived):
                self._handler.stream_data_received(
                    stream_id=event.stream_id,
                    data=event.data,
                    stream_ended=event.stream_ended)
            elif isinstance(event, DatagramReceived):
                if self._allow_datagrams:
                    self._handler.datagram_received(data=event.data)
    def _receive_data_on_session_stream(self, data: bytes, fin: bool) -> None:
        # The session stream carries a sequence of capsules; decode them
        # incrementally as bytes arrive.
        self._capsule_decoder_for_session_stream.append(data)
        if fin:
            self._capsule_decoder_for_session_stream.final()
        for capsule in self._capsule_decoder_for_session_stream:
            if capsule.type in {CapsuleType.DATAGRAM,
                                CapsuleType.REGISTER_DATAGRAM_CONTEXT,
                                CapsuleType.CLOSE_DATAGRAM_CONTEXT}:
                raise ProtocolError(
                    f"Unimplemented capsule type: {capsule.type}")
            if capsule.type in {CapsuleType.REGISTER_DATAGRAM_NO_CONTEXT,
                                CapsuleType.CLOSE_WEBTRANSPORT_SESSION}:
                # We'll handle this case below.
                pass
            else:
                # We should ignore unknown capsules.
                continue
            if self._close_info is not None:
                # No capsule may follow a session-close capsule.
                raise ProtocolError((
                    "Receiving a capsule with type = {} after receiving " +
                    "CLOSE_WEBTRANSPORT_SESSION").format(capsule.type))
            if capsule.type == CapsuleType.REGISTER_DATAGRAM_NO_CONTEXT:
                buffer = Buffer(data=capsule.data)
                format_type = buffer.pull_uint_var()
                # https://ietf-wg-webtrans.github.io/draft-ietf-webtrans-http3/draft-ietf-webtrans-http3.html#name-datagram-format-type
                WEBTRANPORT_FORMAT_TYPE = 0xff7c00
                if format_type != WEBTRANPORT_FORMAT_TYPE:
                    raise ProtocolError(
                        "Unexpected datagram format type: {}".format(
                            format_type))
                self._allow_datagrams = True
            elif capsule.type == CapsuleType.CLOSE_WEBTRANSPORT_SESSION:
                buffer = Buffer(data=capsule.data)
                code = buffer.pull_uint32()
                # 4 bytes for the uint32.
                reason = buffer.pull_bytes(len(capsule.data) - 4)
                # TODO(yutakahirano): Make sure `reason` is a UTF-8 text.
                self._close_info = (code, reason)
                if fin:
                    self._call_session_closed(self._close_info, abruptly=False)
    def _send_error_response(self, stream_id: int, status_code: int) -> None:
        # Reply with only server/status headers and close the stream.
        assert self._http is not None
        headers = [(b"server", SERVER_NAME.encode()),
                   (b":status", str(status_code).encode())]
        self._http.send_headers(stream_id=stream_id,
                                headers=headers,
                                end_stream=True)
    def _handshake_webtransport(self, event: HeadersReceived,
                                request_headers: Dict[bytes, bytes]) -> None:
        # Complete (or reject) the CONNECT handshake for a new session.
        assert self._http is not None
        path = request_headers.get(b":path")
        if path is None:
            # `:path` must be provided.
            self._send_error_response(event.stream_id, 400)
            return
        # Create a handler using `:path`.
        try:
            self._handler = self._create_event_handler(
                session_id=event.stream_id,
                path=path,
                request_headers=event.headers)
        except OSError:
            # No handler script at that path.
            self._send_error_response(event.stream_id, 404)
            return
        response_headers = [
            (b"server", SERVER_NAME.encode()),
            (b"sec-webtransport-http3-draft", b"draft02"),
        ]
        # The handler may rewrite/extend the response headers, including
        # setting its own :status.
        self._handler.connect_received(response_headers=response_headers)
        status_code = None
        for name, value in response_headers:
            if name == b":status":
                status_code = value
                break
        if not status_code:
            response_headers.append((b":status", b"200"))
        self._http.send_headers(stream_id=event.stream_id,
                                headers=response_headers)
        # The session is only considered established on a 200 response.
        if status_code is None or status_code == b"200":
            self._handler.session_established()
    def _create_event_handler(self, session_id: int, path: bytes,
                              request_headers: List[Tuple[bytes, bytes]]) -> Any:
        # Load and execute the handler script found under _doc_root; the
        # callables it defines become this session's event callbacks.
        parsed = urlparse(path.decode())
        file_path = os.path.join(_doc_root, parsed.path.lstrip("/"))
        callbacks = {"__file__": file_path}
        with open(file_path) as f:
            exec(compile(f.read(), path, "exec"), callbacks)
        session = WebTransportSession(self, session_id, request_headers)
        return WebTransportEventHandler(session, callbacks)
    def _call_session_closed(
            self, close_info: Optional[Tuple[int, bytes]],
            abruptly: bool) -> None:
        # Guard so the handler's session_closed callback runs at most once.
        allow_calling_session_closed = self._allow_calling_session_closed
        self._allow_calling_session_closed = False
        if self._handler and allow_calling_session_closed:
            self._handler.session_closed(close_info, abruptly)
class WebTransportSession:
    """
    A WebTransport session.

    Thin wrapper handed to handler scripts: exposes stream/datagram
    operations on the underlying H3 connection plus a cross-session Stash.
    """
    def __init__(self, protocol: WebTransportH3Protocol, session_id: int,
                 request_headers: List[Tuple[bytes, bytes]]) -> None:
        self.session_id = session_id
        self.request_headers = request_headers
        self._protocol: WebTransportH3Protocol = protocol
        self._http: H3Connection = protocol._http
        # Use a shared default path for all handlers so that different
        # WebTransport sessions can access the same store easily.
        self._stash_path = '/webtransport/handlers'
        self._stash: Optional[stash.Stash] = None
        self._dict_for_handlers: Dict[str, Any] = {}
    @property
    def stash(self) -> stash.Stash:
        """A Stash object for storing cross-session state."""
        if self._stash is None:
            # Created lazily; configuration comes from the environment.
            address, authkey = stash.load_env_config()
            self._stash = stash.Stash(self._stash_path, address, authkey)
        return self._stash
    @property
    def dict_for_handlers(self) -> Dict[str, Any]:
        """A dictionary that handlers can attach arbitrary data."""
        return self._dict_for_handlers
    def stream_is_unidirectional(self, stream_id: int) -> bool:
        """Return True if the stream is unidirectional."""
        return stream_is_unidirectional(stream_id)
    def close(self, close_info: Optional[Tuple[int, bytes]]) -> None:
        """
        Close the session.

        :param close_info: The close information to send, as an
            (error code, reason bytes) pair; None closes without a capsule.
        """
        self._protocol._allow_calling_session_closed = False
        assert self._protocol._session_stream_id is not None
        session_stream_id = self._protocol._session_stream_id
        if close_info is not None:
            code = close_info[0]
            reason = close_info[1]
            buffer = Buffer(capacity=len(reason) + 4)
            buffer.push_uint32(code)
            buffer.push_bytes(reason)
            capsule =\
                H3Capsule(CapsuleType.CLOSE_WEBTRANSPORT_SESSION, buffer.data)
            self._http.send_data(session_stream_id, capsule.encode(), end_stream=False)
        # Ending the session stream terminates the session.
        self._http.send_data(session_stream_id, b'', end_stream=True)
        # TODO(yutakahirano): Reset all other streams.
        # TODO(yutakahirano): Reject future stream open requests
        # We need to wait for the stream data to arrive at the client, and then
        # we need to close the connection. At this moment we're relying on the
        # client's behavior.
        # TODO(yutakahirano): Implement the above.
    def create_unidirectional_stream(self) -> int:
        """
        Create a unidirectional WebTransport stream and return the stream ID.
        """
        return self._http.create_webtransport_stream(
            session_id=self.session_id, is_unidirectional=True)
    def create_bidirectional_stream(self) -> int:
        """
        Create a bidirectional WebTransport stream and return the stream ID.
        """
        stream_id = self._http.create_webtransport_stream(
            session_id=self.session_id, is_unidirectional=False)
        # TODO(bashi): Remove this workaround when aioquic supports receiving
        # data on server-initiated bidirectional streams.
        stream = self._http._get_or_create_stream(stream_id)
        assert stream.frame_type is None
        assert stream.session_id is None
        stream.frame_type = FrameType.WEBTRANSPORT_STREAM
        stream.session_id = self.session_id
        return stream_id
    def send_stream_data(self,
                         stream_id: int,
                         data: bytes,
                         end_stream: bool = False) -> None:
        """
        Send data on the specific stream.

        :param stream_id: The stream ID on which to send the data.
        :param data: The data to send.
        :param end_stream: If set to True, the stream will be closed.
        """
        self._http._quic.send_stream_data(stream_id=stream_id,
                                          data=data,
                                          end_stream=end_stream)
    def send_datagram(self, data: bytes) -> None:
        """
        Send data using a datagram frame.

        :param data: The data to send.
        """
        if not self._protocol._allow_datagrams:
            # Fixed message (was "now allowed") and use warning() --
            # Logger.warn is a deprecated alias.
            _logger.warning(
                "Sending a datagram while that's not allowed - discarding it")
            return
        flow_id = self.session_id
        if self._http.supports_h3_datagram_04:
            # The REGISTER_DATAGRAM_NO_CONTEXT capsule was on the session
            # stream, so we must have the ID of the stream.
            assert self._protocol._session_stream_id is not None
            # TODO(yutakahirano): Make sure if this is the correct logic.
            # Chrome always use 0 for the initial stream and the initial flow
            # ID, we cannot check the correctness with it.
            flow_id = self._protocol._session_stream_id // 4
        self._http.send_datagram(flow_id=flow_id, data=data)
    def stop_stream(self, stream_id: int, code: int) -> None:
        """
        Send a STOP_SENDING frame to the given stream.

        :param code: the reason of the error.
        """
        self._http._quic.stop_stream(stream_id, code)
    def reset_stream(self, stream_id: int, code: int) -> None:
        """
        Send a RESET_STREAM frame to the given stream.

        :param code: the reason of the error.
        """
        self._http._quic.reset_stream(stream_id, code)
class WebTransportEventHandler:
    """
    Dispatches protocol events to the optional callbacks defined by a
    WebTransport handler script; callbacks the script did not define are
    skipped.
    """
    def __init__(self, session: WebTransportSession,
                 callbacks: Dict[str, Any]) -> None:
        self._session = session
        self._callbacks = callbacks
    def _run_callback(self, callback_name: str,
                      *args: Any, **kwargs: Any) -> None:
        # Handler scripts implement any subset of the callbacks.
        if callback_name not in self._callbacks:
            return
        try:
            self._callbacks[callback_name](*args, **kwargs)
        except Exception as e:
            # A buggy handler must not take down the server; log and keep
            # going.  (Logger.warn is a deprecated alias of warning.)
            _logger.warning(str(e))
            traceback.print_exc()
    def connect_received(self, response_headers: List[Tuple[bytes,
                                                            bytes]]) -> None:
        self._run_callback("connect_received", self._session.request_headers,
                           response_headers)
    def session_established(self) -> None:
        self._run_callback("session_established", self._session)
    def stream_data_received(self, stream_id: int, data: bytes,
                             stream_ended: bool) -> None:
        self._run_callback("stream_data_received", self._session, stream_id,
                           data, stream_ended)
    def datagram_received(self, data: bytes) -> None:
        self._run_callback("datagram_received", self._session, data)
    def session_closed(
            self,
            close_info: Optional[Tuple[int, bytes]],
            abruptly: bool) -> None:
        self._run_callback(
            "session_closed", self._session, close_info, abruptly=abruptly)
    def stream_reset(self, stream_id: int, error_code: int) -> None:
        self._run_callback(
            "stream_reset", self._session, stream_id, error_code)
class SessionTicketStore:
    """
    Simple in-memory store for TLS session tickets.

    Tickets are keyed by their raw ticket bytes so they can be looked up
    later for session resumption.
    """

    def __init__(self) -> None:
        self.tickets: Dict[bytes, SessionTicket] = {}

    def add(self, ticket: SessionTicket) -> None:
        # The raw ticket bytes serve as the lookup key.
        key = ticket.ticket
        self.tickets[key] = ticket

    def pop(self, label: bytes) -> Optional[SessionTicket]:
        # Remove and return the stored ticket, or None when unknown.
        return self.tickets.pop(label, None)
class WebTransportH3Server:
    """
    A WebTransport over HTTP/3 for testing.

    :param host: Host from which to serve.
    :param port: Port from which to serve.
    :param doc_root: Document root for serving handlers.
    :param cert_path: Path to certificate file to use.
    :param key_path: Path to key file to use.
    :param logger: a Logger object for this server.
    """

    def __init__(self, host: str, port: int, doc_root: str, cert_path: str,
                 key_path: str, logger: Optional[logging.Logger]) -> None:
        self.host = host
        self.port = port
        self.doc_root = doc_root
        self.cert_path = cert_path
        self.key_path = key_path
        self.started = False
        # The doc root and logger are published as module-level globals so
        # protocol/handler objects created elsewhere in the module can reach
        # them without a reference to this server instance.
        global _doc_root
        _doc_root = self.doc_root
        global _logger
        if logger is not None:
            _logger = logger

    def start(self) -> None:
        """Start the server."""
        # The asyncio event loop runs on a dedicated daemon thread so that
        # start() returns immediately to the caller.
        self.server_thread = threading.Thread(
            target=self._start_on_server_thread, daemon=True)
        self.server_thread.start()
        self.started = True

    def _start_on_server_thread(self) -> None:
        # Runs on the server thread: builds the QUIC/H3 configuration,
        # starts serving, then blocks in run_forever() until stop().
        configuration = QuicConfiguration(
            alpn_protocols=H3_ALPN,
            is_client=False,
            max_datagram_frame_size=65536,
        )

        _logger.info("Starting WebTransport over HTTP/3 server on %s:%s",
                     self.host, self.port)

        configuration.load_cert_chain(self.cert_path, self.key_path)

        ticket_store = SessionTicketStore()

        # This thread owns its own event loop; it must not be shared with
        # the thread that called start().
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
        self.loop.run_until_complete(
            serve(
                self.host,
                self.port,
                configuration=configuration,
                create_protocol=WebTransportH3Protocol,
                session_ticket_fetcher=ticket_store.pop,
                session_ticket_handler=ticket_store.add,
            ))
        self.loop.run_forever()

    def stop(self) -> None:
        """Stop the server."""
        if self.started:
            # Schedule loop shutdown on the server thread, then wait for the
            # thread to exit before reporting the server as stopped.
            asyncio.run_coroutine_threadsafe(self._stop_on_server_thread(),
                                             self.loop)
            self.server_thread.join()
            _logger.info("Stopped WebTransport over HTTP/3 server on %s:%s",
                         self.host, self.port)
        self.started = False

    async def _stop_on_server_thread(self) -> None:
        # Executed on the server thread's loop; stopping it unblocks
        # run_forever() in _start_on_server_thread().
        self.loop.stop()
def server_is_running(host: str, port: int, timeout: float) -> bool:
    """
    Return whether a WebTransport over HTTP/3 server answers at the given
    `host` and `port` within `timeout` seconds.
    """
    event_loop = asyncio.get_event_loop()
    probe = _connect_server_with_timeout(host, port, timeout)
    return event_loop.run_until_complete(probe)
async def _connect_server_with_timeout(host: str, port: int, timeout: float) -> bool:
    """Try to reach the server, giving up after `timeout` seconds."""
    try:
        await asyncio.wait_for(_connect_to_server(host, port), timeout=timeout)
        return True
    except asyncio.TimeoutError:
        _logger.warning("Failed to connect WebTransport over HTTP/3 server")
        return False
async def _connect_to_server(host: str, port: int) -> None:
    """Open a QUIC connection to host:port and perform a single ping."""
    configuration = QuicConfiguration(
        alpn_protocols=H3_ALPN,
        is_client=True,
        # Test servers use self-signed certificates, so skip verification.
        verify_mode=ssl.CERT_NONE,
    )

    async with connect(host, port, configuration=configuration) as protocol:
        await protocol.ping()
| {
"content_hash": "8716dc8b1a3bc4a8df1498313f7c8e50",
"timestamp": "",
"source": "github",
"line_count": 543,
"max_line_length": 135,
"avg_line_length": 40.58195211786372,
"alnum_prop": 0.6056906879651479,
"repo_name": "nwjs/chromium.src",
"id": "d3031b4c0c2f531f97720249a0596f04c8c63d75",
"size": "22087",
"binary": false,
"copies": "13",
"ref": "refs/heads/nw70",
"path": "third_party/wpt_tools/wpt/tools/webtransport/h3/webtransport_h3_server.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Main script to run video classification."""
import argparse
import sys
import time
import cv2
from video_classifier import VideoClassifier
from video_classifier import VideoClassifierOptions
# Visualization parameters for the on-screen overlay text.
_ROW_SIZE = 20  # pixels
_LEFT_MARGIN = 24  # pixels
_TEXT_COLOR = (0, 0, 255)  # red (OpenCV uses BGR channel order)
_FONT_SIZE = 1
_FONT_THICKNESS = 1
_MODEL_FPS = 5  # Ensure the input images are fed to the model at this fps.
_MODEL_FPS_ERROR_RANGE = 0.1  # Acceptable error range in fps.
def run(model: str, label: str, max_results: int, num_threads: int,
        camera_id: int, width: int, height: int) -> None:
  """Continuously run inference on images acquired from the camera.

  Args:
    model: Name of the TFLite video classification model.
    label: Name of the video classification label.
    max_results: Max of classification results.
    num_threads: Number of CPU threads to run the model.
    camera_id: The camera id to be passed to OpenCV.
    width: The width of the frame captured from the camera.
    height: The height of the frame captured from the camera.
  """

  # Initialize the video classification model
  options = VideoClassifierOptions(
      num_threads=num_threads, max_results=max_results)
  classifier = VideoClassifier(model, label, options)

  # Variables to calculate FPS
  counter, fps, last_inference_start_time, time_per_infer = 0, 0, 0, 0
  categories = []

  # Start capturing video input from the camera
  cap = cv2.VideoCapture(camera_id)
  cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
  cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

  # Continuously capture images from the camera and run inference
  while cap.isOpened():
    success, image = cap.read()
    if not success:
      sys.exit(
          'ERROR: Unable to read from webcam. Please verify your webcam settings.'
      )
    counter += 1

    # Mirror the image
    image = cv2.flip(image, 1)

    # Ensure that frames are feed to the model at {_MODEL_FPS} frames per second
    # as required in the model specs.
    current_frame_start_time = time.time()
    diff = current_frame_start_time - last_inference_start_time
    if diff * _MODEL_FPS >= (1 - _MODEL_FPS_ERROR_RANGE):
      # Store the time when inference starts.
      last_inference_start_time = current_frame_start_time

      # Calculate the inference FPS
      fps = 1.0 / diff

      # Convert the frame to RGB as required by the TFLite model.
      frame_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

      # Feed the frame to the video classification model.
      categories = classifier.classify(frame_rgb)

      # Calculate time required per inference.
      time_per_infer = time.time() - current_frame_start_time

    # Notes: Frames that aren't fed to the model are still displayed to make the
    # video look smooth. We'll show classification results from the latest
    # classification run on the screen.
    # Show the FPS .
    fps_text = 'Current FPS = {0:.1f}. Expect: {1}'.format(fps, _MODEL_FPS)
    text_location = (_LEFT_MARGIN, _ROW_SIZE)
    cv2.putText(image, fps_text, text_location, cv2.FONT_HERSHEY_PLAIN,
                _FONT_SIZE, _TEXT_COLOR, _FONT_THICKNESS)

    # Show the time per inference.
    time_per_infer_text = 'Time per inference: {0}ms'.format(
        int(time_per_infer * 1000))
    text_location = (_LEFT_MARGIN, _ROW_SIZE * 2)
    cv2.putText(image, time_per_infer_text, text_location,
                cv2.FONT_HERSHEY_PLAIN, _FONT_SIZE, _TEXT_COLOR,
                _FONT_THICKNESS)

    # Show classification results on the image.
    for idx, category in enumerate(categories):
      class_name = category.label
      probability = round(category.score, 2)
      result_text = class_name + ' (' + str(probability) + ')'
      # Skip the first 2 lines occupied by the fps and time per inference.
      text_location = (_LEFT_MARGIN, (idx + 3) * _ROW_SIZE)
      cv2.putText(image, result_text, text_location, cv2.FONT_HERSHEY_PLAIN,
                  _FONT_SIZE, _TEXT_COLOR, _FONT_THICKNESS)

    # Stop the program if the ESC key is pressed.
    # NOTE(review): waitKey runs before imshow, so the frame annotated in this
    # iteration is shown only if ESC was not pressed — confirm intended.
    if cv2.waitKey(1) == 27:
      break
    cv2.imshow('video_classification', image)

  cap.release()
  cv2.destroyAllWindows()
def main():
  """Parse command-line flags and launch the classification loop."""
  parser = argparse.ArgumentParser(
      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  # Each flag is optional and carries its own help text and default.
  for flag, help_text, default in (
      ('--model', 'Name of video classification model.',
       'movinet_a0_int8.tflite'),
      ('--label', 'Name of video classification label.',
       'kinetics600_label_map.txt'),
      ('--maxResults', 'Max of classification results.', 3),
      ('--numThreads', 'Number of CPU threads to run the model.', 4),
      ('--cameraId', 'Id of camera.', 0),
      ('--frameWidth', 'Width of frame to capture from camera.', 640),
      ('--frameHeight', 'Height of frame to capture from camera.', 480),
  ):
    parser.add_argument(flag, help=help_text, required=False, default=default)
  args = parser.parse_args()

  run(args.model, args.label, int(args.maxResults), int(args.numThreads),
      int(args.cameraId), args.frameWidth, args.frameHeight)
if __name__ == '__main__':
main()
| {
"content_hash": "600b2594236b5ebf220cd6466d167973",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 82,
"avg_line_length": 34.36477987421384,
"alnum_prop": 0.66599560761347,
"repo_name": "tensorflow/examples",
"id": "9a53748ca5bcf28b685463db56e33318a641a265",
"size": "6072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lite/examples/video_classification/raspberry_pi/classify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "106227"
},
{
"name": "CMake",
"bytes": "1553"
},
{
"name": "CSS",
"bytes": "4746"
},
{
"name": "Dockerfile",
"bytes": "467"
},
{
"name": "HTML",
"bytes": "12491"
},
{
"name": "Java",
"bytes": "305092"
},
{
"name": "JavaScript",
"bytes": "24461"
},
{
"name": "Jupyter Notebook",
"bytes": "1733035"
},
{
"name": "Kotlin",
"bytes": "631463"
},
{
"name": "Objective-C",
"bytes": "14639"
},
{
"name": "Objective-C++",
"bytes": "14293"
},
{
"name": "Python",
"bytes": "1232357"
},
{
"name": "Ruby",
"bytes": "3744"
},
{
"name": "Shell",
"bytes": "41573"
},
{
"name": "Starlark",
"bytes": "17498"
},
{
"name": "Swift",
"bytes": "553535"
}
],
"symlink_target": ""
} |
import os
# Django settings for the django-cms example project.
PROJECT_DIR = os.path.dirname(__file__)

# Development defaults; turn off in production.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
)
CACHE_BACKEND = 'locmem:///'
MANAGERS = ADMINS

# SQLite database used by the example site.
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = 'cms.sqlite'
TEST_DATABASE_CHARSET = "utf8"
TEST_DATABASE_COLLATION = "utf8_general_ci"
DATABASE_SUPPORTS_TRANSACTIONS = True

TIME_ZONE = 'America/Chicago'
SITE_ID = 1
USE_I18N = True

# Static/media locations. CMS media is served from the cms package checkout.
MEDIA_ROOT = os.path.join(PROJECT_DIR, 'media/')
CMS_MEDIA_ROOT = os.path.join(PROJECT_DIR, '../cms/media/cms/')
MEDIA_URL = '/media/'
ADMIN_MEDIA_PREFIX = '/media/admin/'
FIXTURE_DIRS = [os.path.join(PROJECT_DIR, 'fixtures')]

# NOTE(review): hard-coded secret key — fine for the bundled example only.
SECRET_KEY = '*xq7m@)*f2awoj!spa0(jibsrz9%c0d=e(g)v*!17y(vx0ue_3'

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.core.context_processors.auth",
    "django.core.context_processors.i18n",
    "django.core.context_processors.debug",
    "django.core.context_processors.request",
    "django.core.context_processors.media",
    'django.core.context_processors.csrf',
    "cms.context_processors.media",
)
INTERNAL_IPS = ('127.0.0.1',)
# Order matters: the multilingual URL middleware must run early so CMS
# middlewares below see the language-resolved request.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'cms.middleware.multilingual.MultilingualURLMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.doc.XViewMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'cms.middleware.media.PlaceholderMediaMiddleware',
    'cms.middleware.user.CurrentUserMiddleware',
    'cms.middleware.page.CurrentPageMiddleware',
    'cms.middleware.toolbar.ToolbarMiddleware',
)
ROOT_URLCONF = 'example.urls'
TEMPLATE_DIRS = (
    os.path.join(PROJECT_DIR, 'templates'),
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.admin',
    'django.contrib.sites',
    'cms',
    'publisher',
    'menus',
    'cms.plugins.text',
    'cms.plugins.picture',
    'cms.plugins.file',
    'cms.plugins.flash',
    'cms.plugins.link',
    'cms.plugins.snippet',
    'cms.plugins.googlemap',
    'cms.plugins.teaser',
    'cms.plugins.video',
    'cms.plugins.twitter',
    'cms.plugins.inherit',
    'mptt',
    'example.sampleapp',
    # 'south',
)

# Identity marker so strings below are collected for translation without
# importing Django's translation machinery at settings time.
gettext = lambda s: s

LANGUAGE_CODE = "en"
LANGUAGES = (
    ('fr', gettext('French')),
    ('de', gettext('German')),
    ('en', gettext('English')),
    ('pt-BR', gettext("Brazil")),
)
# Per-language fallback chains used by the CMS.
CMS_LANGUAGE_CONF = {
    'de': ['fr', 'en'],
    'en': ['fr', 'de'],
}
# Languages enabled per Django site id.
CMS_SITE_LANGUAGES = {
    1: ['fr', 'de', 'en', 'pt-BR'],
    2: ['de', 'en'],
}
APPEND_SLASH = True

# CMS page templates offered in the admin.
CMS_TEMPLATES = (
    ('col_two.html', gettext('two columns')),
    ('col_three.html', gettext('three columns')),
    ('nav_playground.html', gettext('navigation examples')),
)

# Restricts which plugins may be placed in each placeholder.
CMS_PLACEHOLDER_CONF = {
    'col_sidebar': {
        'plugins': ('FilePlugin', 'FlashPlugin', 'LinkPlugin', 'PicturePlugin', 'TextPlugin', 'SnippetPlugin'),
        'name': gettext("sidebar column")
    },
    'col_left': {
        'plugins': ('FilePlugin', 'FlashPlugin', 'LinkPlugin', 'PicturePlugin', 'TextPlugin', 'SnippetPlugin', 'GoogleMapPlugin',),
        'name': gettext("left column")
    },
    'col_right': {
        'plugins': ('FilePlugin', 'FlashPlugin', 'LinkPlugin', 'PicturePlugin', 'TextPlugin', 'SnippetPlugin', 'GoogleMapPlugin',),
        'name': gettext("right column")
    },
}

# CMS feature toggles.
CMS_SOFTROOT = True
CMS_MODERATOR = True
CMS_PERMISSION = True
CMS_REDIRECTS = True
CMS_SEO_FIELDS = True
CMS_FLAT_URLS = False
CMS_MENU_TITLE_OVERWRITE = True
CMS_HIDE_UNTRANSLATED = False
CMS_URL_OVERWRITE = True

SOUTH_TESTS_MIGRATE = False
try:
from local_settings import *
except ImportError:
pass | {
"content_hash": "1f736f6e0a213fdc813c31388b50a33d",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 130,
"avg_line_length": 23.879518072289155,
"alnum_prop": 0.6458123107971746,
"repo_name": "dibaunaumh/tikal-corp-website",
"id": "ddb48f7cb2f24ccb5042ba048ca6eb2953a4f3af",
"size": "3999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "780451"
},
{
"name": "PHP",
"bytes": "1052"
},
{
"name": "Python",
"bytes": "1558449"
},
{
"name": "Shell",
"bytes": "379"
}
],
"symlink_target": ""
} |
from pyramid.renderers import render_to_response
from groupdocs.ApiClient import ApiClient
from groupdocs.AntApi import AntApi
from groupdocs.GroupDocsRequestSigner import GroupDocsRequestSigner
# Checking value on null
def IsNotNull(value):
    """Return True when value is neither None nor an empty sized object."""
    if value is None:
        return False
    return len(value) > 0
# Set variables and get POST data
def sample13(request):
    # Sets annotation collaborators for a document through the GroupDocs
    # Annotation API and renders the sample13 template with either the
    # submitted values or an error message.
    clientId = request.POST.get('client_id')
    privateKey = request.POST.get('private_key')
    fileGuId = request.POST.get('fileId')
    email = request.POST.get('email')
    # Checking required parameters
    if IsNotNull(clientId) == False or IsNotNull(privateKey) == False or IsNotNull(fileGuId) == False or IsNotNull(email) == False:
        return render_to_response('__main__:templates/sample13.pt',
            { 'error' : 'You do not enter all parameters' })

    ### Create Signer, ApiClient and Annotation Api objects
    # Create signer object
    signer = GroupDocsRequestSigner(privateKey)
    # Create apiClient object
    apiClient = ApiClient(signer)
    # Create Annotation object
    ant = AntApi(apiClient)

    try:
        # Make a request to Annotation API
        ant.SetAnnotationCollaborators(clientId, fileGuId, "v2.0", body=[email])
    except Exception, e:
        return render_to_response('__main__:templates/sample13.pt',
            { 'error' : str(e) })

    # If request was successfull - set variables for template
    return render_to_response('__main__:templates/sample13.pt',
        {
            'userId' : clientId,
            'privateKey' : privateKey,
            'fileId' : fileGuId,
            'email' : email
        },
request=request) | {
"content_hash": "68530e5b4c9be14a8827b85d3c93f403",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 131,
"avg_line_length": 35,
"alnum_prop": 0.656547619047619,
"repo_name": "liosha2007/temporary-groupdocs-python-sdk",
"id": "c3cd4312179c8d4305d79a10898dfb4350c85ab9",
"size": "1790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/api-samples/inc_samples/sample13.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1070081"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import argparse
import datetime
import httplib
import glob
import imp
import json
import os
import re
import shutil
import subprocess
import sys
import textwrap
import time
import urllib
import urllib2
from git_recipes import GitRecipesMixin
from git_recipes import GitFailedException
DAY_IN_SECONDS = 24 * 60 * 60

# Matches legacy push messages like "... (based on <git hash>)".
PUSH_MSG_GIT_RE = re.compile(r".* \(based on (?P<git_rev>[a-fA-F0-9]+)\)$")
# Matches new-style version commits, e.g. "Version 1.2.3".
PUSH_MSG_NEW_RE = re.compile(r"^Version \d+\.\d+\.\d+$")
VERSION_FILE = os.path.join("include", "v8-version.h")
WATCHLISTS_FILE = "WATCHLISTS"
RELEASE_WORKDIR = "/tmp/v8-release-scripts-work-dir/"

# V8 base directory.
V8_BASE = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# Add our copy of depot_tools to the PATH as many scripts use tools from there,
# e.g. git-cl, fetch, git-new-branch etc, and we can not depend on depot_tools
# being in the PATH on the LUCI bots.
path_to_depot_tools = os.path.join(V8_BASE, 'third_party', 'depot_tools')
new_path = path_to_depot_tools + os.pathsep + os.environ.get('PATH')
os.environ['PATH'] = new_path
def TextToFile(text, file_name):
  """Overwrite file_name with the given text."""
  with open(file_name, "w") as out_file:
    out_file.write(text)
def AppendToFile(text, file_name):
  """Append the given text to the end of file_name."""
  with open(file_name, "a") as out_file:
    out_file.write(text)
def LinesInFile(file_name):
  """Yield the lines of file_name one at a time (newlines preserved)."""
  with open(file_name) as in_file:
    for current_line in in_file:
      yield current_line
def FileToText(file_name):
  """Return the full contents of file_name as a single string."""
  with open(file_name) as in_file:
    return in_file.read()
def MSub(rexp, replacement, text):
  """Shorthand for a multiline-aware re.sub over the whole text."""
  pattern = re.compile(rexp, re.MULTILINE)
  return pattern.sub(replacement, text)
def SortingKey(version):
  """Key for sorting version number strings: '3.11' > '3.2.1.1'"""
  # list() is required for Python 3 compatibility: there map() returns a
  # lazy iterator with no len() and no append(); on Python 2 this is a
  # no-op copy.
  version_keys = list(map(int, version.split(".")))
  # Fill up to full version numbers to normalize comparison.
  while len(version_keys) < 4:  # pragma: no cover
    version_keys.append(0)
  # Fill digits so lexicographic string order matches numeric order.
  return ".".join(map("{0:04d}".format, version_keys))
# Some commands don't like the pipe, e.g. calling vi from within the script or
# from subscripts like git cl upload.
def Command(cmd, args="", prefix="", pipe=True, cwd=None):
  """Run a shell command, returning its output (pipe=True), the exit status
  (pipe=False), or None if the command failed."""
  work_dir = cwd or os.getcwd()
  # TODO(machenbach): Use timeout.
  cmd_line = "%s %s %s" % (prefix, cmd, args)
  print("Command: %s" % cmd_line)
  print("in %s" % work_dir)
  sys.stdout.flush()
  try:
    # check_output captures stdout; check_call inherits it.
    runner = subprocess.check_output if pipe else subprocess.check_call
    return runner(cmd_line, shell=True, cwd=work_dir)
  except subprocess.CalledProcessError:
    return None
  finally:
    sys.stdout.flush()
    sys.stderr.flush()
def SanitizeVersionTag(tag):
  """Return the bare version for tags like "1.2.3", "1.2.3.4" or
  "tags/1.2.3"; return None for anything that is not a version tag."""
  bare = re.compile(r"^\d+\.\d+\.\d+(?:\.\d+)?$")
  prefixed = re.compile(r"^tags\/\d+\.\d+\.\d+(?:\.\d+)?$")
  if bare.match(tag):
    return tag
  if prefixed.match(tag):
    # Strip the "tags/" prefix introduced by packed refs.
    return tag[len("tags/"):]
  return None
def NormalizeVersionTags(version_tags):
  """Return the sanitized version strings for all valid version tags."""
  # Remove tags/ prefix because of packed refs; drop non-version tags.
  sanitized = [SanitizeVersionTag(tag) for tag in version_tags]
  return [tag for tag in sanitized if tag is not None]
# Wrapper for side effects.
class SideEffectHandler(object):  # pragma: no cover
  """Funnels all real-world side effects (subprocesses, stdin, network,
  clock) through one object so tests can substitute a fake handler."""

  def Call(self, fun, *args, **kwargs):
    return fun(*args, **kwargs)

  def Command(self, cmd, args="", prefix="", pipe=True, cwd=None):
    return Command(cmd, args, prefix, pipe, cwd=cwd)

  def ReadLine(self):
    # Read one line of user input from stdin, without trailing newline.
    return sys.stdin.readline().strip()

  def ReadURL(self, url, params=None):
    # pylint: disable=E1121
    # 60-second timeout; a non-None params turns this into a POST.
    url_fh = urllib2.urlopen(url, params, 60)
    try:
      return url_fh.read()
    finally:
      url_fh.close()

  def ReadClusterFuzzAPI(self, api_key, **params):
    # POSTs the given parameters (plus the API key) to the ClusterFuzz
    # backend and returns the decoded JSON response.
    params["api_key"] = api_key.strip()
    params = urllib.urlencode(params)

    headers = {"Content-type": "application/x-www-form-urlencoded"}

    conn = httplib.HTTPSConnection("backend-dot-cluster-fuzz.appspot.com")
    conn.request("POST", "/_api/", params, headers)

    response = conn.getresponse()
    data = response.read()

    try:
      return json.loads(data)
    except:
      print(data)
      print("ERROR: Could not read response. Is your key valid?")
      raise

  def Sleep(self, seconds):
    time.sleep(seconds)

  def GetUTCStamp(self):
    # Current UTC time as a POSIX-style timestamp.
    return time.mktime(datetime.datetime.utcnow().timetuple())
DEFAULT_SIDE_EFFECT_HANDLER = SideEffectHandler()
class NoRetryException(Exception):
  """Raised by a retried callback to abort Step.Retry immediately."""
  pass
class VCInterface(object):
  """Abstract interface to the version-control system used by the steps."""

  def InjectStep(self, step):
    # The step provides command execution and logging for implementations.
    self.step=step

  def Pull(self):
    raise NotImplementedError()

  def Fetch(self):
    raise NotImplementedError()

  def GetTags(self):
    raise NotImplementedError()

  def GetBranches(self):
    raise NotImplementedError()

  def MasterBranch(self):
    raise NotImplementedError()

  def CandidateBranch(self):
    raise NotImplementedError()

  def RemoteMasterBranch(self):
    raise NotImplementedError()

  def RemoteCandidateBranch(self):
    raise NotImplementedError()

  def RemoteBranch(self, name):
    raise NotImplementedError()

  def CLLand(self):
    raise NotImplementedError()

  def Tag(self, tag, remote, message):
    """Sets a tag for the current commit.

    Assumptions: The commit already landed and the commit message is unique.
    """
    raise NotImplementedError()
class GitInterface(VCInterface):
  """VCInterface implementation backed by git commands run via the step."""

  def Pull(self):
    self.step.GitPull()

  def Fetch(self):
    self.step.Git("fetch")

  def GetTags(self):
    return self.step.Git("tag").strip().splitlines()

  def GetBranches(self):
    # Get relevant remote branches, e.g. "branch-heads/3.25".
    branches = filter(
        lambda s: re.match(r"^branch\-heads/\d+\.\d+$", s),
        self.step.GitRemotes())
    # Remove 'branch-heads/' prefix.
    return map(lambda s: s[13:], branches)

  def MasterBranch(self):
    return "master"

  def CandidateBranch(self):
    return "candidates"

  def RemoteMasterBranch(self):
    return "origin/master"

  def RemoteCandidateBranch(self):
    return "origin/candidates"

  def RemoteBranch(self, name):
    """Resolve a short branch name to its fully qualified remote ref."""
    # Assume that if someone "fully qualified" the ref, they know what they
    # want.
    if name.startswith('refs/'):
      return name
    if name in ["candidates", "master"]:
      return "refs/remotes/origin/%s" % name
    try:
      # Check if branch is in heads.
      if self.step.Git("show-ref refs/remotes/origin/%s" % name).strip():
        return "refs/remotes/origin/%s" % name
    except GitFailedException:
      pass
    try:
      # Check if branch is in branch-heads.
      if self.step.Git("show-ref refs/remotes/branch-heads/%s" % name).strip():
        return "refs/remotes/branch-heads/%s" % name
    except GitFailedException:
      pass
    # Bug fix: Die lives on the step (as used in Tag below), not on this
    # class — "self.Die" raised AttributeError instead of the error message.
    self.step.Die("Can't find remote of %s" % name)

  def Tag(self, tag, remote, message):
    """Set a tag for the commit whose title matches `message` on `remote`.

    Assumptions: The commit already landed and the commit message is unique.
    """
    # Wait for the commit to appear. Assumes unique commit message titles (this
    # is the case for all automated merge and push commits - also no title is
    # the prefix of another title).
    commit = None
    for wait_interval in [10, 30, 60, 60, 60, 60, 60]:
      self.step.Git("fetch")
      commit = self.step.GitLog(n=1, format="%H", grep=message, branch=remote)
      if commit:
        break
      print("The commit has not replicated to git. Waiting for %s seconds." %
            wait_interval)
      self.step._side_effect_handler.Sleep(wait_interval)
    else:
      self.step.Die("Couldn't determine commit for setting the tag. Maybe the "
                    "git updater is lagging behind?")

    self.step.Git("tag %s %s" % (tag, commit))
    self.step.Git("push origin refs/tags/%s:refs/tags/%s" % (tag, tag))

  def CLLand(self):
    self.step.GitCLLand()
class Step(GitRecipesMixin):
def __init__(self, text, number, config, state, options, handler):
self._text = text
self._number = number
self._config = config
self._state = state
self._options = options
self._side_effect_handler = handler
self.vc = GitInterface()
self.vc.InjectStep(self)
# The testing configuration might set a different default cwd.
self.default_cwd = (self._config.get("DEFAULT_CWD") or
os.path.join(self._options.work_dir, "v8"))
assert self._number >= 0
assert self._config is not None
assert self._state is not None
assert self._side_effect_handler is not None
def __getitem__(self, key):
# Convenience method to allow direct [] access on step classes for
# manipulating the backed state dict.
return self._state.get(key)
def __setitem__(self, key, value):
# Convenience method to allow direct [] access on step classes for
# manipulating the backed state dict.
self._state[key] = value
def Config(self, key):
return self._config[key]
def Run(self):
# Restore state.
state_file = "%s-state.json" % self._config["PERSISTFILE_BASENAME"]
if not self._state and os.path.exists(state_file):
self._state.update(json.loads(FileToText(state_file)))
print(">>> Step %d: %s" % (self._number, self._text))
try:
return self.RunStep()
finally:
# Persist state.
TextToFile(json.dumps(self._state), state_file)
def RunStep(self): # pragma: no cover
raise NotImplementedError
def Retry(self, cb, retry_on=None, wait_plan=None):
""" Retry a function.
Params:
cb: The function to retry.
retry_on: A callback that takes the result of the function and returns
True if the function should be retried. A function throwing an
exception is always retried.
wait_plan: A list of waiting delays between retries in seconds. The
maximum number of retries is len(wait_plan).
"""
retry_on = retry_on or (lambda x: False)
wait_plan = list(wait_plan or [])
wait_plan.reverse()
while True:
got_exception = False
try:
result = cb()
except NoRetryException as e:
raise e
except Exception as e:
got_exception = e
if got_exception or retry_on(result):
if not wait_plan: # pragma: no cover
raise Exception("Retried too often. Giving up. Reason: %s" %
str(got_exception))
wait_time = wait_plan.pop()
print("Waiting for %f seconds." % wait_time)
self._side_effect_handler.Sleep(wait_time)
print("Retrying...")
else:
return result
def ReadLine(self, default=None):
# Don't prompt in forced mode.
if self._options.force_readline_defaults and default is not None:
print("%s (forced)" % default)
return default
else:
return self._side_effect_handler.ReadLine()
def Command(self, name, args, cwd=None):
cmd = lambda: self._side_effect_handler.Command(
name, args, "", True, cwd=cwd or self.default_cwd)
return self.Retry(cmd, None, [5])
def Git(self, args="", prefix="", pipe=True, retry_on=None, cwd=None):
cmd = lambda: self._side_effect_handler.Command(
"git", args, prefix, pipe, cwd=cwd or self.default_cwd)
result = self.Retry(cmd, retry_on, [5, 30])
if result is None:
raise GitFailedException("'git %s' failed." % args)
return result
def Editor(self, args):
if self._options.requires_editor:
return self._side_effect_handler.Command(
os.environ["EDITOR"],
args,
pipe=False,
cwd=self.default_cwd)
def ReadURL(self, url, params=None, retry_on=None, wait_plan=None):
wait_plan = wait_plan or [3, 60, 600]
cmd = lambda: self._side_effect_handler.ReadURL(url, params)
return self.Retry(cmd, retry_on, wait_plan)
def Die(self, msg=""):
if msg != "":
print("Error: %s" % msg)
print("Exiting")
raise Exception(msg)
def DieNoManualMode(self, msg=""):
if not self._options.manual: # pragma: no cover
msg = msg or "Only available in manual mode."
self.Die(msg)
def Confirm(self, msg):
print("%s [Y/n] " % msg, end=' ')
answer = self.ReadLine(default="Y")
return answer == "" or answer == "Y" or answer == "y"
def DeleteBranch(self, name, cwd=None):
for line in self.GitBranch(cwd=cwd).splitlines():
if re.match(r"\*?\s*%s$" % re.escape(name), line):
msg = "Branch %s exists, do you want to delete it?" % name
if self.Confirm(msg):
self.GitDeleteBranch(name, cwd=cwd)
print("Branch %s deleted." % name)
else:
msg = "Can't continue. Please delete branch %s and try again." % name
self.Die(msg)
def InitialEnvironmentChecks(self, cwd):
# Cancel if this is not a git checkout.
if not os.path.exists(os.path.join(cwd, ".git")): # pragma: no cover
self.Die("%s is not a git checkout. If you know what you're doing, try "
"deleting it and rerunning this script." % cwd)
# Cancel if EDITOR is unset or not executable.
if (self._options.requires_editor and (not os.environ.get("EDITOR") or
self.Command(
"which", os.environ["EDITOR"]) is None)): # pragma: no cover
self.Die("Please set your EDITOR environment variable, you'll need it.")
def CommonPrepare(self):
# Check for a clean workdir.
if not self.GitIsWorkdirClean(): # pragma: no cover
self.Die("Workspace is not clean. Please commit or undo your changes.")
# Checkout master in case the script was left on a work branch.
self.GitCheckout('origin/master')
# Fetch unfetched revisions.
self.vc.Fetch()
def PrepareBranch(self):
# Delete the branch that will be created later if it exists already.
self.DeleteBranch(self._config["BRANCHNAME"])
def CommonCleanup(self):
self.GitCheckout('origin/master')
self.GitDeleteBranch(self._config["BRANCHNAME"])
# Clean up all temporary files.
for f in glob.iglob("%s*" % self._config["PERSISTFILE_BASENAME"]):
if os.path.isfile(f):
os.remove(f)
if os.path.isdir(f):
shutil.rmtree(f)
def ReadAndPersistVersion(self, prefix=""):
def ReadAndPersist(var_name, def_name):
match = re.match(r"^#define %s\s+(\d*)" % def_name, line)
if match:
value = match.group(1)
self["%s%s" % (prefix, var_name)] = value
for line in LinesInFile(os.path.join(self.default_cwd, VERSION_FILE)):
for (var_name, def_name) in [("major", "V8_MAJOR_VERSION"),
("minor", "V8_MINOR_VERSION"),
("build", "V8_BUILD_NUMBER"),
("patch", "V8_PATCH_LEVEL")]:
ReadAndPersist(var_name, def_name)
def WaitForLGTM(self):
print ("Please wait for an LGTM, then type \"LGTM<Return>\" to commit "
"your change. (If you need to iterate on the patch or double check "
"that it's sensible, do so in another shell, but remember to not "
"change the headline of the uploaded CL.")
answer = ""
while answer != "LGTM":
print("> ", end=' ')
answer = self.ReadLine(None if self._options.wait_for_lgtm else "LGTM")
if answer != "LGTM":
print("That was not 'LGTM'.")
def WaitForResolvingConflicts(self, patch_file):
print("Applying the patch \"%s\" failed. Either type \"ABORT<Return>\", "
"or resolve the conflicts, stage *all* touched files with "
"'git add', and type \"RESOLVED<Return>\"" % (patch_file))
self.DieNoManualMode()
answer = ""
while answer != "RESOLVED":
if answer == "ABORT":
self.Die("Applying the patch failed.")
if answer != "":
print("That was not 'RESOLVED' or 'ABORT'.")
print("> ", end=' ')
answer = self.ReadLine()
# Takes a file containing the patch to apply as first argument.
def ApplyPatch(self, patch_file, revert=False):
try:
self.GitApplyPatch(patch_file, revert)
except GitFailedException:
self.WaitForResolvingConflicts(patch_file)
def GetVersionTag(self, revision):
tag = self.Git("describe --tags %s" % revision).strip()
return SanitizeVersionTag(tag)
def GetRecentReleases(self, max_age):
# Make sure tags are fetched.
self.Git("fetch origin +refs/tags/*:refs/tags/*")
# Current timestamp.
time_now = int(self._side_effect_handler.GetUTCStamp())
# List every tag from a given period.
revisions = self.Git("rev-list --max-age=%d --tags" %
int(time_now - max_age)).strip()
# Filter out revisions who's tag is off by one or more commits.
return filter(lambda r: self.GetVersionTag(r), revisions.splitlines())
def GetLatestVersion(self):
# Use cached version if available.
if self["latest_version"]:
return self["latest_version"]
# Make sure tags are fetched.
self.Git("fetch origin +refs/tags/*:refs/tags/*")
all_tags = self.vc.GetTags()
only_version_tags = NormalizeVersionTags(all_tags)
version = sorted(only_version_tags,
key=SortingKey, reverse=True)[0]
self["latest_version"] = version
return version
def GetLatestRelease(self):
"""The latest release is the git hash of the latest tagged version.
This revision should be rolled into chromium.
"""
latest_version = self.GetLatestVersion()
# The latest release.
latest_hash = self.GitLog(n=1, format="%H", branch=latest_version)
assert latest_hash
return latest_hash
def GetLatestReleaseBase(self, version=None):
"""The latest release base is the latest revision that is covered in the
last change log file. It doesn't include cherry-picked patches.
"""
latest_version = version or self.GetLatestVersion()
# Strip patch level if it exists.
latest_version = ".".join(latest_version.split(".")[:3])
# The latest release base.
latest_hash = self.GitLog(n=1, format="%H", branch=latest_version)
assert latest_hash
title = self.GitLog(n=1, format="%s", git_hash=latest_hash)
match = PUSH_MSG_GIT_RE.match(title)
if match:
# Legacy: In the old process there's one level of indirection. The
# version is on the candidates branch and points to the real release
# base on master through the commit message.
return match.group("git_rev")
match = PUSH_MSG_NEW_RE.match(title)
if match:
# This is a new-style v8 version branched from master. The commit
# "latest_hash" is the version-file change. Its parent is the release
# base on master.
return self.GitLog(n=1, format="%H", git_hash="%s^" % latest_hash)
self.Die("Unknown latest release: %s" % latest_hash)
def ArrayToVersion(self, prefix):
    """Assemble a dotted version string from the four stored components."""
    components = (self[prefix + part]
                  for part in ("major", "minor", "build", "patch"))
    return ".".join(components)
def StoreVersion(self, version, prefix):
    """Split a dotted version and store its parts under prefixed keys."""
    components = version.split(".")
    if len(components) == 3:
        # A three-part version has an implicit patch level of zero.
        components.append("0")
    # Exactly four components are expected; unpacking enforces that.
    major, minor, build, patch = components
    self[prefix + "major"] = major
    self[prefix + "minor"] = minor
    self[prefix + "build"] = build
    self[prefix + "patch"] = patch
def SetVersion(self, version_file, prefix):
    """Rewrite the V8 version defines in *version_file*.

    Each matching ``#define V8_*`` line gets its trailing number replaced
    with the value stored under the corresponding ``prefix``-ed state key.
    The candidate define is only touched when a candidate value is set.

    Fix: the regex literals are now raw strings (``r"\\d+$"``); the old
    non-raw ``"\\d+$"`` relied on an invalid escape sequence, which raises
    a DeprecationWarning (SyntaxWarning on modern Pythons).
    """
    output = ""
    for line in FileToText(version_file).splitlines():
        if line.startswith("#define V8_MAJOR_VERSION"):
            line = re.sub(r"\d+$", self[prefix + "major"], line)
        elif line.startswith("#define V8_MINOR_VERSION"):
            line = re.sub(r"\d+$", self[prefix + "minor"], line)
        elif line.startswith("#define V8_BUILD_NUMBER"):
            line = re.sub(r"\d+$", self[prefix + "build"], line)
        elif line.startswith("#define V8_PATCH_LEVEL"):
            line = re.sub(r"\d+$", self[prefix + "patch"], line)
        elif (self[prefix + "candidate"] and
              line.startswith("#define V8_IS_CANDIDATE_VERSION")):
            line = re.sub(r"\d+$", self[prefix + "candidate"], line)
        output += "%s\n" % line
    TextToFile(output, version_file)
class BootstrapStep(Step):
    """Ensure a dedicated working v8 checkout exists before other steps run."""

    MESSAGE = "Bootstrapping checkout and state."

    def RunStep(self):
        # Reserve state entry for json output.
        self['json_output'] = {}
        # Refuse to operate on the checkout the script itself lives in.
        if os.path.realpath(self.default_cwd) == os.path.realpath(V8_BASE):
            self.Die("Can't use v8 checkout with calling script as work checkout.")
        # Directory containing the working v8 checkout.
        if not os.path.exists(self._options.work_dir):
            os.makedirs(self._options.work_dir)
        if not os.path.exists(self.default_cwd):
            # Bootstrap a fresh checkout via depot_tools' `fetch v8`.
            self.Command("fetch", "v8", cwd=self._options.work_dir)
class UploadStep(Step):
    """Upload the local change for code review, with an optional reviewer."""

    MESSAGE = "Upload for code review."

    def RunStep(self):
        reviewer = None
        if self._options.reviewer:
            print("Using account %s for review." % self._options.reviewer)
            reviewer = self._options.reviewer

        tbr_reviewer = None
        if self._options.tbr_reviewer:
            print("Using account %s for TBR review." % self._options.tbr_reviewer)
            tbr_reviewer = self._options.tbr_reviewer

        if not reviewer and not tbr_reviewer:
            # Interactive fallback: prompt for a reviewer. DieNoManualMode
            # aborts first when the script runs in forced (non-manual) mode.
            print(
                "Please enter the email address of a V8 reviewer for your patch: ",
                end=' ')
            self.DieNoManualMode("A reviewer must be specified in forced mode.")
            reviewer = self.ReadLine()

        self.GitUpload(reviewer, self._options.force_upload,
                       bypass_hooks=self._options.bypass_upload_hooks,
                       cc=self._options.cc, tbr_reviewer=tbr_reviewer)
def MakeStep(step_class=Step, number=0, state=None, config=None,
             options=None, side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
    """Instantiate *step_class*, defaulting the mutable arguments safely."""
    # Allow to pass in empty dictionaries.
    if state is None:
        state = {}
    if config is None:
        config = {}
    # Fall back to the class name when a step defines no MESSAGE attribute
    # (getattr's default handles the AttributeError case).
    message = getattr(step_class, "MESSAGE", step_class.__name__)
    return step_class(message, number=number, config=config,
                      state=state, options=options,
                      handler=side_effect_handler)
class ScriptsBase(object):
    """Base class for v8 release scripts.

    Subclasses provide the step list via _Steps() and may customize option
    parsing; this class handles common options and step execution.
    """

    def __init__(self,
                 config=None,
                 side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER,
                 state=None):
        self._config = config or self._Config()
        self._side_effect_handler = side_effect_handler
        # Shared mutable state dict handed to every step.
        self._state = state if state is not None else {}

    def _Description(self):
        # Subclasses may return help text for the argument parser.
        return None

    def _PrepareOptions(self, parser):
        # Hook for subclasses to add script-specific arguments.
        pass

    def _ProcessOptions(self, options):
        # Hook for subclasses to validate options; returning False aborts.
        return True

    def _Steps(self):  # pragma: no cover
        # Subclasses must return the list of step classes to run.
        raise Exception("Not implemented.")

    def _Config(self):
        return {}

    def MakeOptions(self, args=None):
        """Parse common + script-specific options; return None on bad input."""
        parser = argparse.ArgumentParser(description=self._Description())
        parser.add_argument("-a", "--author", default="",
                            help="The author email used for code review.")
        parser.add_argument("--dry-run", default=False, action="store_true",
                            help="Perform only read-only actions.")
        parser.add_argument("--json-output",
                            help="File to write results summary to.")
        parser.add_argument("-r", "--reviewer", default="",
                            help="The account name to be used for reviews.")
        parser.add_argument("--tbr-reviewer", "--tbr", default="",
                            help="The account name to be used for TBR reviews.")
        parser.add_argument("-s", "--step",
                            help="Specify the step where to start work. Default: 0.",
                            default=0, type=int)
        parser.add_argument("--work-dir",
                            help=("Location where to bootstrap a working v8 "
                                  "checkout."))
        self._PrepareOptions(parser)

        if args is None:  # pragma: no cover
            options = parser.parse_args()
        else:
            options = parser.parse_args(args)

        # Process common options.
        if options.step < 0:  # pragma: no cover
            print("Bad step number %d" % options.step)
            parser.print_help()
            return None

        # Defaults for options, common to all scripts.
        options.manual = getattr(options, "manual", True)
        options.force = getattr(options, "force", False)
        options.bypass_upload_hooks = False

        # Derived options.
        options.requires_editor = not options.force
        options.wait_for_lgtm = not options.force
        options.force_readline_defaults = not options.manual
        options.force_upload = not options.manual

        # Process script specific options.
        if not self._ProcessOptions(options):
            parser.print_help()
            return None

        if not options.work_dir:
            options.work_dir = "/tmp/v8-release-scripts-work-dir"
        return options

    def RunSteps(self, step_classes, args=None):
        """Run BootstrapStep plus *step_classes*, resuming at --step."""
        options = self.MakeOptions(args)
        if not options:
            return 1

        # Ensure temp dir exists for state files.
        state_dir = os.path.dirname(self._config["PERSISTFILE_BASENAME"])
        if not os.path.exists(state_dir):
            os.makedirs(state_dir)

        # Starting fresh (step 0) discards any previously persisted state.
        state_file = "%s-state.json" % self._config["PERSISTFILE_BASENAME"]
        if options.step == 0 and os.path.exists(state_file):
            os.remove(state_file)

        steps = []
        for (number, step_class) in enumerate([BootstrapStep] + step_classes):
            steps.append(MakeStep(step_class, number, self._state, self._config,
                                  options, self._side_effect_handler))

        try:
            for step in steps[options.step:]:
                # A truthy return from a step short-circuits the run.
                if step.Run():
                    return 0
        finally:
            # Always flush the collected json results, even on early exit.
            if options.json_output:
                with open(options.json_output, "w") as f:
                    json.dump(self._state['json_output'], f)
        return 0

    def Run(self, args=None):
        return self.RunSteps(self._Steps(), args)
| {
"content_hash": "6efeacdccea2d350db8a7ab572de7b56",
"timestamp": "",
"source": "github",
"line_count": 788,
"max_line_length": 79,
"avg_line_length": 32.60279187817259,
"alnum_prop": 0.6366431824374295,
"repo_name": "youtube/cobalt_sandbox",
"id": "fd69075872f9a3887d196ccc3c2001fb7bd9b5c6",
"size": "27313",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "third_party/v8/tools/release/common_includes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
Created on Fri Mar 9 14:22:28 2012
@author: eba
"""
def plotLines():
    """Plot the wall segments declared via 'addline' in ulmsserver.ini."""
    import fileinput
    import re

    # Collect (startx, starty, endx, endy) tuples from the config file.
    segments = []
    for raw in fileinput.input('ulmsserver.ini'):
        fields = raw.split()
        if not fields or fields[0] != 'addline':
            continue
        sx = float(re.search('startx=(.*)', fields[1]).group(1))
        sy = float(re.search('starty=(.*)', fields[2]).group(1))
        ex = float(re.search('endx=(.*)', fields[3]).group(1))
        ey = float(re.search('endy=(.*)', fields[4]).group(1))
        segments.append((sx, sy, ex, ey))

    import matplotlib.pyplot
    # Draw each segment as a thick white line.
    for sx, sy, ex, ey in segments:
        matplotlib.pyplot.plot([sx, ex], [sy, ey], 'w', linewidth=3)
"content_hash": "1c193b99f4b2cdce95ad1c5183e4f44a",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 103,
"avg_line_length": 34.28125,
"alnum_prop": 0.5542388331814039,
"repo_name": "enobayram/MHFlib",
"id": "ebe6f5a7eb8ab6aa5003d31c6281f9d0f14b2427",
"size": "1121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MHFPython/scripts/plotLines.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "3377"
},
{
"name": "C++",
"bytes": "67017"
}
],
"symlink_target": ""
} |
"""Solum Deployer Heat handler."""
# import json
import logging
import socket
import time
from heatclient import exc
import httplib2
# from keystoneclient.v2_0 import client as ksclient
from oslo_config import cfg
from oslo_log import log as os_logging
from sqlalchemy import exc as sqla_exc
from swiftclient import exceptions as swiftexp
import yaml
import solum
from solum.api.handlers import userlog_handler
from solum.common import catalog
from solum.common import clients
from solum.common import exception
from solum.common import heat_utils
from solum.common import repo_utils
from solum.common import solum_glanceclient
from solum.common import solum_swiftclient
# from solum.common import utils
from solum import objects
from solum.objects import assembly
from solum.uploaders import tenant_logger as tlog
LOG = os_logging.getLogger(__name__)

# Shorthand for the assembly state constants used throughout this module.
STATES = assembly.States

# Config options owned by the deployer service.
SERVICE_OPTS = [
    cfg.IntOpt('max_attempts',
               default=600,
               help=('Number of attempts to query the Heat stack for '
                     'finding out the status of the created stack and '
                     'getting url of the DU created in the stack')),
    cfg.IntOpt('du_attempts',
               default=500,
               help=('Number of attempts to query the Docker DU for '
                     'finding out the status of the created app and '
                     'getting url of the DU created in the stack')),
    cfg.IntOpt('wait_interval',
               default=1,
               help=('Sleep time interval between two attempts of querying '
                     'the Heat stack. This interval is in seconds.')),
    cfg.FloatOpt('growth_factor',
                 default=1.1,
                 help=('Factor by which sleep time interval increases. '
                       'This value should be >= 1.0')),
    cfg.StrOpt('flavor',
               default="m1.small",
               help='VM Flavor'),
    cfg.StrOpt('image',
               default="coreos",
               help='Image id'),
    cfg.StrOpt('key_name',
               default="mykey",
               help='keypair name'),
    cfg.StrOpt('deployer_log_dir',
               default="/var/log/solum/deployer",
               help='Deployer logs location'),
]


def list_opts():
    """Expose this module's options to oslo.config's option discovery."""
    yield 'deployer', SERVICE_OPTS


cfg.CONF.register_opts(SERVICE_OPTS, group='deployer')
# Options defined by other solum modules that this handler reads.
cfg.CONF.import_opt('image_format', 'solum.api.handlers.assembly_handler',
                    group='api')
cfg.CONF.import_opt('image_storage', 'solum.worker.config', group='worker')
cfg.CONF.import_opt('www_authenticate_uri', 'keystonemiddleware.auth_token',
                    group='keystone_authtoken')

# Directory where per-tenant deployer logs are written.
deployer_log_dir = cfg.CONF.deployer.deployer_log_dir
def get_heat_client(ctxt, app):
    """Return a Heat client authenticated with the request context's token.

    NOTE: *app* is currently unused by the live code; it fed the disabled
    per-app credential path kept in the comments below (see the TODO).
    """
    # raw_content = json.loads(app.raw_content)
    # username = raw_content['username']
    # encoded_password = raw_content['password'].encode('ISO-8859-1')
    # decrypted_password = utils.decrypt(encoded_password)
    # password = decrypted_password
    # tenant_name = raw_content['tenant_name']
    # auth_url = cfg.CONF.keystone_authtoken.www_authenticate_uri
    # ks_kwargs = {
    #     'username': username,
    #     'password': password,
    #     'tenant_name': tenant_name,
    #     'auth_url': auth_url
    # }
    # k_client = ksclient.Client(**ks_kwargs)
    # auth_token = k_client.auth_token
    osc = clients.OpenStackClients(ctxt)

    # TODO(zhurong): Following works for github triggers.
    # We should accommodate it with the current workflow.
    # See for details: https://bugs.launchpad.net/solum/+bug/1671871
    # heat = osc.heat(username, password, auth_token)
    heat = osc.heat(token=ctxt.auth_token)
    return heat
def save_du_ref_for_scaling(ctxt, assembly_id, du=None):
    """Record the deployment-unit (DU) reference in the app's scale_config.

    Best-effort: lookup/update failures are logged and swallowed so the
    deployment itself is not blocked.
    """
    try:
        wf = objects.registry.Workflow.get_by_assembly_id(assembly_id)
    except sqla_exc.SQLAlchemyError as ex:
        LOG.error("Failed to get workflow corresponding "
                  "to assembly %s" % assembly_id)
        LOG.exception(ex)
        return
    if wf is not None:
        try:
            app = objects.registry.App.get_by_id(ctxt, wf.app_id)
            current_scale_config = app.scale_config
            if current_scale_config:
                # scale_config is keyed by app name; attach the DU ref to
                # this app's entry and write the whole mapping back.
                current_config = current_scale_config[app.name]
                current_config['du'] = du
                current_scale_config[app.name] = current_config
                scale_config = dict()
                scale_config['scale_config'] = current_scale_config
                objects.registry.App.update_and_save(ctxt, app.id,
                                                     scale_config)
        except sqla_exc.SQLAlchemyError as ex:
            LOG.error("Failed to update app scale_config: %s" % app.id)
            LOG.exception(ex)
def get_assembly_by_id(ctxt, assembly_id):
    """Fetch the Assembly record identified by *assembly_id*."""
    registry = solum.objects.registry
    return registry.Assembly.get_by_id(ctxt, assembly_id)
def get_app_by_assem_id(ctxt, assembly_id):
    """Resolve the App owning the given assembly, or None if not found."""
    assem = get_assembly_by_id(ctxt, assembly_id)
    if not assem:
        # Matches the original implicit-None fall-through.
        return None
    registry = solum.objects.registry
    plan = registry.Plan.get_by_id(ctxt, assem.plan_id)
    return registry.App.get_by_id(ctxt, plan.uuid)
def update_wf_and_app(ctxt, assembly_id, data):
    """Mirror assembly status / app-URL changes onto Workflow and App rows.

    *data* may carry 'status' and/or 'application_uri'; the latter is
    stored under the 'app_url' key. DB failures are logged, never raised.
    """
    # Update workflow and app objects
    data_dict = dict()
    if data.get('status') is not None:
        data_dict['status'] = data['status']
    if data.get('application_uri') is not None:
        data_dict['app_url'] = data['application_uri']
    wf = None
    try:
        wf = objects.registry.Workflow.get_by_assembly_id(assembly_id)
        objects.registry.Workflow.update_and_save(ctxt, wf.id, data_dict)
    except sqla_exc.SQLAlchemyError as ex:
        LOG.error("Failed to update workflow corresponding to assembly %s"
                  % assembly_id)
        LOG.exception(ex)
    except exception.ResourceNotFound as ex:
        # This happens if plan (deprecated) was directly created
        LOG.error("Workflow not found for assembly %s" % assembly_id)
        LOG.exception(ex)
    if wf is not None:
        try:
            app = objects.registry.App.get_by_id(ctxt, wf.app_id)
            objects.registry.App.update_and_save(ctxt, app.id, data_dict)
        except sqla_exc.SQLAlchemyError as ex:
            LOG.error("Failed to update app status and app URL: %s" % app.id)
            LOG.exception(ex)
def update_assembly(ctxt, assembly_id, data):
    """Synchronously update an assembly row and mirror it to workflow/app."""
    # Here we are updating the assembly synchronously (i.e. without
    # using the conductor). This is because when using the conductor latency
    # is introduced between the update call and when assembly's state is
    # actually updated in the database. This latency leads to concurrency
    # bugs within deployers' actions when multiple deployers are present
    # in the system.
    try:
        objects.registry.Assembly.update_and_save(ctxt, assembly_id, data)
    except sqla_exc.SQLAlchemyError as ex:
        LOG.error("Failed to update assembly status, ID: %s" % assembly_id)
        LOG.exception(ex)
    try:
        update_wf_and_app(ctxt, assembly_id, data)
    except Exception as ex:
        LOG.error("Failed to update workflow and app status for assembly: %s"
                  % assembly_id)
        LOG.exception(ex)
class Handler(object):
    """RPC handler that deploys and destroys applications via Heat stacks.

    Deployment units (DUs) are launched through OpenStack Heat; progress
    and errors are recorded on the assembly row and streamed to a
    per-tenant logger.
    """

    def __init__(self):
        super(Handler, self).__init__()
        # Load the object registry so objects.registry.* lookups resolve.
        objects.load()

    def echo(self, ctxt, message):
        """Debug no-op used to verify RPC connectivity."""
        LOG.debug("%s" % message)

    def _get_stack_name(self, assembly, prefix_len=100):
        """Return '<truncated-assembly-name>-<uuid>' for the Heat stack."""
        assem_name = assembly.name
        # heat stack name has a max allowable length of 255
        return ''.join([assem_name[:min(len(assem_name), prefix_len)], '-',
                        assembly.uuid])

    def _clean_up_artifacts(self, ctxt, t_logger,
                            logs_resource_id, assem):
        """Delete the assembly's DU image and user logs (best effort)."""
        try:
            if cfg.CONF.worker.image_storage == 'swift':
                self._delete_app_artifacts_from_swift(ctxt, t_logger,
                                                      assem)
            elif cfg.CONF.worker.image_storage == 'glance':
                self._delete_app_artifacts_from_glance(ctxt, t_logger,
                                                       assem)
            else:
                LOG.debug("image_storage option %s not recognized." %
                          cfg.CONF.worker.image_storage)
        except Exception as e:
            LOG.exception(e)
        # Delete logs
        try:
            log_handler = userlog_handler.UserlogHandler(ctxt)
            log_handler.delete(logs_resource_id)
        except exception.AuthorizationFailure as authexcp:
            t_logger.log(logging.ERROR, str(authexcp))
            LOG.debug(str(authexcp))
        t_logger.upload()

    def _delete_app_artifacts_from_glance(self, ctxt, t_logger, assem):
        """Remove the assembly's DU image from glance, then its DB record."""
        if assem.image_id:
            img = objects.registry.Image.get_by_id(ctxt, assem.image_id)
            if img.external_ref:
                try:
                    glance = solum_glanceclient.GlanceClient(ctxt)
                    glance.delete_image_by_id(img.external_ref)
                except swiftexp.ClientException:
                    # NOTE(review): catching a *swift* exception around a
                    # glance call looks copy-pasted from the swift variant
                    # below -- confirm the intended exception type.
                    msg = "Unable to delete DU image from glance."
                    t_logger.log(logging.ERROR, msg)
                    LOG.debug(msg)
                    t_logger.upload()
                    return
            img.destroy(ctxt)

    def _delete_app_artifacts_from_swift(self, ctxt, t_logger, assem):
        """Remove the assembly's DU object from swift, then its DB record."""
        if assem.image_id:
            img = objects.registry.Image.get_by_id(ctxt, assem.image_id)
            if img.docker_image_name:
                # Object name is everything after the first '-' in the
                # docker image name (assumes that naming scheme -- verify).
                img_filename = img.docker_image_name.split('-', 1)[1]
                try:
                    swift = solum_swiftclient.SwiftClient(ctxt)
                    swift.delete_object('solum_du', img_filename)
                except swiftexp.ClientException:
                    msg = "Unable to delete DU image from swift."
                    t_logger.log(logging.ERROR, msg)
                    LOG.debug(msg)
                    t_logger.upload()
                    return
            img.destroy(ctxt)

    def destroy_assembly(self, ctxt, assem_id):
        """Delete an assembly: its Heat stack, DU image, logs and record.

        The stack delete is polled with exponential backoff; on timeout the
        assembly is left in ERROR_STACK_DELETE_FAILED.
        """
        update_assembly(ctxt, assem_id,
                        {'status': STATES.DELETING})
        assem = objects.registry.Assembly.get_by_id(ctxt, assem_id)
        app_obj = get_app_by_assem_id(ctxt, assem.id)
        LOG.debug("Deleting app %s" % app_obj.name)
        logs_resource_id = assem.uuid
        stack_id = self._find_id_if_stack_exists(assem)
        try:
            workflow = objects.registry.Workflow.get_by_assembly_id(assem_id)
            workflow_id = workflow.id
            wf_found = True
        except exception.ResourceNotFound:
            # get_by_assembly_id would result in ResourceNotFound exception
            # if assembly is created directly. In that case use assembly_id
            # with TenantLogger
            workflow_id = assem.uuid
            wf_found = False
        # TODO(devkulkarni) Delete t_logger when returning from this call.
        # This needs to be implemented as a context since there are
        # multiple return paths from this method.
        t_logger = tlog.TenantLogger(ctxt,
                                     assem,
                                     workflow_id,
                                     deployer_log_dir,
                                     'delete')
        msg = "Deleting Assembly %s Workflow %s" % (assem.uuid, workflow_id)
        t_logger.log(logging.DEBUG, msg)
        LOG.debug(msg)
        if stack_id is None:
            # Nothing deployed; just clean up artifacts and logs.
            t_logger.upload()
            self._clean_up_artifacts(ctxt, t_logger, logs_resource_id, assem)
            if not wf_found:
                assem.destroy(ctxt)
            return
        else:
            # Get the heat client
            heat_clnt = get_heat_client(ctxt, app_obj)
            try:
                t_logger.log(logging.DEBUG, "Deleting Heat stack.")
                LOG.debug("Deleting Heat stack %s", stack_id)
                heat_clnt.stacks.delete(stack_id)
            except exc.HTTPNotFound:
                # stack already deleted
                t_logger.log(logging.ERROR, "Heat stack not found.")
                t_logger.upload()
                self._clean_up_artifacts(ctxt, t_logger, logs_resource_id,
                                         assem)
                if not wf_found:
                    assem.destroy(ctxt)
                return
            except Exception as e:
                LOG.exception(e)
                update_assembly(ctxt, assem_id,
                                {'status': STATES.ERROR_STACK_DELETE_FAILED})
                t_logger.log(logging.ERROR, "Error deleting heat stack.")
                t_logger.upload()
                return
        # Poll with exponentially growing sleep until the stack is gone.
        wait_interval = cfg.CONF.deployer.wait_interval
        growth_factor = cfg.CONF.deployer.growth_factor
        stack_name = self._get_stack_name(assem)
        t_logger.log(logging.DEBUG, "Checking if Heat stack was deleted.")
        for count in range(cfg.CONF.deployer.max_attempts):
            try:
                # Must use stack_name for expecting a 404
                heat_clnt.stacks.get(stack_name)
            except exc.HTTPNotFound:
                t_logger.log(logging.DEBUG, "Stack delete successful.")
                t_logger.upload()
                self._clean_up_artifacts(ctxt, t_logger, logs_resource_id,
                                         assem)
                if not wf_found:
                    assem.destroy(ctxt)
                return
            time.sleep(wait_interval)
            wait_interval *= growth_factor
        # Polling exhausted without a 404: record the failure.
        update_assembly(ctxt, assem_id,
                        {'status': STATES.ERROR_STACK_DELETE_FAILED})
        t_logger.log(logging.ERROR, "Error deleting heat stack.")
        t_logger.upload()

    def _destroy_other_assemblies(self, ctxt, assembly_id, heat_clnt):
        """Delete Heat stacks of older deployed assemblies of the same app."""
        # Except current app's stack, destroy all other app stacks
        # We query the newly deployed assembly's object here to
        # ensure that we get most up-to-date value for created_at attribute.
        # If we use the already available object then there is a possibility
        # that the attribute does not have the most up-to-date value due to the
        # possibility that SQLAlchemy might not synchronize object's db state
        # with its in-memory representation.
        new_assembly = objects.registry.Assembly.get_by_id(ctxt, assembly_id)
        # Fetch all assemblies by plan id, and self.destroy() them.
        new_assem_id = new_assembly.id
        app_id = new_assembly.plan_id
        created_at = new_assembly.created_at
        assemblies = objects.registry.AssemblyList.get_earlier(
            new_assem_id,
            app_id,
            STATES.DEPLOYMENT_COMPLETE,
            created_at)
        for assem in assemblies:
            if assem.id == new_assembly.id:
                continue
            # Just delete the old heat stacks and don't delete assemblies
            stack_id = self._find_id_if_stack_exists(assem)
            try:
                LOG.debug("Deleting Heat stack %s", stack_id)
                heat_clnt.stacks.delete(stack_id)
            except exc.HTTPNotFound:
                # stack already deleted
                LOG.debug("Heat stack not found %s", stack_id)
                continue
            except Exception as e:
                LOG.exception(e)
                continue
            # wait for deletion to complete
            wait_interval = cfg.CONF.deployer.wait_interval
            growth_factor = cfg.CONF.deployer.growth_factor
            stack_name = self._get_stack_name(assem)
            for count in range(cfg.CONF.deployer.max_attempts):
                try:
                    # Must use stack_name for expecting a 404
                    heat_clnt.stacks.get(stack_name)
                except exc.HTTPNotFound:
                    LOG.debug("Heat stack deleted %s", stack_id)
                    # NOTE(review): this 'continue' resumes the polling loop
                    # even though the stack is already gone; 'break' looks
                    # intended here -- confirm before changing.
                    continue
                time.sleep(wait_interval)
                wait_interval *= growth_factor

    def destroy_app(self, ctxt, app_id):
        """Tear down every assembly of an app, then its workflows and row."""
        # Get all the workflows for app_id
        workflows = objects.registry.WorkflowList.get_all(ctxt, app_id)
        # For each workflow get the assembly and call destroy assembly to
        # remove heat stacks.
        for workflow in workflows:
            assemblie = objects.registry.Assembly.get_by_id(ctxt,
                                                            workflow.assembly)
            self.destroy_assembly(ctxt, assemblie.id)
        # Delete workflow based on app_id
        try:
            # NOTE(review): destroy() is invoked on the class with app_id
            # only (no context); verify this matches Workflow.destroy's
            # signature.
            objects.registry.Workflow.destroy(app_id)
        except Exception:
            LOG.error("Workflow for app %s not found ", app_id)
        # Delete app
        try:
            db_obj = objects.registry.App.get_by_uuid(ctxt, app_id)
            db_obj.destroy(ctxt)
        except exception.ResourceNotFound:
            LOG.error("App %s not found ", app_id)

    def scale(self, ctxt, assembly_id):
        """Scale the app's DUs. Not implemented yet."""
        # TODO(devkulkarni) Find out scale target by querying the app table
        # deploy that many number of dus
        raise exception.NotImplemented()

    def deploy(self, ctxt, assembly_id, image_loc, image_name, ports):
        """Create or update the Heat stack that runs the app's DU.

        On success this waits for the stack and the app endpoint(s) to come
        up, then deletes stacks of previously deployed assemblies.
        """
        app_obj = get_app_by_assem_id(ctxt, assembly_id)
        LOG.debug("Deploying app %s" % app_obj.name)
        save_du_ref_for_scaling(ctxt, assembly_id, du=image_loc)
        # Get the heat client
        heat_clnt = get_heat_client(ctxt, app_obj)
        # Get reference to OpenStackClients as it is used to get a reference
        # to neutron client for getting networking parameters
        osc = clients.OpenStackClients(ctxt)
        assem = objects.registry.Assembly.get_by_id(ctxt,
                                                    assembly_id)
        workflow = objects.registry.Workflow.get_by_assembly_id(assembly_id)
        # TODO(devkulkarni) Delete t_logger when returning from this call.
        # This needs to be implemented as a decorator since there are
        # multiple return paths from this method.
        t_logger = tlog.TenantLogger(ctxt,
                                     assem,
                                     workflow.id,
                                     deployer_log_dir,
                                     'deploy')
        msg = "Deploying Assembly %s Workflow %s" % (assem.uuid, workflow.id)
        t_logger.log(logging.DEBUG, msg)
        LOG.debug("Image loc:%s, image_name:%s" % (image_loc, image_name))
        parameters = self._get_parameters(ctxt, cfg.CONF.api.image_format,
                                          image_loc, image_name, assem,
                                          ports, osc, t_logger)
        LOG.debug(parameters)
        if parameters is None:
            # _get_parameters already recorded the error state.
            return
        template = self._get_template(ctxt, cfg.CONF.api.image_format,
                                      cfg.CONF.worker.image_storage, image_loc,
                                      image_name, assem, ports, t_logger)
        LOG.debug(template)
        if template is None:
            return
        stack_name = self._get_stack_name(assem)
        stack_id = self._find_id_if_stack_exists(assem)
        if assem.status == STATES.DELETING:
            # A concurrent delete won the race; abandon the deploy.
            t_logger.log(logging.DEBUG, "Assembly being deleted..returning")
            t_logger.upload()
            return
        if stack_id is not None:
            # A stack already exists for this assembly; update it in place.
            try:
                heat_clnt.stacks.update(stack_id,
                                        stack_name=stack_name,
                                        template=template,
                                        parameters=parameters)
            except Exception as e:
                LOG.error("Error updating Heat Stack for,"
                          " assembly %s" % assembly_id)
                LOG.exception(e)
                update_assembly(ctxt, assembly_id, {'status': STATES.ERROR})
                t_logger.log(logging.ERROR, "Error updating heat stack.")
                t_logger.upload()
                return
        else:
            try:
                # Ship the helper script to Heat alongside the template.
                getfile_key = "robust-du-handling.sh"
                file_cnt = None
                try:
                    file_cnt = catalog.get_from_contrib(getfile_key)
                except exception.ObjectNotFound as onf_ex:
                    LOG.exception(onf_ex)
                    update_assembly(ctxt, assem.id, {'status': STATES.ERROR})
                    t_logger.log(logging.ERROR, "Error reading %s"
                                 % getfile_key)
                    t_logger.upload()
                    return
                get_file_dict = {}
                get_file_dict[getfile_key] = file_cnt
                created_stack = heat_clnt.stacks.create(stack_name=stack_name,
                                                        template=template,
                                                        parameters=parameters,
                                                        files=get_file_dict)
            except Exception as exp:
                LOG.error("Error creating Heat Stack for,"
                          " assembly %s" % assembly_id)
                LOG.exception(exp)
                update_assembly(ctxt, assembly_id,
                                {'status': STATES.ERROR_STACK_CREATE_FAILED})
                t_logger.log(logging.ERROR, "Error creating heat stack.")
                t_logger.upload()
                return
            stack_id = created_stack['stack']['id']
            LOG.debug("Stack id: %s" % stack_id)
            comp_name = 'Heat_Stack_for_%s' % assem.name
            comp_description = 'Heat Stack %s' % (
                yaml.safe_load(template).get('description'))
            try:
                # Record the stack as a component so later lookups via
                # _find_id_if_stack_exists succeed.
                objects.registry.Component.assign_and_create(
                    ctxt, assem, comp_name, 'heat_stack', comp_description,
                    created_stack['stack']['links'][0]['href'], stack_id)
            except sqla_exc.IntegrityError:
                LOG.error("IntegrityError in creating Heat Stack component,"
                          " assembly %s may be deleted" % assembly_id)
                update_assembly(ctxt, assembly_id, {'status': STATES.ERROR})
                t_logger.log(logging.ERROR, "Error creating heat stack.")
                t_logger.upload()
                return
        update_assembly(ctxt, assembly_id, {'status': STATES.DEPLOYING})
        result = self._check_stack_status(ctxt, assembly_id, heat_clnt,
                                          stack_id, ports, t_logger)
        assem.status = result
        t_logger.upload()
        if result == STATES.DEPLOYMENT_COMPLETE:
            # Old assemblies are only removed once the new one is live.
            self._destroy_other_assemblies(ctxt, assembly_id, heat_clnt)

    def _get_template(self, ctxt, image_format, image_storage,
                      image_loc, image_name, assem, ports, t_logger):
        """Load the Heat template for the configured image format/storage.

        Returns None (after recording an ERROR state) on unsupported
        configurations or missing catalog templates.
        """
        template = None
        if image_format == 'docker':
            try:
                template = catalog.get('templates', 'basic')
            except exception.ObjectNotFound as onf_ex:
                LOG.exception(onf_ex)
                update_assembly(ctxt, assem.id, {'status': STATES.ERROR})
                t_logger.log(logging.ERROR, "Error reading heat template.")
                t_logger.upload()
            return template
        elif image_format == 'vm':
            if image_storage == 'glance':
                msg = ("image_storage %s not supported with image_format %s" %
                       (image_storage, image_format))
                LOG.debug(msg)
                update_assembly(ctxt, assem.id, {'status': STATES.ERROR})
                t_logger.log(logging.DEBUG, "Solum config error: %s " % msg)
                t_logger.upload()
            else:
                try:
                    template = catalog.get('templates', 'coreos')
                except exception.ObjectNotFound as onf_ex:
                    LOG.exception(onf_ex)
                    update_assembly(ctxt, assem.id, {'status': STATES.ERROR})
                    t_logger.log(logging.ERROR, "Error reading heat template.")
                    t_logger.upload()
                    return template
            if image_storage == 'docker_registry':
                # Inject the docker-run user_data into the coreos template.
                template = self._get_template_for_docker_reg(
                    assem, template, image_loc, image_name, ports)
        else:
            LOG.debug("Image format %s is not supported." % image_format)
            update_assembly(ctxt, assem.id, {'status': STATES.ERROR})
            t_logger.log(logging.DEBUG, "Solum config error: Image format.")
            t_logger.upload()
        return template

    def _get_parameters(self, ctxt, image_format, image_loc, image_name,
                        assem, ports, osc, t_logger):
        """Build Heat stack parameters for the configured image format.

        Returns None (after recording an ERROR state) when the image
        format is unsupported.
        """
        parameters = None
        if image_format == 'docker':
            glance_img_uuid = image_loc
            LOG.debug("Image id:%s" % glance_img_uuid)
            LOG.debug("Specified ports:%s" % ports)
            LOG.debug("Picking first port..")
            # Only the first declared port is exposed for docker format.
            port_to_use = ports[0]
            LOG.debug("Application port:%s" % port_to_use)
            parameters = {'app_name': assem.name,
                          'image': glance_img_uuid,
                          'port': port_to_use}
            parameters.update(heat_utils.get_network_parameters(osc))
        elif image_format == 'vm':
            parameters = {'name': str(assem.uuid),
                          'flavor': cfg.CONF.deployer.flavor,
                          'key_name': cfg.CONF.deployer.key_name,
                          'image': cfg.CONF.deployer.image}
            # Build the '-p <port>:<port>' list for docker publish flags.
            ports_str = ''
            for port in ports:
                ports_str += ' -p {pt}:{pt}'.format(pt=port)
            parameters['location'] = image_loc
            parameters['du'] = image_name
            parameters['publish_ports'] = ports_str.strip()
        else:
            LOG.debug("Image format %s is not supported." % image_format)
            update_assembly(ctxt, assem.id, {'status': STATES.ERROR})
            t_logger.log(logging.DEBUG, "Solum config error: Image format.")
            t_logger.upload()
        return parameters

    def _check_stack_status(self, ctxt, assembly_id, heat_clnt, stack_id,
                            ports, t_logger):
        """Poll the stack until COMPLETE, then probe the app's ports.

        Returns the final assembly state and records it in the DB.
        """
        wait_interval = cfg.CONF.deployer.wait_interval
        growth_factor = cfg.CONF.deployer.growth_factor
        stack = None
        # Phase 1: wait for the Heat stack itself (exponential backoff).
        for count in range(cfg.CONF.deployer.max_attempts):
            time.sleep(wait_interval)
            wait_interval *= growth_factor
            try:
                stack = heat_clnt.stacks.get(stack_id)
            except Exception as e:
                LOG.exception(e)
                continue
            if stack.status == 'COMPLETE':
                break
            elif stack.status == 'FAILED':
                update_assembly(ctxt, assembly_id,
                                {'status': STATES.ERROR_STACK_CREATE_FAILED})
                lg_msg = "App deployment failed: Heat stack creation failure"
                t_logger.log(logging.ERROR, lg_msg)
                return STATES.ERROR_STACK_CREATE_FAILED
        if stack is None or (stack and stack.status == ""):
            update_assembly(ctxt, assembly_id,
                            {'status': STATES.ERROR_STACK_CREATE_FAILED})
            lg_msg = "App deployment failed: Heat stack is in unexpected state"
            t_logger.log(logging.ERROR, lg_msg)
            return STATES.ERROR_STACK_CREATE_FAILED
        host_ip = self._parse_server_ip(stack)
        if host_ip is None:
            LOG.exception("Could not parse url from heat stack.")
            update_assembly(ctxt, assembly_id,
                            {'status': STATES.ERROR})
            lg_msg = ("App deployment failed: "
                      "container IP address not available")
            t_logger.log(logging.ERROR, lg_msg)
            return STATES.ERROR
        # Build the user-facing URI: single port as ':p', multiple as ':[a,b]'.
        app_uri = host_ip
        if len(ports) == 1:
            app_uri += ":" + str(ports[0])
        if len(ports) > 1:
            port_list = ','.join(str(p) for p in ports)
            app_uri += ":[" + port_list + "]"
        to_upd = {'status': STATES.STARTING_APP,
                  'application_uri': app_uri}
        update_assembly(ctxt, assembly_id, to_upd)
        LOG.debug("Application URI: %s" % app_uri)
        # Phase 2: probe every declared port until all respond.
        successful_ports = set()
        du_is_up = False
        for count in range(cfg.CONF.deployer.du_attempts):
            for prt in ports:
                if prt not in successful_ports:
                    du_url = 'http://{host}:{port}'.format(host=host_ip,
                                                           port=prt)
                    try:
                        if repo_utils.is_reachable(du_url):
                            successful_ports.add(prt)
                            if len(successful_ports) == len(ports):
                                du_is_up = True
                                break
                    except socket.timeout:
                        LOG.debug("Connection to %s timed out"
                                  "assembly ID: %s" % (du_url, assembly_id))
                    except (httplib2.HttpLib2Error, socket.error) as serr:
                        # Expected while the app boots; log sparsely.
                        if count % 5 == 0:
                            LOG.exception(serr)
                        else:
                            LOG.debug(".")
                    except Exception as exp:
                        LOG.exception(exp)
                        update_assembly(ctxt, assembly_id,
                                        {'status': STATES.ERROR})
                        lg_msg = ("App deployment error: unexpected error "
                                  " when trying to reach app endpoint")
                        t_logger.log(logging.ERROR, lg_msg)
                        return STATES.ERROR
            if du_is_up:
                break
            time.sleep(1)
        if du_is_up:
            to_update = {'status': STATES.DEPLOYMENT_COMPLETE}
        else:
            to_update = {'status': STATES.ERROR_CODE_DEPLOYMENT}
            lg_msg = ("App deployment error: unreachable server or port, "
                      " please check your port config.")
            t_logger.log(logging.ERROR, lg_msg)
        update_assembly(ctxt, assembly_id, to_update)
        return to_update['status']

    def _parse_server_ip(self, heat_output):
        """Parse server ip from heat-stack-show output."""
        if 'outputs' in heat_output._info:
            for outputs in heat_output._info['outputs']:
                if outputs['output_key'] == 'public_ip':
                    return outputs['output_value']
        # No 'public_ip' output found in the stack.
        return None

    def _find_id_if_stack_exists(self, assem):
        """Return the assembly's recorded Heat stack id, or None."""
        if assem.heat_stack_component is not None:
            return assem.heat_stack_component.heat_stack_id
        return None

    def _get_template_for_docker_reg(self, assem, template,
                                     image_loc, image_name, ports):
        """Inject a 'docker run' user_data script into the coreos template.

        NOTE: *assem* and *image_name* are accepted for interface symmetry
        but unused in this method.
        """
        du_name = image_loc
        ports_str = ''
        for port in ports:
            ports_str += ' -p {pt}:{pt}'.format(pt=port)
        run_docker_str = ('#!/bin/bash -x\n'
                          '# Invoke the container\n'
                          'docker run {publish_ports} -d {du}\n'
                          'wc_notify --data-binary {stat}')
        run_docker = run_docker_str.format(publish_ports=ports_str.strip(),
                                           du=du_name,
                                           stat='\'{"status": "SUCCESS"}\'')
        LOG.debug("run_docker:%s" % run_docker)
        # Splice the script into the template's compute_instance user_data.
        template_bdy = yaml.safe_load(template)
        comp_instance = template_bdy['resources']['compute_instance']
        user_data = comp_instance['properties']['user_data']
        user_data['str_replace']['template'] = run_docker
        comp_instance['properties']['user_data'] = user_data
        template_bdy['resources']['compute_instance'] = comp_instance
        template = yaml.dump(template_bdy)
        return template
| {
"content_hash": "40167598449381e0fdf8321fb7b896e4",
"timestamp": "",
"source": "github",
"line_count": 789,
"max_line_length": 79,
"avg_line_length": 41.02027883396705,
"alnum_prop": 0.5404603738606519,
"repo_name": "stackforge/solum",
"id": "3c20d6e658f9854bf739b49713b4f0109474bf2e",
"size": "32976",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "solum/deployer/handlers/heat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "958"
},
{
"name": "Python",
"bytes": "1243294"
},
{
"name": "Shell",
"bytes": "80784"
}
],
"symlink_target": ""
} |
"""Test the listsincelast RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_array_result, assert_raises_rpc_error
class ListSinceBlockTest (BitcoinTestFramework):
    # Functional coverage for listsinceblock: default/empty blockhash,
    # invalid blockhash handling, and correctness across chain reorgs,
    # double spends, and double submissions of the same transaction.

    def set_test_params(self):
        # Four nodes so the network can be split into two halves of two
        # for the reorg-based sub-tests.
        self.num_nodes = 4
        # Start from a fresh chain; the block counts below assume it.
        self.setup_clean_chain = True

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        # Mine 101 blocks on nodes[2] so its first coinbase matures and
        # gives the sub-tests spendable funds.
        self.nodes[2].generate(101)
        self.sync_all()

        self.test_no_blockhash()
        self.test_invalid_blockhash()
        self.test_reorg()
        self.test_double_spend()
        self.test_double_send()

    def test_no_blockhash(self):
        # With no argument (or an empty string) listsinceblock reports
        # every wallet transaction.
        txid = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        # generate(1) returns a one-element list; unpack the single hash.
        blockhash, = self.nodes[2].generate(1)
        self.sync_all()

        txs = self.nodes[0].listtransactions()
        assert_array_result(txs, {"txid": txid}, {
            "category": "receive",
            "amount": 1,
            "blockhash": blockhash,
            "confirmations": 1,
        })
        assert_equal(
            self.nodes[0].listsinceblock(),
            {"lastblock": blockhash,
             "removed": [],
             "transactions": txs})
        assert_equal(
            self.nodes[0].listsinceblock(""),
            {"lastblock": blockhash,
             "removed": [],
             "transactions": txs})

    def test_invalid_blockhash(self):
        # Well-formed but unknown hashes and malformed input must all raise
        # RPC error -5 "Block not found".
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
                                "42759cde25462784395a337460bde75f58e73d3f08bd31fdc3507cbac856a2c4")
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
                                "0000000000000000000000000000000000000000000000000000000000000000")
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
                                "invalid-hex")

    def test_reorg(self):
        '''
        `listsinceblock` did not behave correctly when handed a block that was
        no longer in the main chain:

             ab0
          /       \
        aa1 [tx0]   bb1
         |           |
        aa2         bb2
         |           |
        aa3         bb3
                     |
                    bb4

        Consider a client that has only seen block `aa3` above. It asks the node
        to `listsinceblock aa3`. But at some point prior the main chain switched
        to the bb chain.

        Previously: listsinceblock would find height=4 for block aa3 and compare
        this to height=5 for the tip of the chain (bb4). It would then return
        results restricted to bb3-bb4.

        Now: listsinceblock finds the fork at ab0 and returns results in the
        range bb1-bb4.

        This test only checks that [tx0] is present.
        '''
        # Split network into two
        self.split_network()

        # send to nodes[0] from nodes[2]
        senttx = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)

        # generate on both sides; nodes[2]'s side is one block longer, so it
        # becomes the main chain after the halves rejoin.
        lastblockhash = self.nodes[1].generate(6)[5]
        self.nodes[2].generate(7)
        self.log.info('lastblockhash=%s' % (lastblockhash))

        self.sync_all([self.nodes[:2], self.nodes[2:]])

        self.join_network()

        # listsinceblock(lastblockhash) should now include tx, as seen from nodes[0]
        lsbres = self.nodes[0].listsinceblock(lastblockhash)
        found = False
        for tx in lsbres['transactions']:
            if tx['txid'] == senttx:
                found = True
                break
        assert found

    def test_double_spend(self):
        '''
        This tests the case where the same UTXO is spent twice on two separate
        blocks as part of a reorg.

             ab0
          /       \
        aa1 [tx1]   bb1 [tx2]
         |           |
        aa2         bb2
         |           |
        aa3         bb3
                     |
                    bb4

        Problematic case:

        1. User 1 receives BTC in tx1 from utxo1 in block aa1.
        2. User 2 receives BTC in tx2 from utxo1 (same) in block bb1
        3. User 1 sees 2 confirmations at block aa3.
        4. Reorg into bb chain.
        5. User 1 asks `listsinceblock aa3` and does not see that tx1 is now
           invalidated.

        Currently the solution to this is to detect that a reorg'd block is
        asked for in listsinceblock, and to iterate back over existing blocks up
        until the fork point, and to include all transactions that relate to the
        node wallet.
        '''
        self.sync_all()

        # Split network into two
        self.split_network()

        # share utxo between nodes[1] and nodes[2], so both halves can
        # independently spend the same coin.
        utxos = self.nodes[2].listunspent()
        utxo = utxos[0]
        privkey = self.nodes[2].dumpprivkey(utxo['address'])
        self.nodes[1].importprivkey(privkey)

        # send from nodes[1] using utxo to nodes[0]
        # (1 BTC to the recipient, rest minus a fixed 0.0003 fee as change)
        change = '%.8f' % (float(utxo['amount']) - 1.0003)
        recipient_dict = {
            self.nodes[0].getnewaddress(): 1,
            self.nodes[1].getnewaddress(): change,
        }
        utxo_dicts = [{
            'txid': utxo['txid'],
            'vout': utxo['vout'],
        }]
        txid1 = self.nodes[1].sendrawtransaction(
            self.nodes[1].signrawtransactionwithwallet(
                self.nodes[1].createrawtransaction(utxo_dicts, recipient_dict))['hex'])

        # send from nodes[2] using utxo to nodes[3]
        recipient_dict2 = {
            self.nodes[3].getnewaddress(): 1,
            self.nodes[2].getnewaddress(): change,
        }
        self.nodes[2].sendrawtransaction(
            self.nodes[2].signrawtransactionwithwallet(
                self.nodes[2].createrawtransaction(utxo_dicts, recipient_dict2))['hex'])

        # generate on both sides; nodes[2]'s side wins the reorg.
        lastblockhash = self.nodes[1].generate(3)[2]
        self.nodes[2].generate(4)

        self.join_network()

        self.sync_all()

        # gettransaction should work for txid1
        assert self.nodes[0].gettransaction(txid1)['txid'] == txid1, "gettransaction failed to find txid1"

        # listsinceblock(lastblockhash) should now include txid1, as seen from nodes[0]
        lsbres = self.nodes[0].listsinceblock(lastblockhash)
        assert any(tx['txid'] == txid1 for tx in lsbres['removed'])

        # but it should not include 'removed' if include_removed=false
        lsbres2 = self.nodes[0].listsinceblock(blockhash=lastblockhash, include_removed=False)
        assert 'removed' not in lsbres2

    def test_double_send(self):
        '''
        This tests the case where the same transaction is submitted twice on two
        separate blocks as part of a reorg. The former will vanish and the
        latter will appear as the true transaction (with confirmations dropping
        as a result).

             ab0
          /       \
        aa1 [tx1]   bb1
         |           |
        aa2         bb2
         |           |
        aa3         bb3 [tx1]
                     |
                    bb4

        Asserted:

        1. tx1 is listed in listsinceblock.
        2. It is included in 'removed' as it was removed, even though it is now
           present in a different block.
        3. It is listed with a confirmation count of 2 (bb3, bb4), not
           3 (aa1, aa2, aa3).
        '''
        self.sync_all()

        # Split network into two
        self.split_network()

        # create and sign a transaction
        utxos = self.nodes[2].listunspent()
        utxo = utxos[0]
        change = '%.8f' % (float(utxo['amount']) - 1.0003)
        recipient_dict = {
            self.nodes[0].getnewaddress(): 1,
            self.nodes[2].getnewaddress(): change,
        }
        utxo_dicts = [{
            'txid': utxo['txid'],
            'vout': utxo['vout'],
        }]
        signedtxres = self.nodes[2].signrawtransactionwithwallet(
            self.nodes[2].createrawtransaction(utxo_dicts, recipient_dict))
        assert signedtxres['complete']

        signedtx = signedtxres['hex']

        # send from nodes[1]; this will end up in aa1
        txid1 = self.nodes[1].sendrawtransaction(signedtx)

        # generate bb1-bb2 on right side
        self.nodes[2].generate(2)

        # send from nodes[2]; this will end up in bb3
        txid2 = self.nodes[2].sendrawtransaction(signedtx)
        assert_equal(txid1, txid2)

        # generate on both sides; nodes[2]'s side wins the reorg.
        lastblockhash = self.nodes[1].generate(3)[2]
        self.nodes[2].generate(2)

        self.join_network()

        self.sync_all()

        # gettransaction should work for txid1
        self.nodes[0].gettransaction(txid1)

        # listsinceblock(lastblockhash) should now include txid1 in transactions
        # as well as in removed
        lsbres = self.nodes[0].listsinceblock(lastblockhash)
        assert any(tx['txid'] == txid1 for tx in lsbres['transactions'])
        assert any(tx['txid'] == txid1 for tx in lsbres['removed'])

        # find transaction and ensure confirmations is valid
        for tx in lsbres['transactions']:
            if tx['txid'] == txid1:
                assert_equal(tx['confirmations'], 2)

        # the same check for the removed array; confirmations should STILL be 2
        for tx in lsbres['removed']:
            if tx['txid'] == txid1:
                assert_equal(tx['confirmations'], 2)
if __name__ == '__main__':
    # Run the functional test when this file is executed directly.
    ListSinceBlockTest().main()
| {
"content_hash": "7bc8a7941d28df270151c508e78ed329",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 106,
"avg_line_length": 34.11469534050179,
"alnum_prop": 0.5685017860895146,
"repo_name": "merelcoin/merelcoin",
"id": "53e671cd3b0ac1172a3b5eaba8511ba04e4606d8",
"size": "9732",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "test/functional/wallet_listsinceblock.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "694223"
},
{
"name": "C++",
"bytes": "6032230"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "196429"
},
{
"name": "Makefile",
"bytes": "2491551"
},
{
"name": "NSIS",
"bytes": "6834"
},
{
"name": "Objective-C",
"bytes": "6153"
},
{
"name": "Objective-C++",
"bytes": "6588"
},
{
"name": "Python",
"bytes": "1474453"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Roff",
"bytes": "2559606"
},
{
"name": "Shell",
"bytes": "886663"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function, absolute_import
import os
# Fix the multiprocessing issue with NumPy compiled against OPENBLAS
os.environ.setdefault('OPENBLAS_MAIN_FREE', '1')

# automatically set number of threads used by MKL and openblas to 1
# prevents errors when running things in parallel. Should be set
# by user directly in a script or notebook if >1 is needed.
# Must be set BEFORE importing NumPy
os.environ.setdefault('MKL_NUM_THREADS', '1')
os.environ.setdefault('OPENBLAS_NUM_THREADS', '1')
import sys
import warnings
import qutip.settings
import qutip.version
from qutip.version import version as __version__
from qutip.utilities import _version2int
# -----------------------------------------------------------------------------
# Check if we're in IPython.
try:
    __IPYTHON__
    qutip.settings.ipython = True
except NameError:
    # __IPYTHON__ is only injected into builtins by an IPython session; a
    # plain interpreter raises NameError here. A bare except would also
    # swallow KeyboardInterrupt/SystemExit, so catch only the expected error.
    qutip.settings.ipython = False
# -----------------------------------------------------------------------------
# Check for minimum requirements of dependencies, give the user a warning
# if the requirements aren't fulfilled
#
numpy_requirement = "1.6.0"
try:
    import numpy
    # Old-but-present numpy only warns; the import still succeeds.
    if _version2int(numpy.__version__) < _version2int(numpy_requirement):
        print("QuTiP warning: old version of numpy detected " +
              ("(%s), requiring %s." %
               (numpy.__version__, numpy_requirement)))
except ImportError:
    # Only a missing numpy should be downgraded to a warning; any other
    # error (previously hidden by a bare except) must propagate.
    warnings.warn("numpy not found.")
scipy_requirement = "0.11.0"
try:
    import scipy
    # Old-but-present scipy only warns; the import still succeeds.
    if _version2int(scipy.__version__) < _version2int(scipy_requirement):
        print("QuTiP warning: old version of scipy detected " +
              ("(%s), requiring %s." %
               (scipy.__version__, scipy_requirement)))
except ImportError:
    # Only a missing scipy should be downgraded to a warning; any other
    # error (previously hidden by a bare except) must propagate.
    warnings.warn("scipy not found.")
# -----------------------------------------------------------------------------
# check to see if running from install directory for released versions.
#
top_path = os.path.dirname(os.path.dirname(__file__))
try:
    setup_file = open(top_path + '/setup.py', 'r')
except IOError:
    # No setup.py next to the package: normal installed layout, nothing to do.
    pass
else:
    try:
        # Line 2 of setup.py names the project; finding it means we are
        # running from the source tree, which shadows the installed package
        # for released builds.
        if ('QuTiP' in setup_file.readlines()[1][3:]) and qutip.version.release:
            print("You are in the installation directory. " +
                  "Change directories before running QuTiP.")
    finally:
        # Always release the handle; the original leaked it when readlines()
        # or the check raised.
        setup_file.close()
del top_path
# -----------------------------------------------------------------------------
# setup the cython environment
#
# Minimum Cython version required for on-the-fly .pyx compilation.
_cython_requirement = "0.15.0"
try:
    import Cython
    if _version2int(Cython.__version__) < _version2int(_cython_requirement):
        print("QuTiP warning: old version of cython detected " +
              ("(%s), requiring %s." %
               (Cython.__version__, _cython_requirement)))
    import pyximport
    # NOTE(review): this overwrites any user-set CFLAGS unconditionally, and
    # -march=native makes compiled modules non-portable — confirm intended.
    os.environ['CFLAGS'] = '-O3 -w -ffast-math -march=native -mfpmath=sse'
    # Enable transparent compilation of .pyx modules on import, with numpy
    # headers available.
    pyximport.install(setup_args={'include_dirs': [numpy.get_include()]})
except Exception as e:
    # Cython support is optional; report the failure but keep importing.
    print("QuTiP warning: Cython setup failed: " + str(e))
else:
    # Drop bootstrap-only names from the module namespace on success.
    del Cython, pyximport
# -----------------------------------------------------------------------------
# Load user configuration if present: override defaults.
#
try:
    if os.name == "nt":
        # Windows: rc file lives under the per-user APPDATA directory.
        # NOTE(review): os.getenv('APPDATA') may be None, making join raise;
        # the outer except absorbs that case.
        qutip_rc_file = os.path.join(
            os.getenv('APPDATA'), 'qutip', "qutiprc"
        )
    else:
        qutip_rc_file = os.path.join(
            # This should possibly be changed to ~/.config/qutiprc,
            # to follow XDG specs. Also, OS X uses a different naming
            # convention as well.
            os.environ['HOME'], ".qutiprc"
        )
    qutip.settings.load_rc_file(qutip_rc_file)
except Exception as e:
    # Best-effort: a missing or invalid rc file must not abort the import.
    try:
        qutip.settings._logger.warning("Error loading RC file.", exc_info=1)
    except:
        # Logging may itself be unavailable this early; stay silent.
        pass
# -----------------------------------------------------------------------------
# cpu/process configuration
#
import multiprocessing

# Check if environ flag for qutip processes is set
if 'QUTIP_NUM_PROCESSES' in os.environ:
    # The environment variable overrides the settings default.
    os.environ['QUTIP_NUM_PROCESSES'] and None  # noqa: placeholder removed
# -----------------------------------------------------------------------------
# Load configuration from environment variables: override defaults and
# configuration file.
#

# check for fortran mcsolver files
try:
    from qutip.fortran import mcsolve_f90
except ImportError:
    # Fortran extension not built/installed; only a missing module should
    # disable the feature — other errors (previously hidden by a bare
    # except) must propagate.
    qutip.settings.fortran = False
else:
    qutip.settings.fortran = True

# check for scikits.umfpack
try:
    import scikits.umfpack as umfpack
except ImportError:
    qutip.settings.umfpack = False
else:
    qutip.settings.umfpack = True
    # Probe-only import; drop the name from the module namespace.
    del umfpack
# -----------------------------------------------------------------------------
# Check that import modules are compatible with requested configuration
#

# Check for Matplotlib
try:
    import matplotlib
except ImportError:
    # Only a missing matplotlib should be downgraded to a warning; any other
    # error (previously hidden by a bare except) must propagate.
    warnings.warn("matplotlib not found: Graphics will not work.")
else:
    # Probe-only import; drop the name from the module namespace.
    del matplotlib
# -----------------------------------------------------------------------------
# Clean name space
#
# Remove bootstrap-only modules so they are not re-exported from the qutip
# package namespace; the submodules imported below re-import what they need.
del os, sys, numpy, scipy, multiprocessing
# -----------------------------------------------------------------------------
# Load modules
#
# core
from qutip.qobj import *
from qutip.states import *
from qutip.operators import *
from qutip.expect import *
from qutip.tensor import *
from qutip.superoperator import *
from qutip.superop_reps import *
from qutip.subsystem_apply import *
from qutip.graph import *
# graphics
from qutip.bloch import *
from qutip.visualization import *
from qutip.orbital import *
from qutip.bloch3d import *
from qutip.matplotlib_utilities import *
# library functions
from qutip.tomography import *
from qutip.wigner import *
from qutip.random_objects import *
from qutip.simdiag import *
from qutip.entropy import *
from qutip.metrics import *
from qutip.partial_transpose import *
from qutip.permute import *
from qutip.continuous_variables import *
from qutip.distributions import *
from qutip.three_level_atom import *
# evolution
from qutip.solver import *
from qutip.rhs_generate import *
from qutip.mesolve import *
from qutip.sesolve import *
from qutip.mcsolve import *
from qutip.stochastic import *
from qutip.essolve import *
from qutip.eseries import *
from qutip.propagator import *
from qutip.floquet import *
from qutip.bloch_redfield import *
from qutip.steadystate import *
from qutip.correlation import *
from qutip.countstat import *
# quantum information
from qutip.qip import *
# utilities
from qutip.parallel import *
from qutip.utilities import *
from qutip.fileio import *
from qutip.about import *
# perturbation theory
from qutip.perturbation import *
| {
"content_hash": "b6e3af707450c4dcc002024343d4c231",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 79,
"avg_line_length": 29.628691983122362,
"alnum_prop": 0.621048134434634,
"repo_name": "zasdfgbnm/qutip",
"id": "7dbb98d4f3fcb20401200f3d15c8ef640840d35d",
"size": "8837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qutip/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "FORTRAN",
"bytes": "259707"
},
{
"name": "Makefile",
"bytes": "3079"
},
{
"name": "Python",
"bytes": "1733190"
},
{
"name": "Shell",
"bytes": "2931"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.