"""
Simple trigrams-based text generation
"""
__version__ = '0.1.0'
import re
import json
from random import sample
class TrigramsDB(object):
"""
A trigrams database. It has two main methods: ``feed``, to initialize it
with some existing text, and ``generate``, to generate some new text. The
more text you feed it, the more varied the generated text will be.
"""
_WSEP = '###' # words separator
def __init__(self, dbfile=None):
"""
Initialize a new trigrams database. If ``dbfile`` is given, the
database is read and written from/to this file.
"""
self.dbfile = dbfile
self._load()
def save(self, output=None):
"""
Save the database to a file. If ``output`` is not given, the ``dbfile``
given in the constructor is used.
"""
if output is None:
if self.dbfile is None:
return
output = self.dbfile
with open(output, 'w') as f:
f.write(self._dump())
def feed(self, text=None, source=None):
"""
Feed some text to the database, either from a string (``text``) or a
file (``source``).
>>> db = TrigramsDB()
>>> db.feed("This is my text")
>>> db.feed(source="some/file.txt")
"""
if text is not None:
words = re.split(r'\s+', text)
wlen = len(words)
for i in range(wlen - 2):
self._insert(words[i:i+3])
if source is not None:
with open(source, 'r') as f:
self.feed(f.read())
def generate(self, **kwargs):
"""
Generate some text from the database. By default at most 70 words are
generated, but you can change this using keyword arguments.
Keyword arguments:
- ``wlen``: maximum length (words)
- ``words``: a list of words to use to begin the text with
"""
words = list(map(self._sanitize, kwargs.get('words', [])))
max_wlen = kwargs.get('wlen', 70)
wlen = len(words)
if wlen < 2:
if not self._db:
return ''
if wlen == 0:
# list() because random.sample() rejects dict views on newer Python versions
words = sample(list(self._db.keys()), 1)[0].split(self._WSEP)
elif wlen == 1:
spl = [k for k in self._db.keys()
if k.startswith(words[0]+self._WSEP)]
words.append(sample(spl, 1)[0].split(self._WSEP)[1])
wlen = 2
while wlen < max_wlen:
next_word = self._get(words[-2], words[-1])
if next_word is None:
break
words.append(next_word)
wlen += 1
return ' '.join(words)
def _load(self):
"""
Load the database from its ``dbfile`` if it has one
"""
if self.dbfile is not None:
with open(self.dbfile, 'r') as f:
self._db = json.loads(f.read())
else:
self._db = {}
def _dump(self):
"""
Return a string version of the database, which can then be used by
``_load`` to get the original object back.
"""
return json.dumps(self._db)
def _get(self, word1, word2):
"""
Return a possible next word after ``word1`` and ``word2``, or ``None``
if there's no possibility.
"""
key = self._WSEP.join([self._sanitize(word1), self._sanitize(word2)])
key = key.lower()
if key not in self._db:
return
return sample(self._db[key], 1)[0]
def _sanitize(self, word):
"""
Sanitize a word for insertion in the DB
"""
return word.replace(self._WSEP, '')
def _insert(self, trigram):
"""
Insert a trigram in the DB
"""
words = list(map(self._sanitize, trigram))
key = self._WSEP.join(words[:2]).lower()
next_word = words[2]
self._db.setdefault(key, [])
# we could use a set here, but sets are not serializable in JSON. This
# is the same reason we use dicts instead of defaultdicts.
if next_word not in self._db[key]:
self._db[key].append(next_word)
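# --- Illustrative usage (added sketch, not part of the original module) ---
# A minimal round trip through feed/generate/save; the sample text and the
# "db.json" file name are arbitrary placeholders.
if __name__ == '__main__':
    db = TrigramsDB()
    db.feed("the quick brown fox jumps over the lazy dog "
            "and the quick brown fox naps under the old oak tree")
    print(db.generate(wlen=12, words=["the", "quick"]))
    db.dbfile = 'db.json'
    db.save()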
|
{
"content_hash": "75b4174aee374d388a502a4a90646bf3",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 79,
"avg_line_length": 28.857142857142858,
"alnum_prop": 0.5162659123055162,
"repo_name": "bfontaine/trigrams",
"id": "486202bb9ba2e51cc44966f872d3cf1675034fcf",
"size": "4267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trigrams/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13687"
}
],
"symlink_target": ""
}
|
import errno
import hashlib
import io
import logging
import os
from pprint import pformat
import re
from six.moves.urllib.parse import quote
import time
import xml.etree.ElementTree as XML
import jenkins
from jenkins_jobs.alphanum import AlphanumSort
from jenkins_jobs.cache import JobCache
from jenkins_jobs.constants import MAGIC_MANAGE_STRING
from jenkins_jobs.parallel import concurrent
from jenkins_jobs import utils
__all__ = [
"JenkinsManager"
]
logger = logging.getLogger(__name__)
_DEFAULT_TIMEOUT = object()
class JenkinsManager(object):
def __init__(self, jjb_config):
url = jjb_config.jenkins['url']
user = jjb_config.jenkins['user']
password = jjb_config.jenkins['password']
timeout = jjb_config.jenkins['timeout']
if timeout != _DEFAULT_TIMEOUT:
self.jenkins = jenkins.Jenkins(url, user, password, timeout)
else:
self.jenkins = jenkins.Jenkins(url, user, password)
self.cache = JobCache(jjb_config.jenkins['url'],
flush=jjb_config.builder['flush_cache'])
self._plugins_list = jjb_config.builder['plugins_info']
self._jobs = None
self._job_list = None
self._views = None
self._view_list = None
self._jjb_config = jjb_config
def _setup_output(self, output, item, config_xml=False):
output_dir = output
output_fn = os.path.join(output, item)
if '/' in item:
# in item folder
output_fn = os.path.join(output, os.path.normpath(item))
output_dir = os.path.dirname(output_fn)
# if in a folder, re-add the job name to the directory here
if config_xml:
output_dir = os.path.join(
output_dir, os.path.basename(item))
output_fn = os.path.join(output_dir, 'config.xml')
if output_dir != output:
logger.debug("Creating directory %s" % output_dir)
try:
os.makedirs(output_dir)
except OSError:
if not os.path.isdir(output_dir):
raise
return output_fn
@property
def jobs(self):
if self._jobs is None:
# populate jobs
self._jobs = self.jenkins.get_all_jobs()
return self._jobs
@property
def job_list(self):
if self._job_list is None:
# python-jenkins uses 'fullname' for folder/name combination
self._job_list = set(job['fullname'] for job in self.jobs)
return self._job_list
def _job_format(self, job_name):
# returns job name or url based on config option
if self._jjb_config.builder['print_job_urls']:
return self._jjb_config.jenkins['url'] + \
'/job/' + quote(
'/job/'.join(job_name.split('/')).encode('utf8')) + '/'
else:
return job_name
def _view_format(self, view_name):
# returns view name or url based on config option
if self._jjb_config.builder['print_job_urls']:
parts = view_name.split('/')
return self._jjb_config.jenkins['url'] + \
''.join(['/job/' + item for item in parts[:-1]]) + \
'/view/' + parts[-1] + '/'
else:
return view_name
def update_job(self, job_name, xml):
if self.is_job(job_name):
logger.info("Reconfiguring jenkins job {0}".format(
self._job_format(job_name)))
self.jenkins.reconfig_job(job_name, xml)
else:
logger.info("Creating jenkins job {0}".format(
self._job_format(job_name)))
self.jenkins.create_job(job_name, xml)
def is_job(self, job_name, use_cache=True):
if use_cache:
if job_name in self.job_list:
return True
return self.jenkins.job_exists(job_name)
def get_job_md5(self, job_name):
xml = self.jenkins.get_job_config(job_name)
return hashlib.md5(xml.encode('utf-8')).hexdigest()
def delete_job(self, job_name):
if self.is_job(job_name):
logger.info("Deleting jenkins job {0}".format(job_name))
self.jenkins.delete_job(job_name)
def get_plugins_info(self):
""" Return a list of plugin_info dicts, one for each plugin on the
Jenkins instance.
"""
try:
plugins_list = self.jenkins.get_plugins().values()
except jenkins.JenkinsException as e:
if re.search("(Connection refused|Forbidden)", str(e)):
logger.warning(
"Unable to retrieve Jenkins Plugin Info from {0},"
" using default empty plugins info list.".format(
self.jenkins.server))
plugins_list = [{'shortName': '',
'version': '',
'longName': ''}]
else:
raise
logger.debug("Jenkins Plugin Info {0}".format(pformat(plugins_list)))
return plugins_list
def get_jobs(self, cache=True):
if not cache:
self._jobs = None
self._job_list = None
return self.jobs
def is_managed(self, job_name):
xml = self.jenkins.get_job_config(job_name)
try:
out = XML.fromstring(xml)
description = out.find(".//description").text
return description.endswith(MAGIC_MANAGE_STRING)
except (TypeError, AttributeError):
pass
return False
@property
def plugins_list(self):
if self._plugins_list is None:
self._plugins_list = self.get_plugins_info()
return self._plugins_list
def delete_old_managed(self, keep=None):
jobs = self.get_jobs()
deleted_jobs = 0
if keep is None:
keep = []
for job in jobs:
# python-jenkins stores the folder and name as 'fullname'
# Check if the job was deleted when its parent folder was deleted
if job['fullname'] not in keep and \
self.is_job(job['fullname'], use_cache=False):
if self.is_managed(job['fullname']):
logger.info("Removing obsolete jenkins job {0}"
.format(job['fullname']))
self.delete_job(job['fullname'])
deleted_jobs += 1
else:
logger.info("Not deleting unmanaged jenkins job %s",
job['fullname'])
else:
logger.debug("Keeping job %s", job['fullname'])
return deleted_jobs
def delete_jobs(self, jobs):
if jobs is not None:
logger.info("Removing jenkins job(s): %s" % ", ".join(jobs))
for job in jobs:
self.delete_job(job)
if self.cache.is_cached(job):
self.cache.set(job, '')
self.cache.save()
def delete_all_jobs(self):
jobs = self.get_jobs()
logger.info("Number of jobs to delete: %d", len(jobs))
script = ('for(job in jenkins.model.Jenkins.theInstance.getAllItems())'
' { job.delete(); }')
self.jenkins.run_script(script)
# Need to clear the JJB cache after deletion
self.cache.clear()
def changed(self, job):
md5 = job.md5()
changed = (self._jjb_config.builder['ignore_cache'] or
self.cache.has_changed(job.name, md5))
if not changed:
logger.debug("'{0}' has not changed".format(job.name))
return changed
def exists(self, job):
exists = self.jenkins.job_exists(job.name)
if not exists:
logger.debug("'{0}' does not currently exist".format(job.name))
return exists
def update_jobs(self, xml_jobs, output=None, n_workers=None,
existing_only=None, config_xml=False):
orig = time.time()
logger.info("Number of jobs generated: %d", len(xml_jobs))
xml_jobs.sort(key=AlphanumSort)
if (output and not hasattr(output, 'write') and
not os.path.isdir(output)):
logger.debug("Creating directory %s" % output)
try:
os.makedirs(output)
except OSError:
if not os.path.isdir(output):
raise
if output:
# ensure only wrapped once
if hasattr(output, 'write'):
output = utils.wrap_stream(output)
for job in xml_jobs:
if hasattr(output, 'write'):
# `output` is a file-like object
logger.info("Job name: %s", job.name)
logger.debug("Writing XML to '{0}'".format(output))
try:
output.write(job.output())
except IOError as exc:
if exc.errno == errno.EPIPE:
# EPIPE could happen if piping output to something
# that doesn't read the whole input (e.g.: the UNIX
# `head` command)
return
raise
continue
output_fn = self._setup_output(output, job.name, config_xml)
logger.debug("Writing XML to '{0}'".format(output_fn))
with io.open(output_fn, 'w', encoding='utf-8') as f:
f.write(job.output().decode('utf-8'))
return xml_jobs, len(xml_jobs)
# Filter out the jobs that did not change
logging.debug('Filtering %d jobs for changed jobs',
len(xml_jobs))
step = time.time()
jobs = [job for job in xml_jobs
if self.changed(job)]
logging.debug("Filtered for changed jobs in %ss",
(time.time() - step))
if existing_only:
# Filter out jobs that do not already exist on the Jenkins server
logging.debug('Filtering %d jobs for existing jobs',
len(jobs))
step = time.time()
jobs = [job for job in jobs
if self.exists(job)]
logging.debug("Filtered for existing jobs in %ss",
(time.time() - step))
if not jobs:
return [], 0
# Update the jobs
logging.debug('Updating jobs')
step = time.time()
p_params = [{'job': job} for job in jobs]
results = self.parallel_update_job(
n_workers=n_workers,
concurrent=p_params)
logging.debug("Parsing results")
# generalize the result parsing, as a concurrent job always returns a
# list
if len(p_params) in (1, 0):
results = [results]
for result in results:
if isinstance(result, Exception):
raise result
elif not self._jjb_config.builder['ignore_cache']:
# update in-memory cache
j_name, j_md5 = result
self.cache.set(j_name, j_md5)
if not self._jjb_config.builder['ignore_cache']:
# write cache to disk
self.cache.save()
logging.debug("Updated %d jobs in %ss",
len(jobs),
time.time() - step)
logging.debug("Total run took %ss", (time.time() - orig))
return jobs, len(jobs)
@concurrent
def parallel_update_job(self, job):
self.update_job(job.name, job.output().decode('utf-8'))
return (job.name, job.md5())
################
# View related #
################
@property
def views(self):
if self._views is None:
# populate views
self._views = self.jenkins.get_views()
return self._views
@property
def view_list(self):
if self._view_list is None:
self._view_list = set(view['name'] for view in self.views)
return self._view_list
def get_views(self, cache=True):
if not cache:
self._views = None
self._view_list = None
return self.views
def is_view(self, view_name):
# first use cache
if view_name in self.view_list:
return True
# if not exists, use jenkins
return self.jenkins.view_exists(view_name)
def delete_view(self, view_name):
if self.is_view(view_name):
logger.info("Deleting jenkins view {}".format(view_name))
self.jenkins.delete_view(view_name)
def delete_views(self, views):
if views is not None:
logger.info("Removing jenkins view(s): %s" % ", ".join(views))
for view in views:
self.delete_view(view)
if self.cache.is_cached(view):
self.cache.set(view, '')
self.cache.save()
def delete_all_views(self):
views = self.get_views()
# Jenkins requires at least one view present. Don't remove the first
# view as it is likely the default view.
views.pop(0)
logger.info("Number of views to delete: %d", len(views))
for view in views:
self.delete_view(view['name'])
# Need to clear the JJB cache after deletion
self.cache.clear()
def update_view(self, view_name, xml):
if self.is_view(view_name):
logger.info("Reconfiguring jenkins view {0}".format(
self._view_format(view_name)))
self.jenkins.reconfig_view(view_name, xml)
else:
logger.info("Creating jenkins view {0}".format(
self._view_format(view_name)))
self.jenkins.create_view(view_name, xml)
def update_views(self, xml_views, output=None, n_workers=None,
existing_only=None, config_xml=False):
orig = time.time()
logger.info("Number of views generated: %d", len(xml_views))
xml_views.sort(key=AlphanumSort)
if output:
# ensure only wrapped once
if hasattr(output, 'write'):
output = utils.wrap_stream(output)
for view in xml_views:
if hasattr(output, 'write'):
# `output` is a file-like object
logger.info("View name: %s", view.name)
logger.debug("Writing XML to '{0}'".format(output))
try:
output.write(view.output())
except IOError as exc:
if exc.errno == errno.EPIPE:
# EPIPE could happen if piping output to something
# that doesn't read the whole input (e.g.: the UNIX
# `head` command)
return
raise
continue
output_fn = self._setup_output(output, view.name, config_xml)
logger.debug("Writing XML to '{0}'".format(output_fn))
with io.open(output_fn, 'w', encoding='utf-8') as f:
f.write(view.output().decode('utf-8'))
return xml_views, len(xml_views)
# Filter out the views that did not change
logging.debug('Filtering %d views for changed views',
len(xml_views))
step = time.time()
views = [view for view in xml_views
if self.changed(view)]
logging.debug("Filtered for changed views in %ss",
(time.time() - step))
if existing_only:
# Filter out views that do not already exist on the Jenkins server
logging.debug('Filtering %d views for existing views',
len(views))
step = time.time()
views = [view for view in views
if self.exists(view)]
logging.debug("Filtered for existing views in %ss",
(time.time() - step))
if not views:
return [], 0
# Update the views
logging.debug('Updating views')
step = time.time()
p_params = [{'view': view} for view in views]
results = self.parallel_update_view(
n_workers=n_workers,
concurrent=p_params)
logging.debug("Parsing results")
# generalize the result parsing, as a concurrent view always returns a
# list
if len(p_params) in (1, 0):
results = [results]
for result in results:
if isinstance(result, Exception):
raise result
else:
# update in-memory cache
v_name, v_md5 = result
self.cache.set(v_name, v_md5)
# write cache to disk
self.cache.save()
logging.debug("Updated %d views in %ss",
len(views),
time.time() - step)
logging.debug("Total run took %ss", (time.time() - orig))
return views, len(views)
@concurrent
def parallel_update_view(self, view):
self.update_view(view.name, view.output().decode('utf-8'))
return (view.name, view.md5())
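# --- Illustrative usage (added sketch, not part of the original module) ---
# JenkinsManager only needs an object exposing the ``jenkins`` and ``builder``
# dicts read in __init__ above; in real use that object is a jenkins_jobs
# JJBConfig.  The URL, credentials and job name below are placeholders, and
# the calls require a reachable Jenkins instance.
#
#   class StubConfig(object):
#       jenkins = {'url': 'http://localhost:8080', 'user': 'admin',
#                  'password': 'secret', 'timeout': _DEFAULT_TIMEOUT}
#       builder = {'flush_cache': False, 'plugins_info': None,
#                  'print_job_urls': False, 'ignore_cache': True}
#
#   manager = JenkinsManager(StubConfig())
#   manager.update_job('example-job', '<project>...</project>')
#   manager.delete_jobs(['example-job'])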
|
{
"content_hash": "0d5a0b75e7b7ae393c84b8ed24c32612",
"timestamp": "",
"source": "github",
"line_count": 487,
"max_line_length": 79,
"avg_line_length": 35.64065708418891,
"alnum_prop": 0.5277409690614737,
"repo_name": "gforcada/jenkins-job-builder",
"id": "60685a9583d36ea4308ac85e96aa0d313f6fdaa8",
"size": "17992",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jenkins_jobs/builder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "465"
},
{
"name": "C++",
"bytes": "737"
},
{
"name": "HTML",
"bytes": "144"
},
{
"name": "PHP",
"bytes": "334"
},
{
"name": "Pawn",
"bytes": "57"
},
{
"name": "Python",
"bytes": "1335234"
},
{
"name": "Shell",
"bytes": "7552"
},
{
"name": "SourcePawn",
"bytes": "63"
}
],
"symlink_target": ""
}
|
r"""Tool to inspect a model."""
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
from PIL import Image
import tensorflow as tf
import hparams_config
import utils
from tf2 import infer_lib
flags.DEFINE_string('model_name', 'efficientdet-d0', 'Model.')
flags.DEFINE_enum('mode', 'infer',
['infer', 'dry', 'export', 'benchmark', 'video'], 'Run mode.')
flags.DEFINE_string('trace_filename', None, 'Trace file name.')
flags.DEFINE_integer('bm_runs', 10, 'Number of benchmark runs.')
flags.DEFINE_enum('tensorrt', '', ['', 'FP32', 'FP16', 'INT8'],
'TensorRT mode.')
flags.DEFINE_integer('batch_size', 1, 'Batch size for inference.')
flags.DEFINE_integer('image_size', -1, 'Input image size for inference.')
flags.DEFINE_string('model_dir', '_', 'checkpoint dir used for eval.')
flags.DEFINE_string('export_ckpt', None, 'Output model ckpt path.')
flags.DEFINE_string(
'hparams', '', 'Comma separated k=v pairs of hyperparameters or a module'
' containing attributes to use as hyperparameters.')
flags.DEFINE_string('input_image', None, 'Input image path for inference.')
flags.DEFINE_string('output_image_dir', None, 'Output dir for inference.')
# For video.
flags.DEFINE_string('input_video', None, 'Input video path for inference.')
flags.DEFINE_string('output_video', None,
'Output video path. If None, play it online instead.')
# For saved model.
flags.DEFINE_string('saved_model_dir', None, 'Folder path for saved model.')
flags.DEFINE_enum('tflite', '', ['', 'FP32', 'FP16', 'INT8'], 'tflite type.')
flags.DEFINE_string('file_pattern', None,
'Glob for tfrecords, e.g. coco/val-*.tfrecord.')
flags.DEFINE_integer(
'num_calibration_steps', 500,
'Number of post-training quantization calibration steps to run.')
flags.DEFINE_bool('debug', False, 'Debug mode.')
flags.DEFINE_bool(
'use_xla', False,
'Use XLA even if strategy is not tpu. If strategy is tpu, always use XLA,'
' and this flag has no effect.')
flags.DEFINE_bool('only_network', False, 'Model only contains network')
FLAGS = flags.FLAGS
def main(_):
tf.config.run_functions_eagerly(FLAGS.debug)
tf.config.optimizer.set_jit(FLAGS.use_xla)
devices = tf.config.list_physical_devices('GPU')
for device in devices:
tf.config.experimental.set_memory_growth(device, True)
model_config = hparams_config.get_detection_config(FLAGS.model_name)
model_config.override(FLAGS.hparams) # Add custom overrides
model_config.is_training_bn = False
if FLAGS.image_size != -1:
model_config.image_size = FLAGS.image_size
model_config.image_size = utils.parse_image_size(model_config.image_size)
model_params = model_config.as_dict()
if FLAGS.mode == 'export':
driver = infer_lib.KerasDriver(FLAGS.model_dir, FLAGS.debug,
FLAGS.model_name, FLAGS.batch_size or None,
FLAGS.only_network, model_params)
if not FLAGS.saved_model_dir:
raise ValueError('Please specify --saved_model_dir=')
model_dir = FLAGS.saved_model_dir
if tf.io.gfile.exists(model_dir):
tf.io.gfile.rmtree(model_dir)
driver.export(model_dir, FLAGS.tensorrt, FLAGS.tflite, FLAGS.file_pattern,
FLAGS.num_calibration_steps)
print('Model exported to %s' % model_dir)
elif FLAGS.mode == 'infer':
driver = infer_lib.ServingDriver.create(
FLAGS.model_dir, FLAGS.debug, FLAGS.saved_model_dir, FLAGS.model_name,
FLAGS.batch_size or None, FLAGS.only_network, model_params)
image_file = tf.io.read_file(FLAGS.input_image)
image_arrays = tf.io.decode_image(
image_file, channels=3, expand_animations=False)
image_arrays = tf.expand_dims(image_arrays, axis=0)
detections_bs = driver.serve(image_arrays)
boxes, scores, classes, _ = tf.nest.map_structure(np.array, detections_bs)
img = driver.visualize(
np.array(image_arrays)[0],
boxes[0],
classes[0],
scores[0],
min_score_thresh=model_config.nms_configs.score_thresh or 0.4,
max_boxes_to_draw=model_config.nms_configs.max_output_size)
output_image_path = os.path.join(FLAGS.output_image_dir, '0.jpg')
Image.fromarray(img).save(output_image_path)
print('writing file to %s' % output_image_path)
elif FLAGS.mode == 'benchmark':
driver = infer_lib.ServingDriver.create(
FLAGS.model_dir, FLAGS.debug, FLAGS.saved_model_dir, FLAGS.model_name,
FLAGS.batch_size or None, FLAGS.only_network, model_params)
batch_size = FLAGS.batch_size or 1
if FLAGS.input_image:
image_file = tf.io.read_file(FLAGS.input_image)
image_arrays = tf.io.decode_image(
image_file, channels=3, expand_animations=False)
image_arrays = tf.image.resize_with_pad(image_arrays,
*model_config.image_size)
image_arrays = tf.cast(tf.expand_dims(image_arrays, 0), tf.uint8)
if batch_size > 1:
image_arrays = tf.tile(image_arrays, [batch_size, 1, 1, 1])
else:
# use synthetic data if no image is provided.
image_arrays = tf.ones((batch_size, *model_config.image_size, 3),
dtype=tf.uint8)
if FLAGS.only_network:
image_arrays, _ = driver._preprocess(image_arrays)
driver.benchmark(image_arrays, FLAGS.bm_runs, FLAGS.trace_filename)
elif FLAGS.mode == 'dry':
# transfer to tf2 format ckpt
driver = infer_lib.KerasDriver(FLAGS.model_dir, FLAGS.debug,
FLAGS.model_name, FLAGS.batch_size or None,
FLAGS.only_network, model_params)
if FLAGS.export_ckpt:
driver.model.save_weights(FLAGS.export_ckpt)
elif FLAGS.mode == 'video':
import cv2 # pylint: disable=g-import-not-at-top
driver = infer_lib.ServingDriver.create(
FLAGS.model_dir, FLAGS.debug, FLAGS.saved_model_dir, FLAGS.model_name,
FLAGS.batch_size or None, FLAGS.only_network, model_params)
cap = cv2.VideoCapture(FLAGS.input_video)
if not cap.isOpened():
print('Error opening input video: {}'.format(FLAGS.input_video))
out_ptr = None
if FLAGS.output_video:
frame_width, frame_height = int(cap.get(3)), int(cap.get(4))
out_ptr = cv2.VideoWriter(FLAGS.output_video,
cv2.VideoWriter_fourcc('m', 'p', '4', 'v'),
cap.get(5), (frame_width, frame_height))
while cap.isOpened():
# Capture frame-by-frame
ret, frame = cap.read()
if not ret:
break
raw_frames = np.array([frame])
detections_bs = driver.serve(raw_frames)
boxes, scores, classes, _ = tf.nest.map_structure(np.array, detections_bs)
new_frame = driver.visualize(
raw_frames[0],
boxes[0],
classes[0],
scores[0],
min_score_thresh=model_config.nms_configs.score_thresh or 0.4,
max_boxes_to_draw=model_config.nms_configs.max_output_size)
if out_ptr:
# write frame into output file.
out_ptr.write(new_frame)
else:
# show the frame online, mainly used for real-time speed test.
cv2.imshow('Frame', new_frame)
# Press Q on keyboard to exit
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
app.run(main)
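# --- Example invocations (added sketch; file paths are placeholders, flags
# --- as defined above) ---
#
# Single-image inference:
#   python -m tf2.inspector --mode=infer --model_name=efficientdet-d0 \
#     --model_dir=/tmp/efficientdet-d0 --input_image=/tmp/img.png \
#     --output_image_dir=/tmp/out
#
# Export a saved model (optionally converted with --tensorrt or --tflite):
#   python -m tf2.inspector --mode=export --model_name=efficientdet-d0 \
#     --model_dir=/tmp/efficientdet-d0 --saved_model_dir=/tmp/saved_model
#
# Benchmark with synthetic input when --input_image is omitted:
#   python -m tf2.inspector --mode=benchmark --model_name=efficientdet-d0 \
#     --model_dir=/tmp/efficientdet-d0 --bm_runs=10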
|
{
"content_hash": "b6ed18526dcde2f8a182187b3067cca0",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 80,
"avg_line_length": 40.70652173913044,
"alnum_prop": 0.6443257676902536,
"repo_name": "google/automl",
"id": "ae0112686cedbe81f07da9fc803a0893731d08df",
"size": "8172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "efficientdet/tf2/inspector.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1782347"
},
{
"name": "Python",
"bytes": "1051435"
},
{
"name": "Shell",
"bytes": "1708"
}
],
"symlink_target": ""
}
|
"""slotview is an interactive tool to analyze slotmode data.
Its input is either the same input given to slotphot or the output produced by slotphot.
"""
# Ensure python 2.5 compatibility
from __future__ import with_statement
import os, time, math
import numpy as np
import scipy as sp
from pyraf import iraf
from pyraf.iraf import pysalt
import saltprint, salttime
import slottool as st
import Tkinter as Tk
from matplotlib.widgets import Cursor, SpanSelector, Slider, CheckButtons
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
# Gui library imports
from PyQt4 import QtGui, QtCore
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg
# Salt imports
from saltgui import ImageDisplay, MplCanvas
from salterror import SaltIOError
import saltsafeio as saltio
import saltsafekey as saltkey
from saltsafelog import logging
from SlotViewWindow import SlotViewWindow
debug=True
# Make sure the plotting functions work with an older version of matplotlib
try:
import matplotlib.pyplot as plt
except ImportError:
import matplotlib.pylab as plt
def slotview(newfits, indata, fileout, srcfile, fps=10.0, phottype='square', sigdet=5, contpix=10, \
driftlimit=10, clobber=True,logfile='slotview.log',verbose=True):
#set up the variables
status = 0
entries = []
vig_lo = {}
vig_hi = {}
hour = 0
min = 0
sec = 0.
time0 = 0.
nframes = 0
sleep=0
with logging(logfile,debug) as log:
#enter in the input data
saltio.fileexists(newfits)
#set the sleep parameter
if fps>0: sleep=1.0/(fps)
# read in the data file
id, time, ratio, rerr, tx, ty, tflux, terr, cx, cy, cflux, cerr=st.readlcfile(indata)
# read extraction region definition file
amp, x, y, x_o, y_o, r, br1, br2=st.readsrcfile(srcfile)
#determine the size of the data arrays
struct = saltio.openfits(newfits)
naxis1 = saltkey.get('NAXIS1',struct[1])
naxis2 = saltkey.get('NAXIS2',struct[1])
# Plot all of the data and the first image
# Create GUI
App = QtGui.QApplication([])
aw=SlotViewWindow(struct, id, tflux, cflux, ratio, time, phottype, sleep, \
tx, ty, cx, cy, r, br1, br2, naxis1, naxis2, sigdet, contpix, driftlimit)
aw.show()
# Start application event loop
app_exit=App.exec_()
# Check if GUI was executed successfully
if app_exit!=0:
raise SaltIOError('Slotview GUI has unexpected exit status '+str(app_exit))
ratio, tflux, cflux, gframe, newphot=aw.ratio, aw.tflux, aw.cflux, aw.goodframes, aw.newphot
#close the input file
saltio.closefits(struct)
# Update the indata file if necessary
lc=saltio.openascii(fileout,'w')
for i in range(len(ratio)):
x['target']=tx[i]
x['comparison']=cx[i]
y['target']=ty[i]
y['comparison']=cy[i]
reltime=False
if gframe[i]:
st.writedataout(lc, id[i], time[i], x, y, tflux[i], terr[i], \
cflux[i], cerr[i], ratio[i], rerr[i], time[0], reltime)
saltio.closeascii(lc)
# -----------------------------------------------------------
# Plot the data
class makeplotdata(QtGui.QMainWindow):
def __init__(self, struct, pid, tflux, cflux, ratio, time, phottype, sleep, vig_lo, vig_hi, \
tx, ty, cx, cy, r, br1, br2, naxis1, naxis2, clobber, logfile, verbose):
"""As the data is measured, plots the target and companion, the drift, both light curves and the ratio
returns status
"""
#set up the variables
status=0
maxcolumn=7
self.struct = struct
self.infile=struct._HDUList__file.name
self.pid=pid
self.dtime=time.copy()
self.tflux=tflux
self.cflux=cflux
self.ratio=ratio
self.min_xlim=10
self.radius=r['comparison']
self.r=r
self.br1=br1
self.br2=br2
self.tx=tx
self.ty=ty
self.cx=cx
self.cy=cy
self.phottype=phottype
self.naxis1=naxis1
self.naxis2=naxis2
self.logfile=logfile
self.clobber=clobber
self.verbose=verbose
self.fft=False
self.stopplay=False
self.sleep=sleep
self.zbox=[]
self.newphot=0
self.npoint=4
if self.phottype=='circular':
self.npoint=24
if status==0:
self.id=0
self.nframes=len(self.struct)
self.header=self.struct[int(self.pid[self.id])].header
self.goodframes=self.dtime*0+1
# Setup widget
QtGui.QMainWindow.__init__(self)
# Set main widget
self.main = QtGui.QWidget(self)
# Set window title
self.setWindowTitle("Slotview: "+self.infile)
#self.root.bind("<Destroy>", self.destroy)
#self.root.bind("D", self.deleteframe)
#self.root.bind("u", self.undeleteframe)
#self.root.bind("n", self.call_playone)
#self.root.bind("b", self.call_revone)
#self.root.bind("?", self.help)
#self.root.bind("q", self.destroy)
#self.root.bind("<Button-1>", self.callback)
#set up the variables for which graphs to plot
#self.ratiovar=Tk.IntVar(master=self.root, value=1)
#self.star1var=Tk.IntVar(master=self.root, value=0)
#self.star2var=Tk.IntVar(master=self.root, value=0)
#self.slotfig=plt.figure(figsize=(8,1.5),dpi=72)
#plot the data
#self.plotdataarray()
#self.lcfig=plt.figure(figsize=(8,5),dpi=72)
#plot the light curve
#self.lcx1=self.dtime.min()
#self.lcx2=self.dtime.max()
#self.plotlightcurve()
inrow=4
lcrow=0
pcrow=1
darow=2
cprow=3
qurow=5
#add light curve plot
#self.lccanvas = FigureCanvasTkAgg(self.lcfig, master=self.root)
#self.lccanvas.show()
#self.lccanvas.get_tk_widget().grid(row = lcrow, column = 0, columnspan = maxcolumn, sticky = 'news')
#self.lccanvas.mpl_connect('button_press_event',self.lcpickstar)
#self.lccanvas.mpl_connect('motion_notify_event',self.lcdrawbox)
#self.lccanvas.mpl_connect('button_release_event',self.lczoom)
#add data array plot
#self.canvas = FigureCanvasTkAgg(self.slotfig, master=self.root)
#self.canvas.show()
#self.canvas.blit()
#self.canvas.get_tk_widget().grid(row = darow, column = 0, columnspan = maxcolumn, sticky = 'news')
#self.canvas.mpl_connect('key_press_event',self.newphoto)
#add the control widget
#self.cpFrame = Tk.Frame(master=self.root)
#self.cpFrame.grid(row=cprow, column=0, columnspan=maxcolumn, sticky='ew')
#self.frevbutton = Tk.Button(master=self.cpFrame, text='< <', width=5, command=self.freverse)
#self.frevbutton.grid(row=0, column=0, sticky='ew')
#self.revbutton = Tk.Button(master=self.cpFrame, text='<',width=5, command=self.reverse)
#self.revbutton.grid(row=0, column=1, sticky='ew')
#self.rev1button = Tk.Button(master=self.cpFrame, text='-',width=5, command=self.revone)
#self.rev1button.grid(row=0, column=2, sticky='ew')
#self.play1button = Tk.Button(master=self.cpFrame, text='+',width=5, command=self.playone)
#self.play1button.grid(row=0, column=4, sticky='ew')
#self.playbutton = Tk.Button(master=self.cpFrame, text='>',width=5, command=self.play)
#self.playbutton.grid(row=0, column=5, sticky='ew')
#self.fplaybutton = Tk.Button(master=self.cpFrame, text='> >',width=5, command=self.fplay)
#self.fplaybutton.grid(row=0, column=6, sticky='ew')
#self.stopbutton = Tk.Button(master=self.cpFrame, text='Stop',width=5, command=self.stop)
#self.stopbutton.grid(row=0, column=3, sticky='ew')
#add the information panel
#self.idtext= Tk.StringVar(master=self.root )
#self.imgtext= Tk.StringVar(master=self.root )
#self.timetext= Tk.StringVar(master=self.root )
#self.idLabel = Tk.Label(master=self.root, fg='#000000',textvariable=self.idtext, relief='solid')
#self.idLabel.grid(row=inrow, column=0, sticky='ew')
#self.imgLabel = Tk.Label(master=self.root, textvariable=self.imgtext, relief='solid')
#self.imgLabel.grid(row=inrow, column=1, columnspan=3, sticky='ew')
#self.timeLabel = Tk.Label(master=self.root, textvariable=self.timetext, relief='solid')
#self.timeLabel.grid(row=inrow, column=4, columnspan=3, sticky='ew')
#self.setinfolabels()
#add the plot control panel
#self.ratiobutton=Tk.Checkbutton(master=self.root, text='Flux Ratio', variable=self.ratiovar, \
# command=self.calllccheck)
#self.ratiobutton.grid(row=pcrow, column=0, sticky='ew')
#self.star1button=Tk.Checkbutton(master=self.root, text='Star1 Flux', variable=self.star1var, \
# command=self.calllccheck)
#self.star1button.grid(row=pcrow, column=1, sticky='ew')
#self.star2button=Tk.Checkbutton(master=self.root, text='Star2 Flux', variable=self.star2var, \
# command=self.calllccheck)
#self.star2button.grid(row=pcrow, column=2, sticky='ew')
#self.resetbutton = Tk.Button(master=self.root, text='Reset', command=self.callreset)
#self.resetbutton.grid(row=pcrow, column=6, sticky='ew')
#self.savebutton = Tk.Button(master=self.root, text='save', command=self.callsave)
#self.savebutton.grid(row=pcrow, column=5, sticky='ew')
#add the quit button
#self.quFrame = Tk.Frame(master=self.root)
#self.quFrame.grid(row=qurow, column=0, columnspan=maxcolumn, sticky='ew')
#self.exitbutton = Tk.Button(master=self.quFrame, text='Quit', command=self.exit)
#self.exitbutton.grid(row=0, column=3, sticky='ew')
#create the tabs
self.tabWidget=QtGui.QTabWidget()
#layout the widgets
mainLayout = QtGui.QVBoxLayout(self.main)
mainLayout.addWidget(self.tabWidget)
# Set the main widget as the central widget
self.setCentralWidget(self.main)
# Destroy widget on close
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
return
def runplotdata(self):
Tk.mainloop()
def destroy(self, e):
self.root.quit()
return
def exit(self):
self.root.quit()
return
def help(self, e):
"""Print the help message and the key-bindings available to the user"""
helpmessage="""
The following commands are available to the user:
? - Print this information q - quit the viewer
n - Move to the next image b - move back an image
D - Delete this image u - undelete this image
p - Perform photometry on this image
P - Perform photometry starting at this image
stop button-Stop photometry or display
reset button-Reset the light curve plot
save button-Save the current light curve plot
quit button-Quit the application
Right Click-Display image corresponding to this time
Left Click and Drag-In light curve plot, zoom in on this region
"""
print helpmessage
return
def setinfolabels(self):
"""Set the text labels according to the current object displayed.
Use the image header information if it is available.
"""
#set the id number
self.idtext.set(int(self.pid[self.id]))
#set the image name
oname=''
try:
oname=self.struct[int(self.pid[self.id])].header['ONAME']
oext=self.struct[int(self.pid[self.id])].header['OEXT']
oname=oname+'[%i]'%oext
except Exception, e:
try:
oname=self.struct[0].header['OBJECT']
except:
pass
self.imgtext.set(oname)
#set the time
try:
utime=self.struct[int(self.pid[self.id])].header['UTC-OBS']
self.timetext.set(utime)
except:
self.timetext.set('')
return
def calllccheck(self):
#turn the ratio curve on and off
if self.ratiovar.get():
self.lightcurve.set_visible(True)
else:
self.lightcurve.set_visible(False)
#turn the star1 curve on and off
if self.star1var.get():
self.star1curve.set_visible(True)
else:
self.star1curve.set_visible(False)
#turn the star2 curve on and off
if self.star2var.get():
self.star2curve.set_visible(True)
else:
self.star2curve.set_visible(False)
self.lcy1, self.lcy2, ylabel=self.lcylimits()
self.light_plot.set_ylim(self.lcy1, self.lcy2)
self.light_plot.set_ylabel(ylabel)
self.lccanvas.draw()
def lcylimits(self):
"""Determine the y-limts depending on what plots are selected """
mask = (self.dtime > self.lcx1)*(self.dtime<self.lcx2)*(self.goodframes>0)
if self.ratiovar.get():
rarr=np.compress(mask,self.ratio)
y1=rarr.min()
y2=rarr.max()
ylabel='Star1/Star2'
else:
if self.star2var.get() and self.star1var.get():
cfarr=np.compress(mask,self.cflux).max()
tfarr=np.compress(mask,self.tflux).max()
y1=0
y2=cfarr < tfarr and tfarr or cfarr
ylabel='Star Flux'
elif self.star2var.get():
cfarr=np.compress(mask,self.cflux)
y1=0
y2=cfarr.max()
ylabel='Star2 Flux'
else:
tfarr=np.compress(mask,self.tflux)
y1=0
y2=tfarr.max()
ylabel='Star1 Flux'
return y1, y2, ylabel
def newphoto(self, e):
"""program to being new photometry"""
if e.key=='c' and e.xdata and e.ydata:
cx=e.xdata
cy=e.ydata
cr=self.radius
image=self.struct[int(self.pid[self.id])].data
cimage, cx, cy = st.calcdrift(image, cx, cy, cr, self.naxis1, self.naxis2)
if cx >= 0 and cy >= 0:
self.cx[self.id]=cx
self.cy[self.id]=cy
self.updatedataplot()
if e.key=='t' and e.xdata and e.ydata:
tx=e.xdata
ty=e.ydata
tr=self.radius
image=self.struct[int(self.pid[self.id])].data
timage, tx, ty = st.calcdrift(image, tx, ty, tr, self.naxis1, self.naxis2)
if tx >= 0 and ty >= 0:
self.tx[self.id]=tx
self.ty[self.id]=ty
self.updatedataplot()
if e.key=='p':
self.redophot(self.id)
#self.updatelightcurve()
#self.lccanvas.draw()
self.lcfig.delaxes(self.light_plot)
self.plotlightcurve()
self.lccanvas.draw()
#self.callreset()
if e.key=='P':
nstart=self.id+1
nend=self.nframes-1
self.redophot(self.id)
self.stopplay=True
i=nstart
while i < nend and self.stopplay:
image=self.struct[int(self.pid[self.id])].data
# these may be changed
sigdet=5
contpix=10
sigback=3
driftlimit=10
iter=3
carray, fx,fy,status = st.finddrift(image, self.cx[i-1], self.cy[i-1], self.radius, \
self.naxis1, self.naxis2, sigdet, contpix, sigback, driftlimit, iter, self.logfile)
if fx > -1 and fy > -1:
if fx < self.naxis1 and fy < self.naxis2:
dx=self.cx[i-1]-fx
dy=self.cy[i-1]-fy
self.cx[i]=fx
self.cy[i]=fy
self.tx[i]=self.tx[i-1]-dx
self.ty[i]=self.ty[i-1]-dy
else:
message='Not able to perform photometry'
print message
return
else:
message='Not able to perform photometry'
print message
return
self.redophot(i)
self.lcfig.delaxes(self.light_plot)
self.plotlightcurve()
self.lccanvas.draw()
if self.dtime[i] < self.lcx1 or self.dtime[i] > self.lcx2: self.callreset()
#self.updatelightcurve()
#self.lccanvas.draw()
self.root.update()
if not self.stopplay: self.updatedataplot()
i += 1
def redophot(self, id):
self.newphot=1
self.id=id
x={}
y={}
x['target']=self.tx[self.id]
y['target']=self.ty[self.id]
x['comparison']=self.cx[self.id]
y['comparison']=self.cy[self.id]
image=self.struct[int(self.pid[self.id])].data
#these will need to be changed
gain=1
rdnoise=1
verbose=False
tflux, tflux_err, cflux, cflux_err, ratio, ratio_err, status = \
st.dophot(self.phottype, image, x, y, self.r, self.br1, self.br2, \
gain, rdnoise, self.naxis1, self.naxis2)
if status==0:
self.tflux[self.id]=tflux
self.cflux[self.id]=cflux
self.ratio[self.id]=ratio
def lcpickstar(self, e):
if e.button==1 and e.xdata:
self.id=self.findtime(e.xdata)+1
self.updatedataplot()
if e.button==3 and e.xdata:
self.xt1 = e.xdata
self.yt1 = self.lcy1
def lcdrawbox(self, e):
if e.button==3 and e.xdata:
self.xt2=e.xdata
self.yt2=self.lcy2
xp=[self.xt1, self.xt1, self.xt2, self.xt2]
yp=[self.yt1, self.yt2, self.yt2, self.yt1]
if self.zbox:
self.zbox.set_visible(False)
self.zbox,=self.light_plot.fill(xp, yp, fc='#777777', ec='#FF0000', alpha=0.5,visible=True)
self.lccanvas.draw()
def lczoom(self, e):
"""Handles time axis zoom on the light curve.
Once the 3-button is released, it will capture the new position and replot the zoomed in curve"""
if e.button==3 and e.xdata:
self.xt2=e.xdata
self.yt2=self.lcy2
if self.xt2<self.xt1:
xtemp=self.xt1
self.xt1=self.xt2
self.xt2=xtemp
self.lcx1=self.xt1
self.lcx2=self.xt2
self.lcy1=self.yt1
self.lcy2=self.yt2
if self.lcx2-self.lcx1>0:
self.lcfig.delaxes(self.light_plot)
self.plotlightcurve()
if self.zbox:
self.zbox.set_visible(False)
self.lccanvas.draw()
def callsave(self):
"""Save a copy of the lc curve to a .ps file"""
self.sroot=Tk.Tk()
self.sroot.wm_title("Save File as:")
TitleLabel = Tk.Label(master=self.sroot, text='Please enter a filename for the output PS file', border=5)
TitleLabel.grid(row=0, column=0, columnspan=2, sticky='ew')
nameLabel = Tk.Label(master=self.sroot, text='Filename:', relief='solid')
nameLabel.grid(row=1, column=0, sticky='ew')
self.nametext=Tk.StringVar(master=self.sroot)
nameEntry = Tk.Entry(master=self.sroot, textvariable=self.nametext)
nameEntry.grid(row=1, column=1, sticky='ew')
nameEntry.focus_set()
self.sroot.bind('<Return>', self._finishcallsave)
return
def _finishcallsave(self, e):
status=0
self.sroot.destroy()
name=self.nametext.get()
if not name: return
if name[-3:]!='.ps': name=name+'.ps'
#remove the file if the name already exists
if saltio.filedoesnotexist(name,self.verbose, self.logfile):
if self.clobber:
os.remove(name)
else:
message = 'ERROR -- SALTVIEW: File ' + name + ' already exists, use clobber=y'
status = saltprint.err(self.logfile,message)
return
#turn the red dot off in the graph
self.light_point.set_visible(False)
#save the figure
self.lcfig.savefig(name)
#turn the red dot on in the graph
self.light_point.set_visible(True)
def callreset(self):
self.lcx1=self.dtime.min()
self.lcx2=self.dtime.max()
self.lcfig.delaxes(self.light_plot)
self.plotlightcurve()
self.lccanvas.draw()
def undeleteframe(self, e):
self.goodframes[self.id] = 1
message='SALTPHOT: Extension %i was undeleted' % self.pid[self.id]
saltprint.log(self.logfile, message, self.verbose)
def deleteframe(self, e):
self.newphot=1
self.goodframes[self.id] = 0
message='SALTPHOT: Extension %i was deleted' % self.pid[self.id]
saltprint.log(self.logfile, message, self.verbose)
def callback(self, e):
print e.x, e.y
def stop(self):
self.stopplay=False
def call_playone(self, e):
self.playone()
def call_revone(self, e):
self.revone()
def playone(self):
stopid = self.nframes-2
if self.id < (stopid): self.id=self.id+1
self.updatedataplot()
def play(self):
self.stopplay=True
stopid = self.nframes-2
while self.stopplay and self.id < stopid:
self.id = self.id+1
time.sleep(self.sleep)
self.updatedataplot()
self.root.update()
def fplay(self):
self.stopplay=True
stopid = self.nframes-2
while self.stopplay and self.id < stopid:
self.id = self.id+1
self.updatedataplot()
self.root.update()
def revone(self):
if self.id > 0: self.id=self.id-1
self.updatedataplot()
def reverse(self):
self.stopplay=True
while self.stopplay and self.id > 0:
self.id = self.id-1
time.sleep(self.sleep)
self.updatedataplot()
self.root.update()
def freverse(self):
self.stopplay=True
while self.stopplay and self.id > 0:
self.id = self.id-1
self.updatedataplot()
self.root.update()
def callsetfft(self, label):
if label=='FFT':
self.fft=(not self.fft)
self.plotfft()
def plotfft(self):
fftfig=plt.figure(figsize=(8,8),dpi=72)
axfft=fftfig.add_axes([0.10,0.10,0.8,0.50], autoscale_on=True)
mask = (self.dtime > self.lcx1)*(self.dtime<self.lcx2)
tarr=np.compress(mask,self.dtime)
rarr=np.compress(mask,self.ratio)
#ftarr=np.fft.fft(tarr)
ftarr=np.arange(len(tarr))
frarr=np.fft.fft(rarr)
axfft.hold(True)
fftcurve=axfft.plot(ftarr,frarr,linewidth=0.5,linestyle='-',marker='',color='b')
plt.show()
def slide_update(self, val):
self.id=self.findtime(val)
self.updatedataplot()
def plotdataarray(self):
"""Plot the image array
return axes
"""
self.ob_plot = self.slotfig.add_axes([0.10,0.10,0.8,0.80], autoscale_on=True)
plt.setp(plt.gca(),xticks=[],yticks=[])
plt.jet()
self.array=self.struct[int(self.pid[self.id])].data
self.imarr=self.ob_plot.imshow(self.array,origin='lower')
#Plot the apertures
self.cbox,=self.plotbox('#00FF00',self.cx[self.id],self.cy[self.id],self.radius,self.npoint,self.naxis1, self.naxis2)
self.tbox,=self.plotbox('#FFFF00',self.tx[self.id],self.ty[self.id],self.radius,self.npoint,self.naxis1, self.naxis2)
def updatedataplot(self):
"""Handle updating the light curve plot and the data array plot when the
data array image is changed
"""
#update the information panel
self.setinfolabels()
self.ptime=self.dtime[self.id]
self.pratio=self.ratio[self.id]
#Check to make the red button hasn't moved outside of the plotting area
if self.ptime < self.lcx1 or self.ptime > self.lcx2: self.callreset()
#update the red piont on the light curve plot
self.light_point.set_data(np.asarray([self.ptime]), np.asarray([self.pratio]))
#self.lcfig.delaxes(self.light_plot)
#self.plotlightcurve()
self.lccanvas.draw()
#update the data plot
self.array=self.struct[int(self.pid[self.id])].data
self.imarr.set_data(self.array)
#Plot the apertures
self.updateplotbox(self.cbox,'#00FF00',self.cx[self.id],self.cy[self.id],self.radius,self.npoint,self.naxis1, self.naxis2)
self.updateplotbox(self.tbox,'#FFFF00',self.tx[self.id],self.ty[self.id],self.radius,self.npoint,self.naxis1, self.naxis2)
self.canvas.draw()
def updateplotbox(self,box, bcolor,x,y,r,npoint, nx1,nx2):
apx,apy=self.makeplotbox(x,y,r,npoint,nx1,nx2)
box.set_data(apx, apy)
box.set_color(bcolor)
def makeplotbox(self,x,y,r,npoint, nx1,nx2):
apx=np.zeros(npoint+1)
apy=np.zeros(npoint+1)
for i in range(npoint+1):
theta=math.pi/4+2*i*math.pi/npoint
apx[i]=st.checkedge(x+math.cos(theta)*r,0,nx1)
apy[i]=st.checkedge(y+math.sin(theta)*r,0,nx2)
return apx, apy
def plotbox(self,bcolor,x,y,r,npoint, nx1,nx2):
apx,apy=self.makeplotbox(x,y,r, npoint, nx1,nx2)
return self.ob_plot.plot(apx,apy,ls='-',color=bcolor,lw=2,marker='')
def updatelightcurve(self):
mask = (self.dtime > self.lcx1)*(self.dtime<self.lcx2)*(self.goodframes>0)
tarr=np.compress(mask,self.dtime)
rarr=np.compress(mask,self.ratio)
self.lightcurve.set_data(tarr, rarr)
self.ptime=self.dtime[self.id]
self.pratio=self.ratio[self.id]
self.light_point.set_data(np.asarray([self.ptime]), np.asarray([self.pratio]))
def plotlightcurve(self):
"""Plot the light curve
return ax
"""
mask = (self.dtime > self.lcx1)*(self.dtime<self.lcx2)*(self.goodframes>0)
tarr=np.compress(mask,self.dtime)
rarr=np.compress(mask,self.ratio)
tfarr=np.compress(mask,self.tflux)
cfarr=np.compress(mask,self.cflux)
self.lcy1,self.lcy2, ylabel=self.lcylimits()
self.light_plot = self.lcfig.add_axes([0.10,0.10,0.8,0.80], autoscale_on=False, adjustable='datalim' )
self.light_plot.hold(True)
#plot the curve
self.lightcurve,=self.light_plot.plot(tarr,rarr,linewidth=0.5,linestyle='-',marker='',color='b')
if self.ratiovar.get():
self.lightcurve.set_visible(True)
else:
self.lightcurve.set_visible(False)
#plot the flux curve for star 1
self.star1curve,=self.light_plot.plot(tarr,tfarr,linewidth=0.5,linestyle='-',marker='',color='y')
if self.star1var.get():
self.star1curve.set_visible(True)
else:
self.star1curve.set_visible(False)
#plot the flux curve for star 2
self.star2curve,=self.light_plot.plot(tarr,cfarr,linewidth=0.5,linestyle='-',marker='',color='g')
if self.star2var.get():
self.star2curve.set_visible(True)
else:
self.star2curve.set_visible(False)
#plot a point which matches the time
self.ptime=self.dtime[self.id]
self.pratio=self.ratio[self.id]
self.light_point,=self.light_plot.plot(np.asarray([self.ptime]), np.asarray([self.pratio]), linestyle='', marker='o', mec='#FF0000', mfc='#FF0000')
self.light_plot.set_xlim(self.lcx1, self.lcx2)
self.light_plot.set_ylim(self.lcy1, self.lcy2)
self.light_plot.set_ylabel(ylabel)
self.light_plot.set_xlabel('Time (s)')
def findtime(self, x):
arr=abs(self.dtime-x)
return arr.argmin()
def onselect(self, vmin, vmax):
pass
def release(self, event):
ax=event.inaxes
if ax==self.light_plot and abs(event.xdata-self.xt)>self.min_xlim:
if (event.xdata > self.xt):
self.lcx1=self.xt
self.lcx2=event.xdata
else:
self.lcx2=self.xt
self.lcx1=event.xdata
self.plotlightcurve()
del self.stime
self.stime = plt.Slider(self.axslid, 'Time', self.lcx1, self.lcx2, valinit=1, valfmt='%1.2f')
self.stime.on_changed(self.slide_update)
def onpress(self, event):
if event.button!=1: return
ax=event.inaxes
if ax==self.light_plot:
self.xt = event.xdata
self.stime.set_val(self.xt)
self.id=self.findtime(self.xt)
self.updatedataplot()
# -----------------------------------------------------------
# main code
parfile = iraf.osfn("slottools$slotview.par")
t = iraf.IrafTaskFactory(taskname="slotview",value=parfile,function=slotview, pkgname='slottools')
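# --- Illustrative call (added sketch, not part of the original module) ---
# Arguments follow the slotview() signature above; the file names are
# placeholders for the slotphot products described in the module docstring.
#
#   slotview('slot_images.fits', 'slotphot_lc.txt', 'slotview_lc.txt',
#            'source_regions.txt', fps=10.0, phottype='square', sigdet=5,
#            contpix=10, driftlimit=10, clobber=True,
#            logfile='slotview.log', verbose=True)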
|
{
"content_hash": "075672b0ad7fe5954299ffdff4124586",
"timestamp": "",
"source": "github",
"line_count": 858,
"max_line_length": 155,
"avg_line_length": 34.616550116550115,
"alnum_prop": 0.5812598902393858,
"repo_name": "crawfordsm/pysalt",
"id": "b55cac0ff9fbd8fcaf6aeedef9e36d284a7168c6",
"size": "32089",
"binary": false,
"copies": "1",
"ref": "refs/heads/placeholder",
"path": "slottools/old_slotview.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9334"
},
{
"name": "Common Lisp",
"bytes": "19932"
},
{
"name": "Makefile",
"bytes": "856"
},
{
"name": "Python",
"bytes": "1381161"
},
{
"name": "Smalltalk",
"bytes": "271"
}
],
"symlink_target": ""
}
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#import sys #(just for version number)
#import matplotlib #(just for version number)
#print('Python version ' + sys.version)
#print('Pandas version ' + pd.__version__)
#print('Matplotlib version ' + matplotlib.__version__)
# In[ ]:
file_name = '../crumb_data.csv'
df = pd.read_csv(file_name, names=['Drug','Channel','Experiment','Concentration','Inhibition'],skiprows=1)
df
# In[ ]:
drug_and_channel = df[['Concentration','Inhibition']][df['Drug'] == 'Amiodarone'][df['Channel'] == 'Cav1.2']
drug_and_channel
drug_and_channel.values
# In[ ]:
drugs = df.Drug.unique()
print(drugs)
# In[ ]:
channels = df.Channel.unique()
print(channels)
# In[ ]:
for drug in drugs:
for channel in channels:
drug_and_channel_values = df[['Concentration','Inhibition']][df['Drug'] == drug][df['Channel'] == channel]
print(drug,channel)
print(drug_and_channel_values)
# In[ ]:
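# --- Illustrative alternative (added sketch, not in the original notebook) ---
# The nested drug/channel loop above can also be written with a single
# groupby; only the column names from the read_csv call are assumed.
for (drug, channel), group in df.groupby(['Drug', 'Channel']):
    print(drug, channel)
    print(group[['Concentration', 'Inhibition']])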
|
{
"content_hash": "59cd0cc4084e53d420e7a20371db86ff",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 114,
"avg_line_length": 19.294117647058822,
"alnum_prop": 0.6504065040650406,
"repo_name": "mirams/PyHillFit",
"id": "10a6c8d96c3675a7b145309c06a21c2867f37d1a",
"size": "1012",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/Crumb_data/Crumb_data_loading.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "22389"
},
{
"name": "MATLAB",
"bytes": "13290"
},
{
"name": "Python",
"bytes": "127118"
}
],
"symlink_target": ""
}
|
import sys
from django import template
from django.template import Node, TemplateSyntaxError, Variable
from django.template.base import render_value_in_context
from django.utils import six
from django.utils.safestring import SafeData, mark_safe
from wagtailsystemtext.utils import systemtext
from wagtailsystemtext.models import SystemString
register = template.Library()
class TranslateNode(Node):
def __init__(self, filter_expression, group, asvar=None, default=None,
message_context=None):
self.noop = True
self.asvar = asvar
self.default = default
self.group = group
self.message_context = message_context
self.filter_expression = filter_expression
if isinstance(self.filter_expression.var, six.string_types):
self.filter_expression.var = Variable("'%s'" %
self.filter_expression.var)
def render(self, context):
self.filter_expression.var.translate = not self.noop
if self.message_context:
self.filter_expression.var.message_context = (
self.message_context.resolve(context))
output = self.filter_expression.resolve(context)
value = render_value_in_context(output, context)
# Restore percent signs. Percent signs in template text are doubled
# so they are not interpreted as string format flags.
is_safe = isinstance(value, SafeData)
value = value.replace('%%', '%')
value = systemtext(value, group=self.group, default=self.default)
value = mark_safe(value) if is_safe else value
if self.asvar:
context[self.asvar] = value
return ''
else:
return value
@register.tag("systemtext")
def do_systemtext(parser, token):
bits = token.split_contents()
message_string = parser.compile_filter(bits[1])
remaining = bits[2:]
asvar = None
default = None
message_context = None
seen = set()
group = SystemString.DEFAULT_GROUP
while remaining:
option = remaining.pop(0)
if option in seen:
raise TemplateSyntaxError(
"The '%s' option was specified more than once." % option,
)
elif option == 'group':
value = remaining.pop(0)[1:-1]
group = value
elif option == 'default':
value = remaining.pop(0)[1:-1]
default = value
elif option == 'as':
try:
value = remaining.pop(0)
except IndexError:
msg = "No argument provided to the '%s' tag for the as option." % bits[0]
six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
asvar = value
else:
raise TemplateSyntaxError(
"Unknown argument for '%s' tag: '%s'. The only options "
"available are 'noop', 'context' \"xxx\", and 'as VAR'." % (
bits[0], option,
)
)
seen.add(option)
return TranslateNode(message_string, group, asvar, default, message_context)
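# --- Illustrative template usage (added sketch, not part of this module) ---
# The parser above accepts the ``group``, ``default`` and ``as`` options, so
# a template could use the tag roughly like this (key, group and variable
# names are placeholders):
#
#   {% load systemtext %}
#   {% systemtext "footer.contact" group "footer" default "Contact us" as contact_label %}
#   {{ contact_label }}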
|
{
"content_hash": "c5d5e4972c55e590ffcec7082ec378a4",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 93,
"avg_line_length": 35.662921348314605,
"alnum_prop": 0.5979836168872086,
"repo_name": "Frojd/wagtail-systemtext",
"id": "8e93be570703b88f55e60d7d50c981f9bda6e0c1",
"size": "3174",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "wagtailsystemtext/templatetags/systemtext.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34200"
},
{
"name": "Shell",
"bytes": "1049"
}
],
"symlink_target": ""
}
|
"""Integrates Native Apps to Home Assistant."""
from homeassistant import config_entries
from homeassistant.const import CONF_WEBHOOK_ID
from homeassistant.components.webhook import async_register as webhook_register
from homeassistant.helpers import device_registry as dr, discovery
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .const import (ATTR_DEVICE_ID, ATTR_DEVICE_NAME,
ATTR_MANUFACTURER, ATTR_MODEL, ATTR_OS_VERSION,
DATA_BINARY_SENSOR, DATA_CONFIG_ENTRIES, DATA_DELETED_IDS,
DATA_DEVICES, DATA_SENSOR, DATA_STORE, DOMAIN, STORAGE_KEY,
STORAGE_VERSION)
from .http_api import RegistrationsView
from .webhook import handle_webhook
from .websocket_api import register_websocket_handlers
DEPENDENCIES = ['device_tracker', 'http', 'webhook']
REQUIREMENTS = ['PyNaCl==1.3.0']
async def async_setup(hass: HomeAssistantType, config: ConfigType):
"""Set up the mobile app component."""
store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
app_config = await store.async_load()
if app_config is None:
app_config = {
DATA_BINARY_SENSOR: {},
DATA_CONFIG_ENTRIES: {},
DATA_DELETED_IDS: [],
DATA_DEVICES: {},
DATA_SENSOR: {}
}
hass.data[DOMAIN] = {
DATA_BINARY_SENSOR: app_config.get(DATA_BINARY_SENSOR, {}),
DATA_CONFIG_ENTRIES: {},
DATA_DELETED_IDS: app_config.get(DATA_DELETED_IDS, []),
DATA_DEVICES: {},
DATA_SENSOR: app_config.get(DATA_SENSOR, {}),
DATA_STORE: store,
}
hass.http.register_view(RegistrationsView())
register_websocket_handlers(hass)
for deleted_id in hass.data[DOMAIN][DATA_DELETED_IDS]:
try:
webhook_register(hass, DOMAIN, "Deleted Webhook", deleted_id,
handle_webhook)
except ValueError:
pass
hass.async_create_task(discovery.async_load_platform(
hass, 'notify', DOMAIN, {}, config))
return True
async def async_setup_entry(hass, entry):
"""Set up a mobile_app entry."""
registration = entry.data
webhook_id = registration[CONF_WEBHOOK_ID]
hass.data[DOMAIN][DATA_CONFIG_ENTRIES][webhook_id] = entry
device_registry = await dr.async_get_registry(hass)
identifiers = {
(ATTR_DEVICE_ID, registration[ATTR_DEVICE_ID]),
(CONF_WEBHOOK_ID, registration[CONF_WEBHOOK_ID])
}
device = device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
identifiers=identifiers,
manufacturer=registration[ATTR_MANUFACTURER],
model=registration[ATTR_MODEL],
name=registration[ATTR_DEVICE_NAME],
sw_version=registration[ATTR_OS_VERSION]
)
hass.data[DOMAIN][DATA_DEVICES][webhook_id] = device
registration_name = 'Mobile App: {}'.format(registration[ATTR_DEVICE_NAME])
webhook_register(hass, DOMAIN, registration_name, webhook_id,
handle_webhook)
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry,
DATA_BINARY_SENSOR))
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, DATA_SENSOR))
return True
@config_entries.HANDLERS.register(DOMAIN)
class MobileAppFlowHandler(config_entries.ConfigFlow):
"""Handle a Mobile App config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_PUSH
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
placeholders = {
'apps_url':
'https://www.home-assistant.io/components/mobile_app/#apps'
}
return self.async_abort(reason='install_app',
description_placeholders=placeholders)
async def async_step_registration(self, user_input=None):
"""Handle a flow initialized during registration."""
return self.async_create_entry(title=user_input[ATTR_DEVICE_NAME],
data=user_input)
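# --- Illustrative registration data (added sketch, not part of this module) ---
# async_setup_entry() above reads these keys from ``entry.data``; the values
# are placeholders showing the expected shape only.
#
#   entry.data == {
#       CONF_WEBHOOK_ID: 'abcdef1234567890',
#       ATTR_DEVICE_ID: 'mobile-1234',
#       ATTR_DEVICE_NAME: 'Example Phone',
#       ATTR_MANUFACTURER: 'ExampleCorp',
#       ATTR_MODEL: 'Model X',
#       ATTR_OS_VERSION: '1.0',
#   }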
|
{
"content_hash": "79126a77a40b70f90961a92d4f4eec3c",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 79,
"avg_line_length": 35.00833333333333,
"alnum_prop": 0.6393715781956677,
"repo_name": "jamespcole/home-assistant",
"id": "a4ae78959cf3675e9ea22373c887ffd07aaf2e38",
"size": "4201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/mobile_app/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "826"
},
{
"name": "Python",
"bytes": "14822074"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
}
|
"""Add ``is_encrypted`` column to variable table
Revision ID: 1968acfc09e3
Revises: bba5a7cfc896
Create Date: 2016-02-02 17:20:55.692295
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '1968acfc09e3'
down_revision = 'bba5a7cfc896'
branch_labels = None
depends_on = None
airflow_version = '1.7.0'
def upgrade():
op.add_column('variable', sa.Column('is_encrypted', sa.Boolean, default=False))
def downgrade():
op.drop_column('variable', 'is_encrypted')
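# Roughly, the upgrade amounts to the following DDL (illustrative only; the
# exact statement depends on the database dialect):
#
#   ALTER TABLE variable ADD COLUMN is_encrypted BOOLEAN
#
# ``default=False`` above is a client-side default and is not part of the
# emitted DDL; a database-level default would require ``server_default`` on
# the Column (which this revision does not use).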
|
{
"content_hash": "14cbb33e43a06866177f47765359b8dc",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 83,
"avg_line_length": 21.423076923076923,
"alnum_prop": 0.7289048473967684,
"repo_name": "nathanielvarona/airflow",
"id": "61e660361bcd04554cc2590498e61540b9c9037e",
"size": "1344",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airflow/migrations/versions/0014_1_7_0_add_is_encrypted_column_to_variable_.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "70681"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173025"
},
{
"name": "JavaScript",
"bytes": "142848"
},
{
"name": "Jinja",
"bytes": "38895"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23169682"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211967"
},
{
"name": "TypeScript",
"bytes": "484556"
}
],
"symlink_target": ""
}
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import copy
import gyp.common
import os
import os.path
import re
import shlex
import subprocess
import sys
import tempfile
from gyp.common import GypError
# Populated lazily by XcodeVersion, for efficiency, and to fix an issue when
# "xcodebuild" is called too quickly (it has been found to return incorrect
# version number).
XCODE_VERSION_CACHE = None
# Populated lazily by GetXcodeArchsDefault, to an |XcodeArchsDefault| instance
# corresponding to the installed version of Xcode.
XCODE_ARCHS_DEFAULT_CACHE = None
def XcodeArchsVariableMapping(archs, archs_including_64_bit=None):
"""Constructs a dictionary with expansion for $(ARCHS_STANDARD) variable,
and optionally for $(ARCHS_STANDARD_INCLUDING_64_BIT)."""
mapping = {'$(ARCHS_STANDARD)': archs}
if archs_including_64_bit:
mapping['$(ARCHS_STANDARD_INCLUDING_64_BIT)'] = archs_including_64_bit
return mapping
class XcodeArchsDefault(object):
"""A class to resolve ARCHS variable from xcode_settings, resolving Xcode
macros and implementing filtering by VALID_ARCHS. The expansion of macros
depends on the SDKROOT used ("macosx", "iphoneos", "iphonesimulator") and
on the version of Xcode.
"""
# Match variable like $(ARCHS_STANDARD).
variable_pattern = re.compile(r'\$\([a-zA-Z_][a-zA-Z0-9_]*\)$')
def __init__(self, default, mac, iphonesimulator, iphoneos):
self._default = (default,)
self._archs = {'mac': mac, 'ios': iphoneos, 'iossim': iphonesimulator}
def _VariableMapping(self, sdkroot):
"""Returns the dictionary of variable mapping depending on the SDKROOT."""
sdkroot = sdkroot.lower()
if 'iphoneos' in sdkroot:
return self._archs['ios']
elif 'iphonesimulator' in sdkroot:
return self._archs['iossim']
else:
return self._archs['mac']
def _ExpandArchs(self, archs, sdkroot):
"""Expands variables references in ARCHS, and remove duplicates."""
variable_mapping = self._VariableMapping(sdkroot)
expanded_archs = []
for arch in archs:
if self.variable_pattern.match(arch):
variable = arch
try:
variable_expansion = variable_mapping[variable]
for arch in variable_expansion:
if arch not in expanded_archs:
expanded_archs.append(arch)
except KeyError as e:
print 'Warning: Ignoring unsupported variable "%s".' % variable
elif arch not in expanded_archs:
expanded_archs.append(arch)
return expanded_archs
def ActiveArchs(self, archs, valid_archs, sdkroot):
"""Expands variables references in ARCHS, and filter by VALID_ARCHS if it
is defined (if not set, Xcode accept any value in ARCHS, otherwise, only
values present in VALID_ARCHS are kept)."""
expanded_archs = self._ExpandArchs(archs or self._default, sdkroot or '')
if valid_archs:
filtered_archs = []
for arch in expanded_archs:
if arch in valid_archs:
filtered_archs.append(arch)
expanded_archs = filtered_archs
return expanded_archs
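# A minimal usage sketch of XcodeArchsDefault. Illustrative only: the arch
# lists below are hypothetical and do not describe any particular Xcode
# version.
def _ExampleXcodeArchsDefault():
  archs_default = XcodeArchsDefault(
      '$(ARCHS_STANDARD)',
      XcodeArchsVariableMapping(['x86_64']),          # mac
      XcodeArchsVariableMapping(['i386', 'x86_64']),  # iphonesimulator
      XcodeArchsVariableMapping(['armv7', 'arm64']))  # iphoneos
  # With ARCHS unset, the default variable is expanded for the given SDKROOT
  # and then filtered by VALID_ARCHS, yielding ['arm64'].
  return archs_default.ActiveArchs(None, ['arm64'], 'iphoneos')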
def GetXcodeArchsDefault():
"""Returns the |XcodeArchsDefault| object to use to expand ARCHS for the
installed version of Xcode. The default values used by Xcode for ARCHS
and the expansion of the variables depends on the version of Xcode used.
  For all versions prior to Xcode 5.0, and for Xcode 5.1 and later,
  $(ARCHS_STANDARD) is used if ARCHS is unset, while Xcode 5.0 to 5.0.2 uses
  $(ARCHS_STANDARD_INCLUDING_64_BIT). That variable was added in Xcode 5.0
  and deprecated in Xcode 5.1.
  For the "macosx" SDKROOT, all versions starting with Xcode 5.0 include the
  64-bit architecture as part of $(ARCHS_STANDARD) and default to building
  only it.
  For the "iphoneos" and "iphonesimulator" SDKROOTs, 64-bit architectures are
  part of $(ARCHS_STANDARD_INCLUDING_64_BIT) from Xcode 5.0. From Xcode 5.1,
  they are also part of $(ARCHS_STANDARD).
  All those rules are encoded in the construction of the |XcodeArchsDefault|
  object to use depending on the version of Xcode detected. The object is
  cached for performance reasons."""
global XCODE_ARCHS_DEFAULT_CACHE
if XCODE_ARCHS_DEFAULT_CACHE:
return XCODE_ARCHS_DEFAULT_CACHE
xcode_version, _ = XcodeVersion()
if xcode_version < '0500':
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD)',
XcodeArchsVariableMapping(['i386']),
XcodeArchsVariableMapping(['i386']),
XcodeArchsVariableMapping(['armv7']))
elif xcode_version < '0510':
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD_INCLUDING_64_BIT)',
XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
XcodeArchsVariableMapping(['i386'], ['i386', 'x86_64']),
XcodeArchsVariableMapping(
['armv7', 'armv7s'],
['armv7', 'armv7s', 'arm64']))
else:
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD)',
XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
XcodeArchsVariableMapping(['i386', 'x86_64'], ['i386', 'x86_64']),
XcodeArchsVariableMapping(
['armv7', 'armv7s', 'arm64'],
['armv7', 'armv7s', 'arm64']))
return XCODE_ARCHS_DEFAULT_CACHE
class XcodeSettings(object):
"""A class that understands the gyp 'xcode_settings' object."""
# Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached
# at class-level for efficiency.
_sdk_path_cache = {}
_platform_path_cache = {}
_sdk_root_cache = {}
# Populated lazily by GetExtraPlistItems(). Shared by all XcodeSettings, so
# cached at class-level for efficiency.
_plist_cache = {}
# Populated lazily by GetIOSPostbuilds. Shared by all XcodeSettings, so
# cached at class-level for efficiency.
_codesigning_key_cache = {}
def __init__(self, spec):
self.spec = spec
self.isIOS = False
self.mac_toolchain_dir = None
self.header_map_path = None
# Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
# This means self.xcode_settings[config] always contains all settings
# for that config -- the per-target settings as well. Settings that are
# the same for all configs are implicitly per-target settings.
self.xcode_settings = {}
configs = spec['configurations']
for configname, config in configs.iteritems():
self.xcode_settings[configname] = config.get('xcode_settings', {})
self._ConvertConditionalKeys(configname)
if self.xcode_settings[configname].get('IPHONEOS_DEPLOYMENT_TARGET',
None):
self.isIOS = True
# This is only non-None temporarily during the execution of some methods.
self.configname = None
# Used by _AdjustLibrary to match .a and .dylib entries in libraries.
self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
def _ConvertConditionalKeys(self, configname):
"""Converts or warns on conditional keys. Xcode supports conditional keys,
such as CODE_SIGN_IDENTITY[sdk=iphoneos*]. This is a partial implementation
with some keys converted while the rest force a warning."""
settings = self.xcode_settings[configname]
conditional_keys = [key for key in settings if key.endswith(']')]
for key in conditional_keys:
# If you need more, speak up at http://crbug.com/122592
if key.endswith("[sdk=iphoneos*]"):
if configname.endswith("iphoneos"):
new_key = key.split("[")[0]
settings[new_key] = settings[key]
else:
print 'Warning: Conditional keys not implemented, ignoring:', \
' '.join(conditional_keys)
del settings[key]
def _Settings(self):
assert self.configname
return self.xcode_settings[self.configname]
def _Test(self, test_key, cond_key, default):
return self._Settings().get(test_key, default) == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
if test_key in self._Settings():
lst.append(format_str % str(self._Settings()[test_key]))
elif default:
lst.append(format_str % str(default))
def _WarnUnimplemented(self, test_key):
if test_key in self._Settings():
print 'Warning: Ignoring not yet implemented key "%s".' % test_key
def IsBinaryOutputFormat(self, configname):
default = "binary" if self.isIOS else "xml"
format = self.xcode_settings[configname].get('INFOPLIST_OUTPUT_FORMAT',
default)
return format == "binary"
def IsIosFramework(self):
return self.spec['type'] == 'shared_library' and self._IsBundle() and \
self.isIOS
def _IsBundle(self):
return int(self.spec.get('mac_bundle', 0)) != 0 or self._IsXCTest()
def _IsXCTest(self):
return int(self.spec.get('mac_xctest_bundle', 0)) != 0
def _IsIosAppExtension(self):
return int(self.spec.get('ios_app_extension', 0)) != 0
def _IsIosWatchKitExtension(self):
return int(self.spec.get('ios_watchkit_extension', 0)) != 0
def _IsIosWatchApp(self):
return int(self.spec.get('ios_watch_app', 0)) != 0
def GetFrameworkVersion(self):
"""Returns the framework version of the current target. Only valid for
bundles."""
assert self._IsBundle()
return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
def GetWrapperExtension(self):
"""Returns the bundle extension (.app, .framework, .plugin, etc). Only
valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('loadable_module', 'shared_library'):
default_wrapper_extension = {
'loadable_module': 'bundle',
'shared_library': 'framework',
}[self.spec['type']]
wrapper_extension = self.GetPerTargetSetting(
'WRAPPER_EXTENSION', default=default_wrapper_extension)
return '.' + self.spec.get('product_extension', wrapper_extension)
elif self.spec['type'] == 'executable':
if self._IsIosAppExtension() or self._IsIosWatchKitExtension():
return '.' + self.spec.get('product_extension', 'appex')
else:
return '.' + self.spec.get('product_extension', 'app')
else:
assert False, "Don't know extension for '%s', target '%s'" % (
self.spec['type'], self.spec['target_name'])
def GetProductName(self):
"""Returns PRODUCT_NAME."""
return self.spec.get('product_name', self.spec['target_name'])
def GetFullProductName(self):
"""Returns FULL_PRODUCT_NAME."""
if self._IsBundle():
return self.GetWrapperName()
else:
return self._GetStandaloneBinaryPath()
def GetWrapperName(self):
"""Returns the directory name of the bundle represented by this target.
Only valid for bundles."""
assert self._IsBundle()
return self.GetProductName() + self.GetWrapperExtension()
def GetBundleContentsFolderPath(self):
"""Returns the qualified path to the bundle's contents folder. E.g.
Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
if self.isIOS:
return self.GetWrapperName()
assert self._IsBundle()
if self.spec['type'] == 'shared_library':
return os.path.join(
self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
else:
# loadable_modules have a 'Contents' folder like executables.
return os.path.join(self.GetWrapperName(), 'Contents')
def GetBundleResourceFolder(self):
"""Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles."""
assert self._IsBundle()
if self.isIOS:
return self.GetBundleContentsFolderPath()
return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
def GetBundlePlistPath(self):
"""Returns the qualified path to the bundle's plist file. E.g.
Chromium.app/Contents/Info.plist. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('executable', 'loadable_module'):
return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
else:
return os.path.join(self.GetBundleContentsFolderPath(),
'Resources', 'Info.plist')
def GetProductType(self):
"""Returns the PRODUCT_TYPE of this target."""
if self._IsIosAppExtension():
assert self._IsBundle(), ('ios_app_extension flag requires mac_bundle '
'(target %s)' % self.spec['target_name'])
return 'com.apple.product-type.app-extension'
if self._IsIosWatchKitExtension():
assert self._IsBundle(), ('ios_watchkit_extension flag requires '
'mac_bundle (target %s)' % self.spec['target_name'])
return 'com.apple.product-type.watchkit-extension'
if self._IsIosWatchApp():
assert self._IsBundle(), ('ios_watch_app flag requires mac_bundle '
'(target %s)' % self.spec['target_name'])
return 'com.apple.product-type.application.watchapp'
if self._IsBundle():
return {
'executable': 'com.apple.product-type.application',
'loadable_module': 'com.apple.product-type.bundle',
'shared_library': 'com.apple.product-type.framework',
}[self.spec['type']]
else:
return {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.apple.product-type.library.dynamic',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
}[self.spec['type']]
def GetMachOType(self):
"""Returns the MACH_O_TYPE of this target."""
# Weird, but matches Xcode.
if not self._IsBundle() and self.spec['type'] == 'executable':
return ''
return {
'executable': 'mh_execute',
'static_library': 'staticlib',
'shared_library': 'mh_dylib',
'loadable_module': 'mh_bundle',
}[self.spec['type']]
def _GetBundleBinaryPath(self):
"""Returns the name of the bundle binary of by this target.
E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('shared_library') or self.isIOS:
path = self.GetBundleContentsFolderPath()
elif self.spec['type'] in ('executable', 'loadable_module'):
path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
if 'product_extension' in self.spec:
return '.' + self.spec['product_extension']
return {
'executable': '',
'static_library': '.a',
'shared_library': '.dylib',
'loadable_module': '.so',
}[self.spec['type']]
def _GetStandaloneExecutablePrefix(self):
return self.spec.get('product_prefix', {
'executable': '',
'static_library': 'lib',
'shared_library': 'lib',
# Non-bundled loadable_modules are called foo.so for some reason
# (that is, .so and no prefix) with the xcode build -- match that.
'loadable_module': '',
}[self.spec['type']])
def _GetStandaloneBinaryPath(self):
"""Returns the name of the non-bundle binary represented by this target.
E.g. hello_world. Only valid for non-bundles."""
assert not self._IsBundle()
assert self.spec['type'] in (
'executable', 'shared_library', 'static_library', 'loadable_module'), (
'Unexpected type %s' % self.spec['type'])
target = self.spec['target_name']
if self.spec['type'] == 'static_library':
if target[:3] == 'lib':
target = target[3:]
elif self.spec['type'] in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = self._GetStandaloneExecutablePrefix()
target = self.spec.get('product_name', target)
target_ext = self._GetStandaloneExecutableSuffix()
return target_prefix + target + target_ext
def GetExecutableName(self):
"""Returns the executable name of the bundle represented by this target.
E.g. Chromium."""
if self._IsBundle():
return self.spec.get('product_name', self.spec['target_name'])
else:
return self._GetStandaloneBinaryPath()
def GetExecutablePath(self):
"""Returns the directory name of the bundle represented by this target. E.g.
Chromium.app/Contents/MacOS/Chromium."""
if self._IsBundle():
return self._GetBundleBinaryPath()
else:
return self._GetStandaloneBinaryPath()
def GetActiveArchs(self, configname):
"""Returns the architectures this target should be built for."""
config_settings = self.xcode_settings[configname]
xcode_archs_default = GetXcodeArchsDefault()
return xcode_archs_default.ActiveArchs(
config_settings.get('ARCHS'),
config_settings.get('VALID_ARCHS'),
config_settings.get('SDKROOT'))
def _GetSdkVersionInfoItem(self, sdk, infoitem):
# xcodebuild requires Xcode and can't run on Command Line Tools-only
# systems from 10.7 onward.
# Since the CLT has no SDK paths anyway, returning None is the
# most sensible route and should still do the right thing.
try:
return GetStdout(['xcrun', '--sdk', sdk, infoitem])
except:
pass
def _SdkRoot(self, configname):
if configname is None:
configname = self.configname
return self.GetPerConfigSetting('SDKROOT', configname, default='')
def _XcodePlatformPath(self, configname=None):
sdk_root = self._SdkRoot(configname)
if sdk_root not in XcodeSettings._platform_path_cache:
platform_path = self._GetSdkVersionInfoItem(sdk_root,
'--show-sdk-platform-path')
XcodeSettings._platform_path_cache[sdk_root] = platform_path
return XcodeSettings._platform_path_cache[sdk_root]
def _SdkPath(self, configname=None):
sdk_root = self._SdkRoot(configname)
if sdk_root.startswith('/'):
return sdk_root
return self._XcodeSdkPath(sdk_root)
def _XcodeSdkPath(self, sdk_root):
if sdk_root not in XcodeSettings._sdk_path_cache:
sdk_path = self._GetSdkVersionInfoItem(sdk_root, '--show-sdk-path')
XcodeSettings._sdk_path_cache[sdk_root] = sdk_path
if sdk_root:
XcodeSettings._sdk_root_cache[sdk_path] = sdk_root
return XcodeSettings._sdk_path_cache[sdk_root]
def _AppendPlatformVersionMinFlags(self, lst):
self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings():
# TODO: Implement this better?
sdk_path_basename = os.path.basename(self._SdkPath())
if sdk_path_basename.lower().startswith('iphonesimulator'):
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-mios-simulator-version-min=%s')
else:
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-miphoneos-version-min=%s')
def GetCflags(self, configname, arch=None):
"""Returns flags that need to be added to .c, .cc, .m, and .mm
compilations."""
    # These functions (and the similar ones below) do not offer complete
# emulation of all xcode_settings keys. They're implemented on demand.
self.configname = configname
cflags = []
sdk_root = self._SdkPath()
if 'SDKROOT' in self._Settings() and sdk_root:
cflags.append('-isysroot %s' % sdk_root)
if self.header_map_path:
cflags.append('-I%s' % self.header_map_path)
if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'):
cflags.append('-Wconstant-conversion')
if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
cflags.append('-funsigned-char')
if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
cflags.append('-fasm-blocks')
if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
cflags.append('-mdynamic-no-pic')
else:
pass
# TODO: In this case, it depends on the target. xcode passes
# mdynamic-no-pic by default for executable and possibly static lib
# according to mento
if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
cflags.append('-mpascal-strings')
self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')
if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
if dbg_format == 'dwarf':
cflags.append('-gdwarf-2')
elif dbg_format == 'stabs':
raise NotImplementedError('stabs debug format is not supported yet.')
elif dbg_format == 'dwarf-with-dsym':
cflags.append('-gdwarf-2')
else:
raise NotImplementedError('Unknown debug format %s' % dbg_format)
if self._Settings().get('GCC_STRICT_ALIASING') == 'YES':
cflags.append('-fstrict-aliasing')
elif self._Settings().get('GCC_STRICT_ALIASING') == 'NO':
cflags.append('-fno-strict-aliasing')
if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
cflags.append('-fvisibility=hidden')
if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
cflags.append('-Werror')
if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
cflags.append('-Wnewline-eof')
# In Xcode, this is only activated when GCC_COMPILER_VERSION is clang or
# llvm-gcc. It also requires a fairly recent libtool, and
# if the system clang isn't used, DYLD_LIBRARY_PATH needs to contain the
# path to the libLTO.dylib that matches the used clang.
if self._Test('LLVM_LTO', 'YES', default='NO'):
cflags.append('-flto')
self._AppendPlatformVersionMinFlags(cflags)
# TODO:
if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
self._WarnUnimplemented('COPY_PHASE_STRIP')
self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')
# TODO: This is exported correctly, but assigning to it is not supported.
self._WarnUnimplemented('MACH_O_TYPE')
self._WarnUnimplemented('PRODUCT_TYPE')
if arch is not None:
archs = [arch]
else:
assert self.configname
archs = self.GetActiveArchs(self.configname)
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
cflags.append('-arch ' + archs[0])
if archs[0] in ('i386', 'x86_64'):
if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse3')
if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
default='NO'):
cflags.append('-mssse3') # Note 3rd 's'.
if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.1')
if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.2')
cflags += self._Settings().get('WARNING_CFLAGS', [])
platform_root = self._XcodePlatformPath(configname)
if platform_root and self._IsXCTest():
cflags.append('-F' + platform_root + '/Developer/Library/Frameworks/')
if sdk_root:
framework_root = sdk_root
else:
framework_root = ''
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
cflags.append('-F' + directory.replace('$(SDKROOT)', framework_root))
self.configname = None
return cflags
def GetCflagsC(self, configname):
"""Returns flags that need to be added to .c, and .m compilations."""
self.configname = configname
cflags_c = []
if self._Settings().get('GCC_C_LANGUAGE_STANDARD', '') == 'ansi':
cflags_c.append('-ansi')
else:
self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
cflags_c += self._Settings().get('OTHER_CFLAGS', [])
self.configname = None
return cflags_c
def GetCflagsCC(self, configname):
"""Returns flags that need to be added to .cc, and .mm compilations."""
self.configname = configname
cflags_cc = []
clang_cxx_language_standard = self._Settings().get(
'CLANG_CXX_LANGUAGE_STANDARD')
    # Note: Don't map c++0x to c++11, so that c++0x can be used with older
    # clangs that don't understand c++11 yet (like Xcode 4.2's).
if clang_cxx_language_standard:
cflags_cc.append('-std=%s' % clang_cxx_language_standard)
self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'):
cflags_cc.append('-fno-rtti')
if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'):
cflags_cc.append('-fno-exceptions')
if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'):
cflags_cc.append('-fvisibility-inlines-hidden')
if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'):
cflags_cc.append('-fno-threadsafe-statics')
# Note: This flag is a no-op for clang, it only has an effect for gcc.
if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'):
cflags_cc.append('-Wno-invalid-offsetof')
other_ccflags = []
for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
# TODO: More general variable expansion. Missing in many other places too.
if flag in ('$inherited', '$(inherited)', '${inherited}'):
flag = '$OTHER_CFLAGS'
if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
other_ccflags += self._Settings().get('OTHER_CFLAGS', [])
else:
other_ccflags.append(flag)
cflags_cc += other_ccflags
self.configname = None
return cflags_cc
def _AddObjectiveCGarbageCollectionFlags(self, flags):
gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
if gc_policy == 'supported':
flags.append('-fobjc-gc')
elif gc_policy == 'required':
flags.append('-fobjc-gc-only')
def _AddObjectiveCARCFlags(self, flags):
if self._Test('CLANG_ENABLE_OBJC_ARC', 'YES', default='NO'):
flags.append('-fobjc-arc')
def _AddObjectiveCMissingPropertySynthesisFlags(self, flags):
if self._Test('CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS',
'YES', default='NO'):
flags.append('-Wobjc-missing-property-synthesis')
def GetCflagsObjC(self, configname):
"""Returns flags that need to be added to .m compilations."""
self.configname = configname
cflags_objc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objc)
self._AddObjectiveCARCFlags(cflags_objc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objc)
self.configname = None
return cflags_objc
def GetCflagsObjCC(self, configname):
"""Returns flags that need to be added to .mm compilations."""
self.configname = configname
cflags_objcc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objcc)
self._AddObjectiveCARCFlags(cflags_objcc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objcc)
if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
cflags_objcc.append('-fobjc-call-cxx-cdtors')
self.configname = None
return cflags_objcc
def GetInstallNameBase(self):
"""Return DYLIB_INSTALL_NAME_BASE for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
install_base = self.GetPerTargetSetting(
'DYLIB_INSTALL_NAME_BASE',
default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib')
return install_base
def _StandardizePath(self, path):
"""Do :standardizepath processing for path."""
# I'm not quite sure what :standardizepath does. Just call normpath(),
# but don't let @executable_path/../foo collapse to foo.
if '/' in path:
prefix, rest = '', path
if path.startswith('@'):
prefix, rest = path.split('/', 1)
rest = os.path.normpath(rest) # :standardizepath
path = os.path.join(prefix, rest)
return path
def GetInstallName(self):
"""Return LD_DYLIB_INSTALL_NAME for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
default_install_name = \
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
install_name = self.GetPerTargetSetting(
'LD_DYLIB_INSTALL_NAME', default=default_install_name)
# Hardcode support for the variables used in chromium for now, to
# unblock people using the make build.
if '$' in install_name:
assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
'$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
'yet in target \'%s\' (got \'%s\')' %
(self.spec['target_name'], install_name))
install_name = install_name.replace(
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
self._StandardizePath(self.GetInstallNameBase()))
if self._IsBundle():
# These are only valid for bundles, hence the |if|.
install_name = install_name.replace(
'$(WRAPPER_NAME)', self.GetWrapperName())
install_name = install_name.replace(
'$(PRODUCT_NAME)', self.GetProductName())
else:
assert '$(WRAPPER_NAME)' not in install_name
assert '$(PRODUCT_NAME)' not in install_name
install_name = install_name.replace(
'$(EXECUTABLE_PATH)', self.GetExecutablePath())
return install_name
def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
"""Checks if ldflag contains a filename and if so remaps it from
gyp-directory-relative to build-directory-relative."""
# This list is expanded on demand.
# They get matched as:
# -exported_symbols_list file
# -Wl,exported_symbols_list file
# -Wl,exported_symbols_list,file
LINKER_FILE = r'(\S+)'
WORD = r'\S+'
linker_flags = [
['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
['-unexported_symbols_list', LINKER_FILE],
['-reexported_symbols_list', LINKER_FILE],
['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
]
for flag_pattern in linker_flags:
regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
m = regex.match(ldflag)
if m:
ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
ldflag[m.end(1):]
# Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
# TODO(thakis): Update ffmpeg.gyp):
if ldflag.startswith('-L'):
ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
return ldflag
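  # For example (illustrative, hypothetical paths): with a gyp_to_build_path
  # that maps 'syms.list' to '../../syms.list', the flag
  # '-Wl,-exported_symbols_list,syms.list' is remapped to
  # '-Wl,-exported_symbols_list,../../syms.list'.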
def GetLdflags(self, configname, product_dir, gyp_to_build_path, arch=None):
"""Returns flags that need to be passed to the linker.
Args:
configname: The name of the configuration to get ld flags for.
        product_dir: The directory where products such as static and dynamic
            libraries are placed. This is added to the library search path.
gyp_to_build_path: A function that converts paths relative to the
            current gyp file to paths relative to the build directory.
"""
self.configname = configname
ldflags = []
# The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
# can contain entries that depend on this. Explicitly absolutify these.
for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))
if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
ldflags.append('-Wl,-dead_strip')
if self._Test('PREBINDING', 'YES', default='NO'):
ldflags.append('-Wl,-prebind')
self._Appendf(
ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
self._Appendf(
ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')
self._AppendPlatformVersionMinFlags(ldflags)
if 'SDKROOT' in self._Settings() and self._SdkPath():
ldflags.append('-isysroot ' + self._SdkPath())
for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
ldflags.append('-L' + gyp_to_build_path(library_path))
if 'ORDER_FILE' in self._Settings():
ldflags.append('-Wl,-order_file ' +
'-Wl,' + gyp_to_build_path(
self._Settings()['ORDER_FILE']))
if arch is not None:
archs = [arch]
else:
assert self.configname
archs = self.GetActiveArchs(self.configname)
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
ldflags.append('-arch ' + archs[0])
# Xcode adds the product directory by default.
ldflags.append('-L' + product_dir)
install_name = self.GetInstallName()
if install_name and self.spec['type'] != 'loadable_module':
ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))
for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
ldflags.append('-Wl,-rpath,' + rpath)
sdk_root = self._SdkPath()
if not sdk_root:
sdk_root = ''
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
ldflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))
platform_root = self._XcodePlatformPath(configname)
if sdk_root and platform_root and self._IsXCTest():
ldflags.append('-F' + platform_root + '/Developer/Library/Frameworks/')
ldflags.append('-framework XCTest')
is_extension = self._IsIosAppExtension() or self._IsIosWatchKitExtension()
if sdk_root and is_extension:
# Adds the link flags for extensions. These flags are common for all
# extensions and provide loader and main function.
# These flags reflect the compilation options used by xcode to compile
# extensions.
if XcodeVersion() < '0900':
ldflags.append('-lpkstart')
ldflags.append(sdk_root +
'/System/Library/PrivateFrameworks/PlugInKit.framework/PlugInKit')
else:
ldflags.append('-e _NSExtensionMain')
ldflags.append('-fapplication-extension')
self._Appendf(ldflags, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
self.configname = None
return ldflags
def GetLibtoolflags(self, configname):
"""Returns flags that need to be passed to the static linker.
Args:
configname: The name of the configuration to get ld flags for.
"""
self.configname = configname
libtoolflags = []
for libtoolflag in self._Settings().get('OTHER_LDFLAGS', []):
libtoolflags.append(libtoolflag)
# TODO(thakis): ARCHS?
self.configname = None
return libtoolflags
def GetPerTargetSettings(self):
"""Gets a list of all the per-target settings. This will only fetch keys
whose values are the same across all configurations."""
first_pass = True
result = {}
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = dict(self.xcode_settings[configname])
first_pass = False
else:
for key, value in self.xcode_settings[configname].iteritems():
if key not in result:
continue
elif result[key] != value:
del result[key]
return result
def GetPerConfigSetting(self, setting, configname, default=None):
if configname in self.xcode_settings:
return self.xcode_settings[configname].get(setting, default)
else:
return self.GetPerTargetSetting(setting, default)
def GetPerTargetSetting(self, setting, default=None):
"""Tries to get xcode_settings.setting from spec. Assumes that the setting
has the same value in all configurations and throws otherwise."""
is_first_pass = True
result = None
for configname in sorted(self.xcode_settings.keys()):
if is_first_pass:
result = self.xcode_settings[configname].get(setting, None)
is_first_pass = False
else:
assert result == self.xcode_settings[configname].get(setting, None), (
"Expected per-target setting for '%s', got per-config setting "
"(target %s)" % (setting, self.spec['target_name']))
if result is None:
return default
return result
def _GetStripPostbuilds(self, configname, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
    necessary to strip this target's binary. These should be run as postbuilds
before the actual postbuilds run."""
self.configname = configname
result = []
if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):
default_strip_style = 'debugging'
if ((self.spec['type'] == 'loadable_module' or self._IsIosAppExtension())
and self._IsBundle()):
default_strip_style = 'non-global'
elif self.spec['type'] == 'executable':
default_strip_style = 'all'
strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
strip_flags = {
'all': '',
'non-global': '-x',
'debugging': '-S',
}[strip_style]
explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
if explicit_strip_flags:
strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)
if not quiet:
result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
result.append('strip %s %s' % (strip_flags, output_binary))
self.configname = None
return result
def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
    necessary to massage this target's debug information. These should be run
as postbuilds before the actual postbuilds run."""
self.configname = configname
# For static libraries, no dSYMs are created.
result = []
if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
self._Test(
'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
self.spec['type'] != 'static_library'):
if not quiet:
result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))
self.configname = None
return result
def _GetTargetPostbuilds(self, configname, output, output_binary,
quiet=False):
"""Returns a list of shell commands that contain the shell commands
to run as postbuilds for this target, before the actual postbuilds."""
# dSYMs need to build before stripping happens.
return (
self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
self._GetStripPostbuilds(configname, output_binary, quiet))
def _GetIOSPostbuilds(self, configname, output_binary):
"""Return a shell command to codesign the iOS output binary so it can
be deployed to a device. This should be run as the very last step of the
build."""
if not (self.isIOS and
(self.spec['type'] == 'executable' or self._IsXCTest()) or
self.IsIosFramework()):
return []
settings = self.xcode_settings[configname]
key = self._GetIOSCodeSignIdentityKey(settings)
if not key:
return []
# Warn for any unimplemented signing xcode keys.
unimpl = ['OTHER_CODE_SIGN_FLAGS']
unimpl = set(unimpl) & set(self.xcode_settings[configname].keys())
if unimpl:
print 'Warning: Some codesign keys not implemented, ignoring: %s' % (
', '.join(sorted(unimpl)))
return ['%s code-sign-bundle "%s" "%s" "%s"' % (
os.path.join('${TARGET_BUILD_DIR}', 'gyp-mac-tool'), key,
settings.get('CODE_SIGN_ENTITLEMENTS', ''),
settings.get('PROVISIONING_PROFILE', ''))
]
def _GetIOSCodeSignIdentityKey(self, settings):
identity = settings.get('CODE_SIGN_IDENTITY')
if not identity:
return None
if identity not in XcodeSettings._codesigning_key_cache:
output = subprocess.check_output(
['security', 'find-identity', '-p', 'codesigning', '-v'])
for line in output.splitlines():
if identity in line:
fingerprint = line.split()[1]
cache = XcodeSettings._codesigning_key_cache
assert identity not in cache or fingerprint == cache[identity], (
"Multiple codesigning fingerprints for identity: %s" % identity)
XcodeSettings._codesigning_key_cache[identity] = fingerprint
return XcodeSettings._codesigning_key_cache.get(identity, '')
def AddImplicitPostbuilds(self, configname, output, output_binary,
postbuilds=[], quiet=False):
"""Returns a list of shell commands that should run before and after
|postbuilds|."""
assert output_binary is not None
pre = self._GetTargetPostbuilds(configname, output, output_binary, quiet)
post = self._GetIOSPostbuilds(configname, output_binary)
return pre + postbuilds + post
def _AdjustLibrary(self, library, config_name=None):
if library.endswith('.framework'):
l = '-framework ' + os.path.splitext(os.path.basename(library))[0]
else:
m = self.library_re.match(library)
if m:
l = '-l' + m.group(1)
else:
l = library
sdk_root = self._SdkPath(config_name)
if not sdk_root:
sdk_root = ''
# Xcode 7 started shipping with ".tbd" (text based stubs) files instead of
# ".dylib" without providing a real support for them. What it does, for
# "/usr/lib" libraries, is do "-L/usr/lib -lname" which is dependent on the
# library order and cause collision when building Chrome.
#
# Instead substitude ".tbd" to ".dylib" in the generated project when the
# following conditions are both true:
# - library is referenced in the gyp file as "$(SDKROOT)/**/*.dylib",
# - the ".dylib" file does not exists but a ".tbd" file do.
library = l.replace('$(SDKROOT)', sdk_root)
if l.startswith('$(SDKROOT)'):
basename, ext = os.path.splitext(library)
if ext == '.dylib' and not os.path.exists(library):
tbd_library = basename + '.tbd'
if os.path.exists(tbd_library):
library = tbd_library
return library
def AdjustLibraries(self, libraries, config_name=None):
"""Transforms entries like 'Cocoa.framework' in libraries into entries like
'-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
"""
libraries = [self._AdjustLibrary(library, config_name)
for library in libraries]
return libraries
def _BuildMachineOSBuild(self):
return GetStdout(['sw_vers', '-buildVersion'])
def _XcodeIOSDeviceFamily(self, configname):
family = self.xcode_settings[configname].get('TARGETED_DEVICE_FAMILY', '1')
return [int(x) for x in family.split(',')]
def GetExtraPlistItems(self, configname=None):
"""Returns a dictionary with extra items to insert into Info.plist."""
if configname not in XcodeSettings._plist_cache:
cache = {}
cache['BuildMachineOSBuild'] = self._BuildMachineOSBuild()
xcode, xcode_build = XcodeVersion()
cache['DTXcode'] = xcode
cache['DTXcodeBuild'] = xcode_build
compiler = self.xcode_settings[configname].get('GCC_VERSION')
if compiler is not None:
cache['DTCompiler'] = compiler
sdk_root = self._SdkRoot(configname)
if not sdk_root:
sdk_root = self._DefaultSdkRoot()
sdk_version = self._GetSdkVersionInfoItem(sdk_root, '--show-sdk-version')
cache['DTSDKName'] = sdk_root + (sdk_version or '')
if xcode >= '0720':
cache['DTSDKBuild'] = self._GetSdkVersionInfoItem(
sdk_root, '--show-sdk-build-version')
elif xcode >= '0430':
cache['DTSDKBuild'] = sdk_version
else:
cache['DTSDKBuild'] = cache['BuildMachineOSBuild']
if self.isIOS:
cache['MinimumOSVersion'] = self.xcode_settings[configname].get(
'IPHONEOS_DEPLOYMENT_TARGET')
cache['DTPlatformName'] = sdk_root
cache['DTPlatformVersion'] = sdk_version
if configname.endswith("iphoneos"):
cache['CFBundleSupportedPlatforms'] = ['iPhoneOS']
cache['DTPlatformBuild'] = cache['DTSDKBuild']
else:
cache['CFBundleSupportedPlatforms'] = ['iPhoneSimulator']
# This is weird, but Xcode sets DTPlatformBuild to an empty field
# for simulator builds.
cache['DTPlatformBuild'] = ""
XcodeSettings._plist_cache[configname] = cache
# Include extra plist items that are per-target, not per global
# XcodeSettings.
items = dict(XcodeSettings._plist_cache[configname])
if self.isIOS:
items['UIDeviceFamily'] = self._XcodeIOSDeviceFamily(configname)
return items
def _DefaultSdkRoot(self):
"""Returns the default SDKROOT to use.
Prior to version 5.0.0, if SDKROOT was not explicitly set in the Xcode
project, then the environment variable was empty. Starting with this
version, Xcode uses the name of the newest SDK installed.
"""
xcode_version, xcode_build = XcodeVersion()
if xcode_version < '0500':
return ''
default_sdk_path = self._XcodeSdkPath('')
default_sdk_root = XcodeSettings._sdk_root_cache.get(default_sdk_path)
if default_sdk_root:
return default_sdk_root
try:
all_sdks = GetStdout(['xcodebuild', '-showsdks'])
except:
# If xcodebuild fails, there will be no valid SDKs
return ''
for line in all_sdks.splitlines():
items = line.split()
if len(items) >= 3 and items[-2] == '-sdk':
sdk_root = items[-1]
sdk_path = self._XcodeSdkPath(sdk_root)
if sdk_path == default_sdk_path:
return sdk_root
return ''
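# A minimal usage sketch of XcodeSettings. Illustrative only: the spec dict
# below is hypothetical and far smaller than a real gyp target spec.
def _ExampleXcodeSettings():
  spec = {
      'target_name': 'hello',
      'type': 'executable',
      'configurations': {'Default': {}},
  }
  settings = XcodeSettings(spec)
  # A non-bundle executable resolves to a plain binary name and the
  # command-line-tool product type:
  #   settings.GetExecutablePath() == 'hello'
  #   settings.GetProductType() == 'com.apple.product-type.tool'
  return settings.GetExecutablePath(), settings.GetProductType()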
class MacPrefixHeader(object):
"""A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature.
This feature consists of several pieces:
* If GCC_PREFIX_HEADER is present, all compilations in that project get an
additional |-include path_to_prefix_header| cflag.
* If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is
instead compiled, and all other compilations in the project get an
additional |-include path_to_compiled_header| instead.
+ Compiled prefix headers have the extension gch. There is one gch file for
every language used in the project (c, cc, m, mm), since gch files for
different languages aren't compatible.
+ gch files themselves are built with the target's normal cflags, but they
obviously don't get the |-include| flag. Instead, they need a -x flag that
describes their language.
+ All o files in the target need to depend on the gch file, to make sure
it's built before any o file is built.
This class helps with some of these tasks, but it needs help from the build
system for writing dependencies to the gch files, for writing build commands
for the gch files, and for figuring out the location of the gch files.
"""
def __init__(self, xcode_settings,
gyp_path_to_build_path, gyp_path_to_build_output):
"""If xcode_settings is None, all methods on this class are no-ops.
Args:
gyp_path_to_build_path: A function that takes a gyp-relative path,
and returns a path relative to the build directory.
gyp_path_to_build_output: A function that takes a gyp-relative path and
a language code ('c', 'cc', 'm', or 'mm'), and that returns a path
to where the output of precompiling that path for that language
should be placed (without the trailing '.gch').
"""
# This doesn't support per-configuration prefix headers. Good enough
# for now.
self.header = None
self.compile_headers = False
if xcode_settings:
self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER')
self.compile_headers = xcode_settings.GetPerTargetSetting(
'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO'
self.compiled_headers = {}
if self.header:
if self.compile_headers:
for lang in ['c', 'cc', 'm', 'mm']:
self.compiled_headers[lang] = gyp_path_to_build_output(
self.header, lang)
self.header = gyp_path_to_build_path(self.header)
def _CompiledHeader(self, lang, arch):
assert self.compile_headers
h = self.compiled_headers[lang]
if arch:
h += '.' + arch
return h
def GetInclude(self, lang, arch=None):
"""Gets the cflags to include the prefix header for language |lang|."""
if self.compile_headers and lang in self.compiled_headers:
return '-include %s' % self._CompiledHeader(lang, arch)
elif self.header:
return '-include %s' % self.header
else:
return ''
def _Gch(self, lang, arch):
"""Returns the actual file name of the prefix header for language |lang|."""
assert self.compile_headers
return self._CompiledHeader(lang, arch) + '.gch'
def GetObjDependencies(self, sources, objs, arch=None):
"""Given a list of source files and the corresponding object files, returns
a list of (source, object, gch) tuples, where |gch| is the build-directory
    relative path to the gch file each object file depends on. |sources[i]|
    has to be the source file belonging to |objs[i]|."""
if not self.header or not self.compile_headers:
return []
result = []
for source, obj in zip(sources, objs):
ext = os.path.splitext(source)[1]
lang = {
'.c': 'c',
'.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
'.m': 'm',
'.mm': 'mm',
}.get(ext, None)
if lang:
result.append((source, obj, self._Gch(lang, arch)))
return result
def GetPchBuildCommands(self, arch=None):
"""Returns [(path_to_gch, language_flag, language, header)].
|path_to_gch| and |header| are relative to the build directory.
"""
if not self.header or not self.compile_headers:
return []
return [
(self._Gch('c', arch), '-x c-header', 'c', self.header),
(self._Gch('cc', arch), '-x c++-header', 'cc', self.header),
(self._Gch('m', arch), '-x objective-c-header', 'm', self.header),
(self._Gch('mm', arch), '-x objective-c++-header', 'mm', self.header),
]
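# A minimal usage sketch of MacPrefixHeader. Illustrative only: the spec,
# paths, and path-mapping lambdas are hypothetical.
def _ExampleMacPrefixHeader():
  spec = {
      'target_name': 'hello',
      'type': 'executable',
      'configurations': {
          'Default': {'xcode_settings': {'GCC_PREFIX_HEADER': 'src/prefix.h'}},
      },
  }
  settings = XcodeSettings(spec)
  pch = MacPrefixHeader(
      settings,
      lambda path: 'out/' + path,                         # gyp path -> build path
      lambda path, lang: 'out/pch/%s/%s' % (lang, path))  # gyp path -> gch output
  # GCC_PRECOMPILE_PREFIX_HEADER is unset, so the raw header is -include'd:
  #   pch.GetInclude('c') == '-include out/src/prefix.h'
  return pch.GetInclude('c')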
def XcodeVersion():
"""Returns a tuple of version and build version of installed Xcode."""
# `xcodebuild -version` output looks like
# Xcode 4.6.3
# Build version 4H1503
# or like
# Xcode 3.2.6
# Component versions: DevToolsCore-1809.0; DevToolsSupport-1806.0
# BuildVersion: 10M2518
# Convert that to '0463', '4H1503'.
global XCODE_VERSION_CACHE
if XCODE_VERSION_CACHE:
return XCODE_VERSION_CACHE
try:
version_list = GetStdout(['xcodebuild', '-version']).splitlines()
# In some circumstances xcodebuild exits 0 but doesn't return
# the right results; for example, a user on 10.7 or 10.8 with
# a bogus path set via xcode-select
# In that case this may be a CLT-only install so fall back to
# checking that version.
if len(version_list) < 2:
raise GypError("xcodebuild returned unexpected results")
except:
version = CLTVersion()
if version:
version = re.match(r'(\d\.\d\.?\d*)', version).groups()[0]
else:
raise GypError("No Xcode or CLT version detected!")
# The CLT has no build information, so we return an empty string.
version_list = [version, '']
version = version_list[0]
build = version_list[-1]
# Be careful to convert "4.2" to "0420":
version = version.split()[-1].replace('.', '')
version = (version + '0' * (3 - len(version))).zfill(4)
if build:
build = build.split()[-1]
XCODE_VERSION_CACHE = (version, build)
return XCODE_VERSION_CACHE
# This function ported from the logic in Homebrew's CLT version check
def CLTVersion():
"""Returns the version of command-line tools from pkgutil."""
# pkgutil output looks like
# package-id: com.apple.pkg.CLTools_Executables
# version: 5.0.1.0.1.1382131676
# volume: /
# location: /
# install-time: 1382544035
# groups: com.apple.FindSystemFiles.pkg-group com.apple.DevToolsBoth.pkg-group com.apple.DevToolsNonRelocatableShared.pkg-group
STANDALONE_PKG_ID = "com.apple.pkg.DeveloperToolsCLILeo"
FROM_XCODE_PKG_ID = "com.apple.pkg.DeveloperToolsCLI"
MAVERICKS_PKG_ID = "com.apple.pkg.CLTools_Executables"
regex = re.compile('version: (?P<version>.+)')
for key in [MAVERICKS_PKG_ID, STANDALONE_PKG_ID, FROM_XCODE_PKG_ID]:
try:
output = GetStdout(['/usr/sbin/pkgutil', '--pkg-info', key])
return re.search(regex, output).groupdict()['version']
except:
continue
def GetStdout(cmdlist):
"""Returns the content of standard output returned by invoking |cmdlist|.
  Raises |GypError| if the command exits with a non-zero return code."""
job = subprocess.Popen(cmdlist, stdout=subprocess.PIPE)
out = job.communicate()[0]
if job.returncode != 0:
sys.stderr.write(out + '\n')
raise GypError('Error %d running %s' % (job.returncode, cmdlist[0]))
return out.rstrip('\n')
def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
"""Merges the global xcode_settings dictionary into each configuration of the
target represented by spec. For keys that are both in the global and the local
  xcode_settings dict, the local key gets precedence.
"""
# The xcode generator special-cases global xcode_settings and does something
# that amounts to merging in the global xcode_settings into each local
# xcode_settings dict.
global_xcode_settings = global_dict.get('xcode_settings', {})
for config in spec['configurations'].values():
if 'xcode_settings' in config:
new_settings = global_xcode_settings.copy()
new_settings.update(config['xcode_settings'])
config['xcode_settings'] = new_settings
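# A minimal sketch of the merge semantics. Illustrative only: both dicts are
# hypothetical.
def _ExampleMergeGlobalXcodeSettings():
  global_dict = {'xcode_settings': {'SDKROOT': 'macosx', 'ARCHS': ['x86_64']}}
  spec = {'configurations':
              {'Default': {'xcode_settings': {'SDKROOT': 'iphoneos'}}}}
  MergeGlobalXcodeSettingsToSpec(global_dict, spec)
  # Local keys win; global-only keys are merged in:
  #   spec['configurations']['Default']['xcode_settings'] ==
  #       {'SDKROOT': 'iphoneos', 'ARCHS': ['x86_64']}
  return spec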
def IsMacBundle(flavor, spec):
"""Returns if |spec| should be treated as a bundle.
Bundles are directories with a certain subdirectory structure, instead of
just a single file. Bundle rules do not produce a binary but also package
resources into that directory."""
is_mac_bundle = int(spec.get('mac_xctest_bundle', 0)) != 0 or \
(int(spec.get('mac_bundle', 0)) != 0 and flavor == 'mac')
if is_mac_bundle:
assert spec['type'] != 'none', (
'mac_bundle targets cannot have type none (target "%s")' %
spec['target_name'])
return is_mac_bundle
def GetMacBundleResources(product_dir, xcode_settings, resources):
"""Yields (output, resource) pairs for every resource in |resources|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
resources: A list of bundle resources, relative to the build directory.
"""
dest = os.path.join(product_dir,
xcode_settings.GetBundleResourceFolder())
for res in resources:
output = dest
# The make generator doesn't support it, so forbid it everywhere
    # to keep the generators more interchangeable.
assert ' ' not in res, (
"Spaces in resource filenames not supported (%s)" % res)
# Split into (path,file).
res_parts = os.path.split(res)
# Now split the path into (prefix,maybe.lproj).
lproj_parts = os.path.split(res_parts[0])
# If the resource lives in a .lproj bundle, add that to the destination.
if lproj_parts[1].endswith('.lproj'):
output = os.path.join(output, lproj_parts[1])
output = os.path.join(output, res_parts[1])
# Compiled XIB files are referred to by .nib.
if output.endswith('.xib'):
output = os.path.splitext(output)[0] + '.nib'
# Compiled storyboard files are referred to by .storyboardc.
if output.endswith('.storyboard'):
output = os.path.splitext(output)[0] + '.storyboardc'
yield output, res
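# A minimal usage sketch of GetMacBundleResources. Illustrative only: the
# spec, product dir, and resource paths are hypothetical.
def _ExampleMacBundleResources():
  spec = {
      'target_name': 'hello',
      'type': 'executable',
      'mac_bundle': 1,
      'configurations': {'Default': {}},
  }
  settings = XcodeSettings(spec)
  pairs = list(GetMacBundleResources('out/Default', settings,
                                     ['en.lproj/MainMenu.xib']))
  # The .lproj directory is preserved and the compiled .xib is renamed:
  #   pairs == [('out/Default/hello.app/Contents/Resources/en.lproj/'
  #              'MainMenu.nib', 'en.lproj/MainMenu.xib')]
  return pairs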
def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
"""Returns (info_plist, dest_plist, defines, extra_env), where:
* |info_plist| is the source plist path, relative to the
build directory,
* |dest_plist| is the destination plist path, relative to the
build directory,
* |defines| is a list of preprocessor defines (empty if the plist
    shouldn't be preprocessed),
* |extra_env| is a dict of env variables that should be exported when
invoking |mac_tool copy-info-plist|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
      gyp_path_to_build_path: A function that converts paths relative to the
          current gyp file to paths relative to the build directory.
"""
info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
if not info_plist:
return None, None, [], {}
# The make generator doesn't support it, so forbid it everywhere
  # to keep the generators more interchangeable.
assert ' ' not in info_plist, (
"Spaces in Info.plist filenames not supported (%s)" % info_plist)
info_plist = gyp_path_to_build_path(info_plist)
# If explicitly set to preprocess the plist, invoke the C preprocessor and
# specify any defines as -D flags.
if xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESS', default='NO') == 'YES':
# Create an intermediate file based on the path.
defines = shlex.split(xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))
else:
defines = []
dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
extra_env = xcode_settings.GetPerTargetSettings()
return info_plist, dest_plist, defines, extra_env
def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings=None):
"""Return the environment variables that Xcode would set. See
http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
for a full list.
Args:
xcode_settings: An XcodeSettings object. If this is None, this function
returns an empty dict.
built_products_dir: Absolute path to the built products dir.
srcroot: Absolute path to the source root.
configuration: The build configuration name.
additional_settings: An optional dict with more values to add to the
result.
"""
if not xcode_settings: return {}
# This function is considered a friend of XcodeSettings, so let it reach into
# its implementation details.
spec = xcode_settings.spec
# These are filled in on a as-needed basis.
env = {
'BUILT_FRAMEWORKS_DIR' : built_products_dir,
'BUILT_PRODUCTS_DIR' : built_products_dir,
'CONFIGURATION' : configuration,
'PRODUCT_NAME' : xcode_settings.GetProductName(),
# See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
'SRCROOT' : srcroot,
'SOURCE_ROOT': '${SRCROOT}',
# This is not true for static libraries, but currently the env is only
# written for bundles:
'TARGET_BUILD_DIR' : built_products_dir,
'TEMP_DIR' : '${TMPDIR}',
'XCODE_VERSION_ACTUAL' : XcodeVersion()[0],
}
if xcode_settings.GetPerConfigSetting('SDKROOT', configuration):
env['SDKROOT'] = xcode_settings._SdkPath(configuration)
else:
env['SDKROOT'] = ''
if xcode_settings.mac_toolchain_dir:
env['DEVELOPER_DIR'] = xcode_settings.mac_toolchain_dir
if spec['type'] in (
'executable', 'static_library', 'shared_library', 'loadable_module'):
env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
mach_o_type = xcode_settings.GetMachOType()
if mach_o_type:
env['MACH_O_TYPE'] = mach_o_type
env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
if xcode_settings._IsBundle():
env['CONTENTS_FOLDER_PATH'] = \
xcode_settings.GetBundleContentsFolderPath()
env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
xcode_settings.GetBundleResourceFolder()
env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()
install_name = xcode_settings.GetInstallName()
if install_name:
env['LD_DYLIB_INSTALL_NAME'] = install_name
install_name_base = xcode_settings.GetInstallNameBase()
if install_name_base:
env['DYLIB_INSTALL_NAME_BASE'] = install_name_base
if XcodeVersion() >= '0500' and not env.get('SDKROOT'):
sdk_root = xcode_settings._SdkRoot(configuration)
if not sdk_root:
sdk_root = xcode_settings._XcodeSdkPath('')
env['SDKROOT'] = sdk_root
if not additional_settings:
additional_settings = {}
else:
# Flatten lists to strings.
for k in additional_settings:
if not isinstance(additional_settings[k], str):
additional_settings[k] = ' '.join(additional_settings[k])
additional_settings.update(env)
for k in additional_settings:
additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])
return additional_settings
def _NormalizeEnvVarReferences(str):
"""Takes a string containing variable references in the form ${FOO}, $(FOO),
or $FOO, and returns a string with all variable references in the form ${FOO}.
"""
# $FOO -> ${FOO}
str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str)
# $(FOO) -> ${FOO}
matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str)
for match in matches:
to_replace, variable = match
assert '$(' not in match, '$($(FOO)) variables not supported: ' + match
str = str.replace(to_replace, '${' + variable + '}')
return str
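# Illustrative sketch, not part of the original gyp source: a quick example of
# the normalization above. The expected result is an assumption based on the
# two regexes.
def _ExampleNormalizeEnvVarReferences():
  # 'cp $SRC $(DST) ${TMP}' is expected to normalize to 'cp ${SRC} ${DST} ${TMP}'.
  return _NormalizeEnvVarReferences('cp $SRC $(DST) ${TMP}')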
def ExpandEnvVars(string, expansions):
"""Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
expansions list. If the variable expands to something that references
another variable, this variable is expanded as well if it's in env --
until no variables present in env are left."""
for k, v in reversed(expansions):
string = string.replace('${' + k + '}', v)
string = string.replace('$(' + k + ')', v)
string = string.replace('$' + k, v)
return string
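# Illustrative sketch, not part of the original gyp source: with expansions
# ordered so that referenced variables come first (the order GetSortedXcodeEnv
# is expected to produce), dependent values resolve fully because the list is
# walked in reverse.
def _ExampleExpandEnvVars():
  # Expected result (an assumption of this example): 'foo/bar/baz'.
  return ExpandEnvVars('$B/baz', [('A', 'foo'), ('B', '${A}/bar')])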
def _TopologicallySortedEnvVarKeys(env):
"""Takes a dict |env| whose values are strings that can refer to other keys,
for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of
env such that key2 is after key1 in L if env[key2] refers to env[key1].
Throws an Exception in case of dependency cycles.
"""
# Since environment variables can refer to other variables, the evaluation
# order is important. Below is the logic to compute the dependency graph
# and sort it.
regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_variable.
# This happens to be easier in this case, since a variable's
# definition contains all variables it references in a single string.
# We can then reverse the result of the topological sort at the end.
# Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
matches = set([v for v in regex.findall(env[node]) if v in env])
for dependee in matches:
assert '${' not in dependee, 'Nested variables not supported: ' + dependee
return matches
try:
# Topologically sort, and then reverse, because we used an edge definition
# that's inverted from the expected result of this function (see comment
# above).
order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
order.reverse()
return order
except gyp.common.CycleError, e:
raise GypError(
'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
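# Illustrative sketch, not part of the original gyp source: for a simple chain
# of references the sort is expected to put referenced keys before their users.
def _ExampleTopologicallySortedEnvVarKeys():
  # Expected order (an assumption of this example): ['A', 'B', 'C'].
  return _TopologicallySortedEnvVarKeys({'A': 'x', 'B': '${A}/y', 'C': '${B}/z'})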
def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
configuration, additional_settings=None):
env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings)
return [(key, env[key]) for key in _TopologicallySortedEnvVarKeys(env)]
def GetSpecPostbuildCommands(spec, quiet=False):
"""Returns the list of postbuilds explicitly defined on |spec|, in a form
executable by a shell."""
postbuilds = []
for postbuild in spec.get('postbuilds', []):
if not quiet:
postbuilds.append('echo POSTBUILD\\(%s\\) %s' % (
spec['target_name'], postbuild['postbuild_name']))
postbuilds.append(gyp.common.EncodePOSIXShellList(postbuild['action']))
return postbuilds
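# Illustrative sketch, not part of the original gyp source: a spec with a
# single postbuild yields an echo line followed by the shell-encoded action
# (hypothetical target and action names).
def _ExampleSpecPostbuildCommands():
  spec = {'target_name': 'my_target',
          'postbuilds': [{'postbuild_name': 'Strip', 'action': ['strip', 'foo']}]}
  # Expected to return something like:
  #   ['echo POSTBUILD\\(my_target\\) Strip', 'strip foo']
  return GetSpecPostbuildCommands(spec)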
def _HasIOSTarget(targets):
"""Returns true if any target contains the iOS specific key
IPHONEOS_DEPLOYMENT_TARGET."""
for target_dict in targets.values():
for config in target_dict['configurations'].values():
if config.get('xcode_settings', {}).get('IPHONEOS_DEPLOYMENT_TARGET'):
return True
return False
def _AddIOSDeviceConfigurations(targets):
"""Clone all targets and append -iphoneos to the name. Configure these targets
to build for iOS devices and use correct architectures for those builds."""
for target_dict in targets.itervalues():
toolset = target_dict['toolset']
configs = target_dict['configurations']
for config_name, config_dict in dict(configs).iteritems():
iphoneos_config_dict = copy.deepcopy(config_dict)
configs[config_name + '-iphoneos'] = iphoneos_config_dict
configs[config_name + '-iphonesimulator'] = config_dict
if toolset == 'target':
iphoneos_config_dict['xcode_settings']['SDKROOT'] = 'iphoneos'
return targets
def CloneConfigurationForDeviceAndEmulator(target_dicts):
"""If |target_dicts| contains any iOS targets, automatically create -iphoneos
targets for iOS device builds."""
if _HasIOSTarget(target_dicts):
return _AddIOSDeviceConfigurations(target_dicts)
return target_dicts
|
{
"content_hash": "3fda62a2c7538206a0a12ecd0b7a467d",
"timestamp": "",
"source": "github",
"line_count": 1677,
"max_line_length": 191,
"avg_line_length": 39.97376267143709,
"alnum_prop": 0.6623456053463811,
"repo_name": "adobe/brackets-shell",
"id": "2dec19e5d6fb548bd2002c37f88dbcf0086eecab",
"size": "67036",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "gyp/pylib/gyp/xcode_emulation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2522"
},
{
"name": "C",
"bytes": "12938"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "686878"
},
{
"name": "JavaScript",
"bytes": "147266"
},
{
"name": "Objective-C",
"bytes": "25099"
},
{
"name": "Objective-C++",
"bytes": "162732"
},
{
"name": "Python",
"bytes": "1337028"
},
{
"name": "Shell",
"bytes": "23583"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django_pandas.managers import DataFrameManager, PassThroughManager
@python_2_unicode_compatible
class MyModel(models.Model):
index_col = models.CharField(max_length=1)
col1 = models.IntegerField()
col2 = models.FloatField(null=True)
col3 = models.FloatField(null=True)
col4 = models.IntegerField()
def __str__(self):
return "{} {} {} {}".format(
self.index_col,
self.col1,
self.col2,
self.col3,
self.col4
)
class MyModelChoice(models.Model):
CHOICES = [
(1, 'First'),
(2, 'Second'),
(3, 'Third'),
]
col1 = models.IntegerField(choices=CHOICES)
col2 = models.FloatField(null=True)
objects = DataFrameManager()
@python_2_unicode_compatible
class DataFrame(models.Model):
index = models.CharField(max_length=1)
col1 = models.IntegerField()
col2 = models.FloatField()
col3 = models.FloatField()
col4 = models.IntegerField()
objects = DataFrameManager()
def __str__(self):
return "{} {} {} {}".format(
self.index,
self.col1,
self.col2,
self.col3,
self.col4
)
@python_2_unicode_compatible
class LongTimeSeries(models.Model):
date_ix = models.DateTimeField()
series_name = models.CharField(max_length=100)
value = models.FloatField()
objects = DataFrameManager()
def __str__(self):
return "{} {} {}".format(self.date_ix,
self.series_name,
self.value)
@python_2_unicode_compatible
class WideTimeSeries(models.Model):
date_ix = models.DateTimeField()
col1 = models.FloatField()
col2 = models.FloatField()
col3 = models.FloatField()
col4 = models.FloatField()
objects = DataFrameManager()
def __str__(self):
return "{} {} {} {}".format(
self.date_ix,
self.col1,
self.col2,
self.col3,
self.col4
)
@python_2_unicode_compatible
class PivotData(models.Model):
row_col_a = models.CharField(max_length=15)
row_col_b = models.CharField(max_length=15)
row_col_c = models.CharField(max_length=15)
value_col_d = models.FloatField()
value_col_e = models.FloatField()
value_col_f = models.FloatField()
objects = DataFrameManager()
def __str__(self):
return "{0} {1} {2} {3} {4} {5}".format(
self.row_col_a, self.row_col_b, self.row_col_c,
self.value_col_d, self.value_col_e, self.value_col_f
)
@python_2_unicode_compatible
class Trader(models.Model):
name = models.CharField(max_length=20)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Security(models.Model):
symbol = models.CharField(max_length=20)
isin = models.CharField(max_length=20)
def __str__(self):
return "{0}-{1}".format(self.isin, self.symbol)
@python_2_unicode_compatible
class TradeLogNote(models.Model):
note = models.TextField()
def __str__(self):
return self.note
@python_2_unicode_compatible
class TradeLog(models.Model):
trader = models.ForeignKey(Trader)
symbol = models.ForeignKey(Security, null=True)
log_datetime = models.DateTimeField()
price = models.FloatField()
volume = models.IntegerField()
note = models.OneToOneField(TradeLogNote)
objects = DataFrameManager()
def __str__(self):
return "{0}-{1}-{2}-{3}-{4}-{5}".format(
self.trader,
self.symbol,
self.log_datetime,
self.price,
self.volume,
self.note
)
@python_2_unicode_compatible
class Portfolio(models.Model):
name = models.CharField(max_length=20)
securities = models.ManyToManyField(Security)
def __str__(self):
return self.name
class DudeQuerySet(models.query.QuerySet):
def abiding(self):
return self.filter(abides=True)
def rug_positive(self):
return self.filter(has_rug=True)
def rug_negative(self):
return self.filter(has_rug=False)
def by_name(self, name):
return self.filter(name__iexact=name)
class AbidingManager(PassThroughManager):
def get_queryset(self):
return DudeQuerySet(self.model).abiding()
get_query_set = get_queryset
def get_stats(self):
return {
"abiding_count": self.count(),
"rug_count": self.rug_positive().count(),
}
class Dude(models.Model):
abides = models.BooleanField(default=True)
name = models.CharField(max_length=20)
has_rug = models.BooleanField(default=False)
objects = PassThroughManager(DudeQuerySet)
abiders = AbidingManager()
class Car(models.Model):
name = models.CharField(max_length=20)
owner = models.ForeignKey(Dude, related_name='cars_owned')
objects = PassThroughManager(DudeQuerySet)
class SpotManager(PassThroughManager):
def get_queryset(self):
return super(SpotManager, self).get_queryset().filter(secret=False)
get_query_set = get_queryset
class SpotQuerySet(models.query.QuerySet):
def closed(self):
return self.filter(closed=True)
def secured(self):
return self.filter(secure=True)
class Spot(models.Model):
name = models.CharField(max_length=20)
secure = models.BooleanField(default=True)
closed = models.BooleanField(default=False)
secret = models.BooleanField(default=False)
owner = models.ForeignKey(Dude, related_name='spots_owned')
objects = SpotManager.for_queryset_class(SpotQuerySet)()
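# Illustrative usage sketch (not part of the original test models): the
# DataFrameManager attached above is documented by django-pandas to expose
# DataFrame helpers; the exact call signature below is an assumption.
#
#     df = WideTimeSeries.objects.to_dataframe(
#         ['col1', 'col2', 'col3', 'col4'], index='date_ix')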
|
{
"content_hash": "2488ca962b2e8539cbb73ffa4a2ccb88",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 75,
"avg_line_length": 25.25438596491228,
"alnum_prop": 0.6262591177492185,
"repo_name": "perpetua1/django-pandas",
"id": "3bd96f364693e077c4e93f06e6e8c56727a81491",
"size": "5758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_pandas/tests/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1306"
},
{
"name": "Python",
"bytes": "48804"
}
],
"symlink_target": ""
}
|
__author__ = 'Stavros Korokithakis'
__email__ = 'hi@stavros.io'
__version__ = '0.1.0'
from .requests_guard import guard, guard_iter
|
{
"content_hash": "b520b8605ccd49275e5e549f5e929d35",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 45,
"avg_line_length": 26.6,
"alnum_prop": 0.6616541353383458,
"repo_name": "skorokithakis/requests-guard",
"id": "7ee073bd1084b5e6fa063fcaf3ba5799a42d0670",
"size": "158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "requests_guard/__init__.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1724"
},
{
"name": "Python",
"bytes": "3571"
}
],
"symlink_target": ""
}
|
"""Decorators to provide authorization for the cloud datastore admin
service.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
def can_perform_cron_tasks(handler):
"""Decorator to ensure that the handler is being called by cron or by a
superadmin of the application.
"""
def test_can_perform(self, **kwargs):
"""Checks if the handler is called by cron or by a superadmin of the
application.
Args:
**kwargs: *. Keyword arguments.
Returns:
*. The return value of the decorated function.
Raises:
UnauthorizedUserException. The user does not have
credentials to access the page.
"""
if (self.request.headers.get('X-AppEngine-Cron') is None and
not self.is_super_admin):
raise self.UnauthorizedUserException(
'You do not have the credentials to access this page.')
else:
return handler(self, **kwargs)
test_can_perform.__wrapped__ = True
return test_can_perform
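# Illustrative usage sketch (hypothetical handler, not part of this module):
#
#     class ExportHandler(base.BaseHandler):
#         @acl_decorators.can_perform_cron_tasks
#         def get(self):
#             ...
#
# A request that lacks the X-AppEngine-Cron header and does not come from a
# superadmin then raises UnauthorizedUserException before get() runs.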
|
{
"content_hash": "d73a099f155585068e9c9a5e89ceef19",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 78,
"avg_line_length": 32.583333333333336,
"alnum_prop": 0.6325660699062233,
"repo_name": "prasanna08/oppia",
"id": "ec69d372b0fee702ef784e8e470d0b1c9f7c7524",
"size": "1796",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "export/acl_decorators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "97795"
},
{
"name": "HTML",
"bytes": "1128491"
},
{
"name": "JavaScript",
"bytes": "733121"
},
{
"name": "Python",
"bytes": "9362251"
},
{
"name": "Shell",
"bytes": "10639"
},
{
"name": "TypeScript",
"bytes": "6077851"
}
],
"symlink_target": ""
}
|
"""Support for MQTT climate devices."""
import functools
import logging
import voluptuous as vol
from homeassistant.components import climate
from homeassistant.components.climate import (
PLATFORM_SCHEMA as CLIMATE_PLATFORM_SCHEMA,
ClimateEntity,
)
from homeassistant.components.climate.const import (
ATTR_HVAC_MODE,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
DEFAULT_MAX_TEMP,
DEFAULT_MIN_TEMP,
FAN_AUTO,
FAN_HIGH,
FAN_LOW,
FAN_MEDIUM,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_NONE,
SUPPORT_AUX_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_NAME,
CONF_PAYLOAD_OFF,
CONF_PAYLOAD_ON,
CONF_TEMPERATURE_UNIT,
CONF_VALUE_TEMPLATE,
PRECISION_HALVES,
PRECISION_TENTHS,
PRECISION_WHOLE,
STATE_ON,
)
from homeassistant.core import HomeAssistant, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.typing import ConfigType
from . import MQTT_BASE_PLATFORM_SCHEMA, PLATFORMS, subscription
from .. import mqtt
from .const import CONF_QOS, CONF_RETAIN, DOMAIN
from .debug_info import log_messages
from .mixins import MQTT_ENTITY_COMMON_SCHEMA, MqttEntity, async_setup_entry_helper
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "MQTT HVAC"
CONF_ACTION_TEMPLATE = "action_template"
CONF_ACTION_TOPIC = "action_topic"
CONF_AUX_COMMAND_TOPIC = "aux_command_topic"
CONF_AUX_STATE_TEMPLATE = "aux_state_template"
CONF_AUX_STATE_TOPIC = "aux_state_topic"
CONF_AWAY_MODE_COMMAND_TOPIC = "away_mode_command_topic"
CONF_AWAY_MODE_STATE_TEMPLATE = "away_mode_state_template"
CONF_AWAY_MODE_STATE_TOPIC = "away_mode_state_topic"
CONF_CURRENT_TEMP_TEMPLATE = "current_temperature_template"
CONF_CURRENT_TEMP_TOPIC = "current_temperature_topic"
CONF_FAN_MODE_COMMAND_TEMPLATE = "fan_mode_command_template"
CONF_FAN_MODE_COMMAND_TOPIC = "fan_mode_command_topic"
CONF_FAN_MODE_LIST = "fan_modes"
CONF_FAN_MODE_STATE_TEMPLATE = "fan_mode_state_template"
CONF_FAN_MODE_STATE_TOPIC = "fan_mode_state_topic"
CONF_HOLD_COMMAND_TEMPLATE = "hold_command_template"
CONF_HOLD_COMMAND_TOPIC = "hold_command_topic"
CONF_HOLD_STATE_TEMPLATE = "hold_state_template"
CONF_HOLD_STATE_TOPIC = "hold_state_topic"
CONF_HOLD_LIST = "hold_modes"
CONF_MODE_COMMAND_TEMPLATE = "mode_command_template"
CONF_MODE_COMMAND_TOPIC = "mode_command_topic"
CONF_MODE_LIST = "modes"
CONF_MODE_STATE_TEMPLATE = "mode_state_template"
CONF_MODE_STATE_TOPIC = "mode_state_topic"
CONF_POWER_COMMAND_TOPIC = "power_command_topic"
CONF_POWER_STATE_TEMPLATE = "power_state_template"
CONF_POWER_STATE_TOPIC = "power_state_topic"
CONF_PRECISION = "precision"
CONF_SEND_IF_OFF = "send_if_off"
CONF_SWING_MODE_COMMAND_TEMPLATE = "swing_mode_command_template"
CONF_SWING_MODE_COMMAND_TOPIC = "swing_mode_command_topic"
CONF_SWING_MODE_LIST = "swing_modes"
CONF_SWING_MODE_STATE_TEMPLATE = "swing_mode_state_template"
CONF_SWING_MODE_STATE_TOPIC = "swing_mode_state_topic"
CONF_TEMP_COMMAND_TEMPLATE = "temperature_command_template"
CONF_TEMP_COMMAND_TOPIC = "temperature_command_topic"
CONF_TEMP_HIGH_COMMAND_TEMPLATE = "temperature_high_command_template"
CONF_TEMP_HIGH_COMMAND_TOPIC = "temperature_high_command_topic"
CONF_TEMP_HIGH_STATE_TEMPLATE = "temperature_high_state_template"
CONF_TEMP_HIGH_STATE_TOPIC = "temperature_high_state_topic"
CONF_TEMP_LOW_COMMAND_TEMPLATE = "temperature_low_command_template"
CONF_TEMP_LOW_COMMAND_TOPIC = "temperature_low_command_topic"
CONF_TEMP_LOW_STATE_TEMPLATE = "temperature_low_state_template"
CONF_TEMP_LOW_STATE_TOPIC = "temperature_low_state_topic"
CONF_TEMP_STATE_TEMPLATE = "temperature_state_template"
CONF_TEMP_STATE_TOPIC = "temperature_state_topic"
CONF_TEMP_INITIAL = "initial"
CONF_TEMP_MAX = "max_temp"
CONF_TEMP_MIN = "min_temp"
CONF_TEMP_STEP = "temp_step"
MQTT_CLIMATE_ATTRIBUTES_BLOCKED = frozenset(
{
climate.ATTR_AUX_HEAT,
climate.ATTR_CURRENT_HUMIDITY,
climate.ATTR_CURRENT_TEMPERATURE,
climate.ATTR_FAN_MODE,
climate.ATTR_FAN_MODES,
climate.ATTR_HUMIDITY,
climate.ATTR_HVAC_ACTION,
climate.ATTR_HVAC_MODES,
climate.ATTR_MAX_HUMIDITY,
climate.ATTR_MAX_TEMP,
climate.ATTR_MIN_HUMIDITY,
climate.ATTR_MIN_TEMP,
climate.ATTR_PRESET_MODE,
climate.ATTR_PRESET_MODES,
climate.ATTR_SWING_MODE,
climate.ATTR_SWING_MODES,
climate.ATTR_TARGET_TEMP_HIGH,
climate.ATTR_TARGET_TEMP_LOW,
climate.ATTR_TARGET_TEMP_STEP,
climate.ATTR_TEMPERATURE,
}
)
VALUE_TEMPLATE_KEYS = (
CONF_AUX_STATE_TEMPLATE,
CONF_AWAY_MODE_STATE_TEMPLATE,
CONF_CURRENT_TEMP_TEMPLATE,
CONF_FAN_MODE_STATE_TEMPLATE,
CONF_HOLD_STATE_TEMPLATE,
CONF_MODE_STATE_TEMPLATE,
CONF_POWER_STATE_TEMPLATE,
CONF_ACTION_TEMPLATE,
CONF_SWING_MODE_STATE_TEMPLATE,
CONF_TEMP_HIGH_STATE_TEMPLATE,
CONF_TEMP_LOW_STATE_TEMPLATE,
CONF_TEMP_STATE_TEMPLATE,
)
COMMAND_TEMPLATE_KEYS = {
CONF_FAN_MODE_COMMAND_TEMPLATE,
CONF_HOLD_COMMAND_TEMPLATE,
CONF_MODE_COMMAND_TEMPLATE,
CONF_SWING_MODE_COMMAND_TEMPLATE,
CONF_TEMP_COMMAND_TEMPLATE,
CONF_TEMP_HIGH_COMMAND_TEMPLATE,
CONF_TEMP_LOW_COMMAND_TEMPLATE,
}
TOPIC_KEYS = (
CONF_AUX_COMMAND_TOPIC,
CONF_AUX_STATE_TOPIC,
CONF_AWAY_MODE_COMMAND_TOPIC,
CONF_AWAY_MODE_STATE_TOPIC,
CONF_CURRENT_TEMP_TOPIC,
CONF_FAN_MODE_COMMAND_TOPIC,
CONF_FAN_MODE_STATE_TOPIC,
CONF_HOLD_COMMAND_TOPIC,
CONF_HOLD_STATE_TOPIC,
CONF_MODE_COMMAND_TOPIC,
CONF_MODE_STATE_TOPIC,
CONF_POWER_COMMAND_TOPIC,
CONF_POWER_STATE_TOPIC,
CONF_ACTION_TOPIC,
CONF_SWING_MODE_COMMAND_TOPIC,
CONF_SWING_MODE_STATE_TOPIC,
CONF_TEMP_COMMAND_TOPIC,
CONF_TEMP_HIGH_COMMAND_TOPIC,
CONF_TEMP_HIGH_STATE_TOPIC,
CONF_TEMP_LOW_COMMAND_TOPIC,
CONF_TEMP_LOW_STATE_TOPIC,
CONF_TEMP_STATE_TOPIC,
)
SCHEMA_BASE = CLIMATE_PLATFORM_SCHEMA.extend(MQTT_BASE_PLATFORM_SCHEMA.schema)
PLATFORM_SCHEMA = SCHEMA_BASE.extend(
{
vol.Optional(CONF_AUX_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_AUX_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_AUX_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_AWAY_MODE_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_AWAY_MODE_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_AWAY_MODE_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_CURRENT_TEMP_TEMPLATE): cv.template,
vol.Optional(CONF_CURRENT_TEMP_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_FAN_MODE_COMMAND_TEMPLATE): cv.template,
vol.Optional(CONF_FAN_MODE_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(
CONF_FAN_MODE_LIST,
default=[FAN_AUTO, FAN_LOW, FAN_MEDIUM, FAN_HIGH],
): cv.ensure_list,
vol.Optional(CONF_FAN_MODE_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_FAN_MODE_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_HOLD_COMMAND_TEMPLATE): cv.template,
vol.Optional(CONF_HOLD_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_HOLD_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_HOLD_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_HOLD_LIST, default=list): cv.ensure_list,
vol.Optional(CONF_MODE_COMMAND_TEMPLATE): cv.template,
vol.Optional(CONF_MODE_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(
CONF_MODE_LIST,
default=[
HVAC_MODE_AUTO,
HVAC_MODE_OFF,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
],
): cv.ensure_list,
vol.Optional(CONF_MODE_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_MODE_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PAYLOAD_ON, default="ON"): cv.string,
vol.Optional(CONF_PAYLOAD_OFF, default="OFF"): cv.string,
vol.Optional(CONF_POWER_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_POWER_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_POWER_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_PRECISION): vol.In(
[PRECISION_TENTHS, PRECISION_HALVES, PRECISION_WHOLE]
),
vol.Optional(CONF_RETAIN, default=mqtt.DEFAULT_RETAIN): cv.boolean,
vol.Optional(CONF_SEND_IF_OFF, default=True): cv.boolean,
vol.Optional(CONF_ACTION_TEMPLATE): cv.template,
vol.Optional(CONF_ACTION_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_SWING_MODE_COMMAND_TEMPLATE): cv.template,
vol.Optional(CONF_SWING_MODE_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(
CONF_SWING_MODE_LIST, default=[STATE_ON, HVAC_MODE_OFF]
): cv.ensure_list,
vol.Optional(CONF_SWING_MODE_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_SWING_MODE_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_TEMP_INITIAL, default=21): cv.positive_int,
vol.Optional(CONF_TEMP_MIN, default=DEFAULT_MIN_TEMP): vol.Coerce(float),
vol.Optional(CONF_TEMP_MAX, default=DEFAULT_MAX_TEMP): vol.Coerce(float),
vol.Optional(CONF_TEMP_STEP, default=1.0): vol.Coerce(float),
vol.Optional(CONF_TEMP_COMMAND_TEMPLATE): cv.template,
vol.Optional(CONF_TEMP_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_TEMP_HIGH_COMMAND_TEMPLATE): cv.template,
vol.Optional(CONF_TEMP_HIGH_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_TEMP_HIGH_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_TEMP_HIGH_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_TEMP_LOW_COMMAND_TEMPLATE): cv.template,
vol.Optional(CONF_TEMP_LOW_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_TEMP_LOW_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_TEMP_LOW_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_TEMP_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_TEMP_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_TEMPERATURE_UNIT): cv.temperature_unit,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
}
).extend(MQTT_ENTITY_COMMON_SCHEMA.schema)
async def async_setup_platform(
hass: HomeAssistant, async_add_entities, config: ConfigType, discovery_info=None
):
"""Set up MQTT climate device through configuration.yaml."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
await _async_setup_entity(hass, config, async_add_entities)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up MQTT climate device dynamically through MQTT discovery."""
setup = functools.partial(
_async_setup_entity, hass, async_add_entities, config_entry=config_entry
)
await async_setup_entry_helper(hass, climate.DOMAIN, setup, PLATFORM_SCHEMA)
async def _async_setup_entity(
hass, async_add_entities, config, config_entry=None, discovery_data=None
):
"""Set up the MQTT climate devices."""
async_add_entities([MqttClimate(hass, config, config_entry, discovery_data)])
class MqttClimate(MqttEntity, ClimateEntity):
"""Representation of an MQTT climate device."""
_attributes_extra_blocked = MQTT_CLIMATE_ATTRIBUTES_BLOCKED
def __init__(self, hass, config, config_entry, discovery_data):
"""Initialize the climate device."""
self._action = None
self._aux = False
self._away = False
self._current_fan_mode = None
self._current_operation = None
self._current_swing_mode = None
self._current_temp = None
self._hold = None
self._target_temp = None
self._target_temp_high = None
self._target_temp_low = None
self._topic = None
self._value_templates = None
self._command_templates = None
MqttEntity.__init__(self, hass, config, config_entry, discovery_data)
@staticmethod
def config_schema():
"""Return the config schema."""
return PLATFORM_SCHEMA
async def async_added_to_hass(self):
"""Handle being added to Home Assistant."""
await super().async_added_to_hass()
await self._subscribe_topics()
def _setup_from_config(self, config):
"""(Re)Setup the entity."""
self._topic = {key: config.get(key) for key in TOPIC_KEYS}
# set to None in non-optimistic mode
self._target_temp = (
self._current_fan_mode
) = self._current_operation = self._current_swing_mode = None
self._target_temp_low = None
self._target_temp_high = None
if self._topic[CONF_TEMP_STATE_TOPIC] is None:
self._target_temp = config[CONF_TEMP_INITIAL]
if self._topic[CONF_TEMP_LOW_STATE_TOPIC] is None:
self._target_temp_low = config[CONF_TEMP_INITIAL]
if self._topic[CONF_TEMP_HIGH_STATE_TOPIC] is None:
self._target_temp_high = config[CONF_TEMP_INITIAL]
if self._topic[CONF_FAN_MODE_STATE_TOPIC] is None:
self._current_fan_mode = FAN_LOW
if self._topic[CONF_SWING_MODE_STATE_TOPIC] is None:
self._current_swing_mode = HVAC_MODE_OFF
if self._topic[CONF_MODE_STATE_TOPIC] is None:
self._current_operation = HVAC_MODE_OFF
self._action = None
self._away = False
self._hold = None
self._aux = False
value_templates = {}
for key in VALUE_TEMPLATE_KEYS:
value_templates[key] = lambda value: value
if CONF_VALUE_TEMPLATE in config:
value_template = config.get(CONF_VALUE_TEMPLATE)
value_template.hass = self.hass
value_templates = {
key: value_template.async_render_with_possible_json_value
for key in VALUE_TEMPLATE_KEYS
}
for key in VALUE_TEMPLATE_KEYS & config.keys():
tpl = config[key]
value_templates[key] = tpl.async_render_with_possible_json_value
tpl.hass = self.hass
self._value_templates = value_templates
command_templates = {}
for key in COMMAND_TEMPLATE_KEYS:
command_templates[key] = lambda value: value
for key in COMMAND_TEMPLATE_KEYS & config.keys():
tpl = config[key]
command_templates[key] = tpl.async_render_with_possible_json_value
tpl.hass = self.hass
self._command_templates = command_templates
async def _subscribe_topics(self): # noqa: C901
"""(Re)Subscribe to topics."""
topics = {}
qos = self._config[CONF_QOS]
def add_subscription(topics, topic, msg_callback):
if self._topic[topic] is not None:
topics[topic] = {
"topic": self._topic[topic],
"msg_callback": msg_callback,
"qos": qos,
}
def render_template(msg, template_name):
template = self._value_templates[template_name]
return template(msg.payload)
@callback
@log_messages(self.hass, self.entity_id)
def handle_action_received(msg):
"""Handle receiving action via MQTT."""
payload = render_template(msg, CONF_ACTION_TEMPLATE)
self._action = payload
self.async_write_ha_state()
add_subscription(topics, CONF_ACTION_TOPIC, handle_action_received)
@callback
def handle_temperature_received(msg, template_name, attr):
"""Handle temperature coming via MQTT."""
payload = render_template(msg, template_name)
try:
setattr(self, attr, float(payload))
self.async_write_ha_state()
except ValueError:
_LOGGER.error("Could not parse temperature from %s", payload)
@callback
@log_messages(self.hass, self.entity_id)
def handle_current_temperature_received(msg):
"""Handle current temperature coming via MQTT."""
handle_temperature_received(
msg, CONF_CURRENT_TEMP_TEMPLATE, "_current_temp"
)
add_subscription(
topics, CONF_CURRENT_TEMP_TOPIC, handle_current_temperature_received
)
@callback
@log_messages(self.hass, self.entity_id)
def handle_target_temperature_received(msg):
"""Handle target temperature coming via MQTT."""
handle_temperature_received(msg, CONF_TEMP_STATE_TEMPLATE, "_target_temp")
add_subscription(
topics, CONF_TEMP_STATE_TOPIC, handle_target_temperature_received
)
@callback
@log_messages(self.hass, self.entity_id)
def handle_temperature_low_received(msg):
"""Handle target temperature low coming via MQTT."""
handle_temperature_received(
msg, CONF_TEMP_LOW_STATE_TEMPLATE, "_target_temp_low"
)
add_subscription(
topics, CONF_TEMP_LOW_STATE_TOPIC, handle_temperature_low_received
)
@callback
@log_messages(self.hass, self.entity_id)
def handle_temperature_high_received(msg):
"""Handle target temperature high coming via MQTT."""
handle_temperature_received(
msg, CONF_TEMP_HIGH_STATE_TEMPLATE, "_target_temp_high"
)
add_subscription(
topics, CONF_TEMP_HIGH_STATE_TOPIC, handle_temperature_high_received
)
@callback
def handle_mode_received(msg, template_name, attr, mode_list):
"""Handle receiving listed mode via MQTT."""
payload = render_template(msg, template_name)
if payload not in self._config[mode_list]:
_LOGGER.error("Invalid %s mode: %s", mode_list, payload)
else:
setattr(self, attr, payload)
self.async_write_ha_state()
@callback
@log_messages(self.hass, self.entity_id)
def handle_current_mode_received(msg):
"""Handle receiving mode via MQTT."""
handle_mode_received(
msg, CONF_MODE_STATE_TEMPLATE, "_current_operation", CONF_MODE_LIST
)
add_subscription(topics, CONF_MODE_STATE_TOPIC, handle_current_mode_received)
@callback
@log_messages(self.hass, self.entity_id)
def handle_fan_mode_received(msg):
"""Handle receiving fan mode via MQTT."""
handle_mode_received(
msg,
CONF_FAN_MODE_STATE_TEMPLATE,
"_current_fan_mode",
CONF_FAN_MODE_LIST,
)
add_subscription(topics, CONF_FAN_MODE_STATE_TOPIC, handle_fan_mode_received)
@callback
@log_messages(self.hass, self.entity_id)
def handle_swing_mode_received(msg):
"""Handle receiving swing mode via MQTT."""
handle_mode_received(
msg,
CONF_SWING_MODE_STATE_TEMPLATE,
"_current_swing_mode",
CONF_SWING_MODE_LIST,
)
add_subscription(
topics, CONF_SWING_MODE_STATE_TOPIC, handle_swing_mode_received
)
@callback
def handle_onoff_mode_received(msg, template_name, attr):
"""Handle receiving on/off mode via MQTT."""
payload = render_template(msg, template_name)
payload_on = self._config[CONF_PAYLOAD_ON]
payload_off = self._config[CONF_PAYLOAD_OFF]
if payload == "True":
payload = payload_on
elif payload == "False":
payload = payload_off
if payload == payload_on:
setattr(self, attr, True)
elif payload == payload_off:
setattr(self, attr, False)
else:
_LOGGER.error("Invalid %s mode: %s", attr, payload)
self.async_write_ha_state()
@callback
@log_messages(self.hass, self.entity_id)
def handle_away_mode_received(msg):
"""Handle receiving away mode via MQTT."""
handle_onoff_mode_received(msg, CONF_AWAY_MODE_STATE_TEMPLATE, "_away")
add_subscription(topics, CONF_AWAY_MODE_STATE_TOPIC, handle_away_mode_received)
@callback
@log_messages(self.hass, self.entity_id)
def handle_aux_mode_received(msg):
"""Handle receiving aux mode via MQTT."""
handle_onoff_mode_received(msg, CONF_AUX_STATE_TEMPLATE, "_aux")
add_subscription(topics, CONF_AUX_STATE_TOPIC, handle_aux_mode_received)
@callback
@log_messages(self.hass, self.entity_id)
def handle_hold_mode_received(msg):
"""Handle receiving hold mode via MQTT."""
payload = render_template(msg, CONF_HOLD_STATE_TEMPLATE)
if payload == "off":
payload = None
self._hold = payload
self.async_write_ha_state()
add_subscription(topics, CONF_HOLD_STATE_TOPIC, handle_hold_mode_received)
self._sub_state = await subscription.async_subscribe_topics(
self.hass, self._sub_state, topics
)
@property
def temperature_unit(self):
"""Return the unit of measurement."""
if self._config.get(CONF_TEMPERATURE_UNIT):
return self._config.get(CONF_TEMPERATURE_UNIT)
return self.hass.config.units.temperature_unit
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temp
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temp
@property
def target_temperature_low(self):
"""Return the low target temperature we try to reach."""
return self._target_temp_low
@property
def target_temperature_high(self):
"""Return the high target temperature we try to reach."""
return self._target_temp_high
@property
def hvac_action(self):
"""Return the current running hvac operation if supported."""
return self._action
@property
def hvac_mode(self):
"""Return current operation ie. heat, cool, idle."""
return self._current_operation
@property
def hvac_modes(self):
"""Return the list of available operation modes."""
return self._config[CONF_MODE_LIST]
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return self._config[CONF_TEMP_STEP]
@property
def preset_mode(self):
"""Return preset mode."""
if self._hold:
return self._hold
if self._away:
return PRESET_AWAY
return PRESET_NONE
@property
def preset_modes(self):
"""Return preset modes."""
presets = []
if (self._topic[CONF_AWAY_MODE_STATE_TOPIC] is not None) or (
self._topic[CONF_AWAY_MODE_COMMAND_TOPIC] is not None
):
presets.append(PRESET_AWAY)
presets.extend(self._config[CONF_HOLD_LIST])
if presets:
presets.insert(0, PRESET_NONE)
return presets
@property
def is_aux_heat(self):
"""Return true if away mode is on."""
return self._aux
@property
def fan_mode(self):
"""Return the fan setting."""
return self._current_fan_mode
@property
def fan_modes(self):
"""Return the list of available fan modes."""
return self._config[CONF_FAN_MODE_LIST]
def _publish(self, topic, payload):
if self._topic[topic] is not None:
mqtt.async_publish(
self.hass,
self._topic[topic],
payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
def _set_temperature(self, temp, cmnd_topic, cmnd_template, state_topic, attr):
if temp is not None:
if self._topic[state_topic] is None:
# optimistic mode
setattr(self, attr, temp)
if (
self._config[CONF_SEND_IF_OFF]
or self._current_operation != HVAC_MODE_OFF
):
payload = self._command_templates[cmnd_template](temp)
self._publish(cmnd_topic, payload)
async def async_set_temperature(self, **kwargs):
"""Set new target temperatures."""
if kwargs.get(ATTR_HVAC_MODE) is not None:
operation_mode = kwargs.get(ATTR_HVAC_MODE)
await self.async_set_hvac_mode(operation_mode)
self._set_temperature(
kwargs.get(ATTR_TEMPERATURE),
CONF_TEMP_COMMAND_TOPIC,
CONF_TEMP_COMMAND_TEMPLATE,
CONF_TEMP_STATE_TOPIC,
"_target_temp",
)
self._set_temperature(
kwargs.get(ATTR_TARGET_TEMP_LOW),
CONF_TEMP_LOW_COMMAND_TOPIC,
CONF_TEMP_LOW_COMMAND_TEMPLATE,
CONF_TEMP_LOW_STATE_TOPIC,
"_target_temp_low",
)
self._set_temperature(
kwargs.get(ATTR_TARGET_TEMP_HIGH),
CONF_TEMP_HIGH_COMMAND_TOPIC,
CONF_TEMP_HIGH_COMMAND_TEMPLATE,
CONF_TEMP_HIGH_STATE_TOPIC,
"_target_temp_high",
)
# Always optimistic?
self.async_write_ha_state()
async def async_set_swing_mode(self, swing_mode):
"""Set new swing mode."""
if self._config[CONF_SEND_IF_OFF] or self._current_operation != HVAC_MODE_OFF:
payload = self._command_templates[CONF_SWING_MODE_COMMAND_TEMPLATE](
swing_mode
)
self._publish(CONF_SWING_MODE_COMMAND_TOPIC, payload)
if self._topic[CONF_SWING_MODE_STATE_TOPIC] is None:
self._current_swing_mode = swing_mode
self.async_write_ha_state()
async def async_set_fan_mode(self, fan_mode):
"""Set new target temperature."""
if self._config[CONF_SEND_IF_OFF] or self._current_operation != HVAC_MODE_OFF:
payload = self._command_templates[CONF_FAN_MODE_COMMAND_TEMPLATE](fan_mode)
self._publish(CONF_FAN_MODE_COMMAND_TOPIC, payload)
if self._topic[CONF_FAN_MODE_STATE_TOPIC] is None:
self._current_fan_mode = fan_mode
self.async_write_ha_state()
async def async_set_hvac_mode(self, hvac_mode) -> None:
"""Set new operation mode."""
if self._current_operation == HVAC_MODE_OFF and hvac_mode != HVAC_MODE_OFF:
self._publish(CONF_POWER_COMMAND_TOPIC, self._config[CONF_PAYLOAD_ON])
elif self._current_operation != HVAC_MODE_OFF and hvac_mode == HVAC_MODE_OFF:
self._publish(CONF_POWER_COMMAND_TOPIC, self._config[CONF_PAYLOAD_OFF])
payload = self._command_templates[CONF_MODE_COMMAND_TEMPLATE](hvac_mode)
self._publish(CONF_MODE_COMMAND_TOPIC, payload)
if self._topic[CONF_MODE_STATE_TOPIC] is None:
self._current_operation = hvac_mode
self.async_write_ha_state()
@property
def swing_mode(self):
"""Return the swing setting."""
return self._current_swing_mode
@property
def swing_modes(self):
"""List of available swing modes."""
return self._config[CONF_SWING_MODE_LIST]
async def async_set_preset_mode(self, preset_mode):
"""Set a preset mode."""
if preset_mode == self.preset_mode:
return
        # Track if we should optimistically update the state
optimistic_update = False
if self._away:
optimistic_update = optimistic_update or self._set_away_mode(False)
elif preset_mode == PRESET_AWAY:
if self._hold:
self._set_hold_mode(None)
optimistic_update = optimistic_update or self._set_away_mode(True)
else:
hold_mode = preset_mode
if preset_mode == PRESET_NONE:
hold_mode = None
optimistic_update = optimistic_update or self._set_hold_mode(hold_mode)
if optimistic_update:
self.async_write_ha_state()
def _set_away_mode(self, state):
"""Set away mode.
        Returns True if we should optimistically write the state.
"""
self._publish(
CONF_AWAY_MODE_COMMAND_TOPIC,
self._config[CONF_PAYLOAD_ON] if state else self._config[CONF_PAYLOAD_OFF],
)
if self._topic[CONF_AWAY_MODE_STATE_TOPIC] is not None:
return False
self._away = state
return True
def _set_hold_mode(self, hold_mode):
"""Set hold mode.
        Returns True if we should optimistically write the state.
"""
payload = self._command_templates[CONF_HOLD_COMMAND_TEMPLATE](
hold_mode or "off"
)
self._publish(CONF_HOLD_COMMAND_TOPIC, payload)
if self._topic[CONF_HOLD_STATE_TOPIC] is not None:
return False
self._hold = hold_mode
return True
def _set_aux_heat(self, state):
self._publish(
CONF_AUX_COMMAND_TOPIC,
self._config[CONF_PAYLOAD_ON] if state else self._config[CONF_PAYLOAD_OFF],
)
if self._topic[CONF_AUX_STATE_TOPIC] is None:
self._aux = state
self.async_write_ha_state()
async def async_turn_aux_heat_on(self):
"""Turn auxiliary heater on."""
self._set_aux_heat(True)
async def async_turn_aux_heat_off(self):
"""Turn auxiliary heater off."""
self._set_aux_heat(False)
@property
def supported_features(self):
"""Return the list of supported features."""
support = 0
if (self._topic[CONF_TEMP_STATE_TOPIC] is not None) or (
self._topic[CONF_TEMP_COMMAND_TOPIC] is not None
):
support |= SUPPORT_TARGET_TEMPERATURE
if (self._topic[CONF_TEMP_LOW_STATE_TOPIC] is not None) or (
self._topic[CONF_TEMP_LOW_COMMAND_TOPIC] is not None
):
support |= SUPPORT_TARGET_TEMPERATURE_RANGE
if (self._topic[CONF_TEMP_HIGH_STATE_TOPIC] is not None) or (
self._topic[CONF_TEMP_HIGH_COMMAND_TOPIC] is not None
):
support |= SUPPORT_TARGET_TEMPERATURE_RANGE
if (self._topic[CONF_FAN_MODE_STATE_TOPIC] is not None) or (
self._topic[CONF_FAN_MODE_COMMAND_TOPIC] is not None
):
support |= SUPPORT_FAN_MODE
if (self._topic[CONF_SWING_MODE_STATE_TOPIC] is not None) or (
self._topic[CONF_SWING_MODE_COMMAND_TOPIC] is not None
):
support |= SUPPORT_SWING_MODE
if (
(self._topic[CONF_AWAY_MODE_STATE_TOPIC] is not None)
or (self._topic[CONF_AWAY_MODE_COMMAND_TOPIC] is not None)
or (self._topic[CONF_HOLD_STATE_TOPIC] is not None)
or (self._topic[CONF_HOLD_COMMAND_TOPIC] is not None)
):
support |= SUPPORT_PRESET_MODE
if (self._topic[CONF_AUX_STATE_TOPIC] is not None) or (
self._topic[CONF_AUX_COMMAND_TOPIC] is not None
):
support |= SUPPORT_AUX_HEAT
return support
@property
def min_temp(self):
"""Return the minimum temperature."""
return self._config[CONF_TEMP_MIN]
@property
def max_temp(self):
"""Return the maximum temperature."""
return self._config[CONF_TEMP_MAX]
@property
def precision(self):
"""Return the precision of the system."""
if self._config.get(CONF_PRECISION) is not None:
return self._config.get(CONF_PRECISION)
return super().precision
|
{
"content_hash": "94d94ff2a78a1d5bec71ae4632e2b4a1",
"timestamp": "",
"source": "github",
"line_count": 895,
"max_line_length": 87,
"avg_line_length": 36.3050279329609,
"alnum_prop": 0.6236420152032746,
"repo_name": "lukas-hetzenecker/home-assistant",
"id": "fb19c5e7038777a07d8af4551c8627aa63aa2a7d",
"size": "32493",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/mqtt/climate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38023745"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
""" ./examples/label/bdd/bdd_label.rst """
from hamcrest import assert_that
from allure_commons_test.report import has_test_case
from allure_commons_test.label import has_epic
from allure_commons_test.label import has_feature
from allure_commons_test.label import has_story
def test_single_bdd_label(executed_docstring_path):
assert_that(executed_docstring_path.allure_report,
has_test_case("test_single_bdd_label",
has_epic("My epic"),
has_feature("My feature"),
has_story("My story")
)
)
def test_multiple_bdd_label(executed_docstring_path):
assert_that(executed_docstring_path.allure_report,
has_test_case("test_multiple_bdd_label",
has_epic("My epic"),
has_epic("Another epic"),
has_feature("My feature"),
has_feature("Another feature"),
has_feature("One more feature"),
has_story("My story"),
has_story("Alternative story")
)
)
|
{
"content_hash": "aee5d1bf564966e8d3308a7564d7861f",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 62,
"avg_line_length": 40.96774193548387,
"alnum_prop": 0.5007874015748032,
"repo_name": "allure-framework/allure-python",
"id": "28d72275d7c04e689916a9acafdd3e169181ab45",
"size": "1270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "allure-pytest/test/acceptance/label/bdd/bdd_label_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "35653"
},
{
"name": "Python",
"bytes": "335323"
},
{
"name": "RobotFramework",
"bytes": "14420"
}
],
"symlink_target": ""
}
|
"""Example search service that uses a general db file of populated locations.
This service provides:
A) a function that registers the service.
B) a search service object that inherits from this class with 2 methods:
1) KmlSearch: a method for getting kml search results.
2) JsonSearch: a method for getting json search results.
Along with methods from the base class for creating the kml and json, it uses
the working directory, which has been set via a base class method call, for
locating the geplaces db file.
"""
import os
import re
import ge_base_search
# These should match what you used when defining the search tab.
SEARCH_SERVICE_NAME = "GEPlacesPlugin"
SEARCH_SERVICE_KEY = "location"
# Alternate search key for Google Earth 6.2.2+.
ALT_SEARCH_SERVICE_KEY = "q"
DATA_STORE_NAME = "GEPlaces Search Results"
class GEPlacesSearch(ge_base_search.GEBaseSearch):
"""GEPlaces search service for finding populated locations."""
def __init__(self):
# Initialize the base class.
super(GEPlacesSearch, self).__init__()
def Search(self, handler, placemark_fn, search_term,
delimiter="", max_results=20):
"""Does a grep-based search on geplaces using given placemark renderer.
Since the geplaces file is stored in rough order by population, the
results will come back with most populous locations first.
Args:
handler: Web server handler serving this request.
placemark_fn: Function to call to render the next placemark.
search_term: Search term (or phrase) to use in grep.
delimiter: Delimiter to use between placemarks.
max_results: Maximum number of results (placemarks) to create.
"""
if self.LatLngSearch(handler, placemark_fn, search_term):
return
database_file = "%s%sgeplaces%splaces_200.txt" % (
self.working_directory_, os.sep, os.sep)
try:
fp = open(database_file)
fp.close()
    except IOError:
      print "Unable to find: %s" % database_file
      return
cnt = 0
pattern = re.compile(search_term, re.IGNORECASE)
for func in (pattern.match, pattern.search):
fp = open(database_file)
for line in fp:
if func(line):
data = line.split("|:")
if cnt > 0:
if delimiter:
handler.write(delimiter)
placemark_fn(handler, data[0], "", "", data[0], data[4],
"Population: %s<br>Latitude: %s<br>Longitude: %s"
% (data[5], data[2], data[3]),
data[3], data[2])
cnt += 1
if cnt >= max_results:
break
fp.close()
if cnt > 0:
break
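  # Note on the data layout (inferred from the field indices used above, so
  # treat it as an assumption): each places_200.txt record is "|:"-delimited
  # with at least name (0), latitude (2), longitude (3), a descriptive field
  # (4) and population (5), e.g. a hypothetical line:
  #   tokyo|:...|:35.68|:139.75|:Japan|:33200000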
def KmlSearch(self, handler):
"""Does the indicated search and returns the results as KML.
Args:
handler: Web server handler serving this request.
"""
try:
search_term = handler.request.arguments[SEARCH_SERVICE_KEY][0]
except KeyError:
# Earth 6.2.2+ will use "q" instead.
search_term = handler.request.arguments[ALT_SEARCH_SERVICE_KEY][0]
self.KmlStart(handler, search_term)
self.Search(handler, self.KmlPlacemark, search_term)
self.KmlEnd(handler)
def JsonSearch(self, handler, cb):
"""Does the indicated search and return the results as JSON.
Args:
handler: Web server handler serving this request.
cb: Json callback variable name.
"""
search_term = handler.request.arguments[SEARCH_SERVICE_KEY][0]
self.JsonStart(handler, cb, DATA_STORE_NAME, search_term)
self.Search(handler, self.JsonPlacemark, search_term, ",")
self.JsonEnd(handler)
def RegisterSearchService(search_services):
"""Creates a new search service object and adds it by name to the dict.
Args:
search_services: dict to which the new search service should be added
using its name as the key.
Returns:
the new search service object.
"""
if SEARCH_SERVICE_NAME in search_services.keys():
print "Warning: replacing existing %s service." % SEARCH_SERVICE_NAME
search_services[SEARCH_SERVICE_NAME] = GEPlacesSearch()
return search_services[SEARCH_SERVICE_NAME]
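# Illustrative wiring sketch (assumed, not part of this file): the portable
# server is expected to build a dict of services and let each module register
# itself, roughly:
#
#     search_services = {}
#     service = RegisterSearchService(search_services)
#     # later, per request: search_services["GEPlacesPlugin"].KmlSearch(handler)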
|
{
"content_hash": "514882238f7b1f6ddae4af8b33152c59",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 77,
"avg_line_length": 33.80327868852459,
"alnum_prop": 0.6665858389912707,
"repo_name": "tst-mswartz/earthenterprise",
"id": "2ba00e6659096a6a2809259412d6eae9d39cde21",
"size": "4730",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "earth_enterprise/src/fusion/portableglobe/servers/search_services/search_service_geplaces.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "134506"
},
{
"name": "C++",
"bytes": "12059928"
},
{
"name": "CSS",
"bytes": "337423"
},
{
"name": "Groovy",
"bytes": "29553"
},
{
"name": "HTML",
"bytes": "3297141"
},
{
"name": "Java",
"bytes": "9028"
},
{
"name": "JavaScript",
"bytes": "1531779"
},
{
"name": "Makefile",
"bytes": "3425"
},
{
"name": "PLpgSQL",
"bytes": "13426"
},
{
"name": "Perl",
"bytes": "376215"
},
{
"name": "Prolog",
"bytes": "1423"
},
{
"name": "Python",
"bytes": "2753075"
},
{
"name": "QMake",
"bytes": "5293"
},
{
"name": "Raku",
"bytes": "6715"
},
{
"name": "SWIG",
"bytes": "1959"
},
{
"name": "Shell",
"bytes": "244504"
},
{
"name": "TSQL",
"bytes": "1820"
}
],
"symlink_target": ""
}
|
import time, sys, os, config
from comodit_client.api import Client
from comodit_client.api.exceptions import PythonApiException
from comodit_client.api.collection import EntityNotFoundException
from comodit_client.rest.exceptions import ApiException
from comodit_client.api.host import Host
from helper import create_host, get_short_hostname
def deploy():
# Script
print "Deploying Ceph cluster"
start_time = time.time()
NUM_OF_MON = 1
NUM_OF_OSD = 2
# Connect to the ComodIT API
client = Client(config.endpoint, config.username, config.password)
env = client.get_environment(config.organization, 'Cluster')
# Initialize empty cluster
for key in ("monitors", "osds", "mdss"):
try:
env.settings().create(key, [])
except:
pass
time.sleep(1)
try:
env.settings().create("admin_key", config.admin_key)
except:
pass
time.sleep(1)
conf_app = [{"name": "Ceph Configuration", "settings": {}}]
# Provision hosts
mon_hosts = []
for i in xrange(0, NUM_OF_MON):
try:
mon = env.get_host('Monitor ' + str(i))
except EntityNotFoundException:
mon = create_host(env, 'Monitor ' + str(i), config.platform, config.distribution, conf_app)
print "Deploying Monitor " + str(i)
mon.provision()
mon_hosts.append(mon)
osd_hosts = []
for i in xrange(0, NUM_OF_OSD):
try:
osd = env.get_host('Object Store ' + str(i))
except EntityNotFoundException:
osd = create_host(env, 'Object Store ' + str(i), config.platform, config.distribution, conf_app)
print "Deploying Object Store " + str(i)
osd.provision()
osd_hosts.append(osd)
print "Waiting for all hosts to be deployed..."
for h in mon_hosts + osd_hosts:
h.wait_for_state(Host.State.READY, config.time_out)
# Configure the cluster as it is now known
mon_ips = []
mon_names = []
mon_addrs = []
for h in mon_hosts:
ip = h.get_instance().wait_for_property("ip.eth0", config.time_out)
mon_ips.append(ip)
mon_names.append(get_short_hostname(h.get_instance().wait_for_property("hostname", config.time_out)))
mon_addrs.append(ip + ":6879")
osd_ips = []
osd_names = []
for h in osd_hosts:
osd_ips.append(h.get_instance().wait_for_property("ip.eth0", config.time_out))
osd_names.append(get_short_hostname(h.get_instance().wait_for_property("hostname", config.time_out)))
for i in xrange(0, len(mon_addrs)):
print "Monitor %i has address %s and hostname %s" % (i, mon_addrs[i], mon_names[i])
for i in xrange(0, len(osd_ips)):
print "OSD %i has IP %s and hostname %s" % (i, osd_ips[i], osd_names[i])
print
print "Configure cluster..."
monitors = []
for i in xrange(0, len(mon_addrs)):
monitors.append({"id": str(i), "host": mon_names[i], "addr": mon_addrs[i]})
osds = []
for i in xrange(0, len(osd_names)):
osds.append({"id": str(i), "host": osd_names[i]})
mdss = []
for i in xrange(0, len(mon_names)):
mdss.append({"id": str(i), "host": mon_names[i]})
env.settings().update("monitors", monitors)
time.sleep(3)
env.settings().update("osds", osds)
time.sleep(3)
env.settings().update("mdss", mdss)
time.sleep(3)
env.settings().update("admin_key", config.admin_key)
time.sleep(3)
# Install Ceph
print "Installing first monitor and meta-data service..."
mon_hosts[0].install("Ceph Monitor", {"bootstrap": True, "mon_id": "0", "mon_addr": mon_addrs[0]})
time.sleep(3)
mon_hosts[0].install("Ceph Metadata", {"mds_id": "0"})
mon_hosts[0].wait_for_pending_changes()
print "Installing additional monitors (if any) and meta-data service(s)..."
for i in xrange(1, len(mon_hosts)):
mon_hosts[i].install("Ceph Metadata", {"mds_id": str(i)})
time.sleep(3)
mon_hosts[i].install("Ceph Monitor", {"mon_id": str(i)})
time.sleep(3)
for h in mon_hosts:
h.wait_for_pending_changes()
print "Installing OSD(s)..."
for i in xrange(0, len(osd_hosts)):
osd_hosts[i].install("Ceph Object Store", {"osd_id": str(i), "osd_hostname": osd_names[i]})
time.sleep(3)
for h in osd_hosts:
h.wait_for_pending_changes()
total_time = time.time() - start_time
print "Master node's public IP: %s" % (mon_hosts[0].get_instance().wait_for_address(config.time_out))
print "Deployment time: " + str(total_time)
if __name__ == '__main__':
try:
deploy()
except PythonApiException as e:
print e
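# The config module imported above is assumed (from the attributes used in
# this script) to provide values along these lines; the names are taken from
# the script, the example values are placeholders:
#
#     endpoint = "https://my.comodit.com/api"
#     username = "user"
#     password = "secret"
#     organization = "My Organization"
#     platform = "..."        # passed through to helper.create_host()
#     distribution = "..."    # passed through to helper.create_host()
#     admin_key = "AQ...=="   # Ceph admin keyring secret
#     time_out = 600          # seconds to wait for host states/properties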
|
{
"content_hash": "74d7a6d238c15916483f5b7c4c474c75",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 109,
"avg_line_length": 31.966216216216218,
"alnum_prop": 0.6045233565842316,
"repo_name": "comodit/demos",
"id": "5c824825c7a5e83151c2fe8031ace0ce1c06fbce",
"size": "4754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceph-cluster/deploy.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "6281"
},
{
"name": "Python",
"bytes": "54787"
},
{
"name": "Shell",
"bytes": "10304"
}
],
"symlink_target": ""
}
|
'''
For User collection
'''
import json
import tornado.web
from config import CMS_CFG
from torcms.core import tools
from torcms.core.base_handler import BaseHandler
from torcms.core.tools import logger
from torcms.model.collect_model import MCollect
class CollectHandler(BaseHandler):
'''
For User collection
'''
def initialize(self, **kwargs):
super().initialize()
def get(self, *args, **kwargs):
url_str = args[0]
if url_str:
url_arr = self.parse_url(url_str)
else:
return False
if len(url_arr) == 1:
if url_str == 'list':
self.show_list(url_str)
else:
if self.get_current_user():
self.add_or_update(url_str)
else:
self.set_status(403)
return False
elif len(url_arr) == 2:
if url_arr[0] == 'remove':
self.remove_collect(url_arr[1])
else:
self.show_list(url_arr[0], url_arr[1])
@tornado.web.authenticated
def add_or_update(self, app_id):
'''
        Add or update a collection entry for the current user.
'''
logger.info('Collect info: user-{0}, uid-{1}'.format(
self.userinfo.uid, app_id))
MCollect.add_or_update(self.userinfo.uid, app_id)
out_dic = {'success': True}
return json.dump(out_dic, self)
@tornado.web.authenticated
def remove_collect(self, post_id):
'''
        Remove a collection entry for the current user.
'''
logger.info('Collect info: user-{0}, uid-{1}'.format(
self.userinfo.uid, post_id))
MCollect.remove_collect(self.userinfo.uid, post_id)
out_dic = {'success': True}
return json.dump(out_dic, self)
@tornado.web.authenticated
def show_list(self, the_list, cur_p=''):
'''
List of the user collections.
'''
current_page_number = 1
if cur_p == '':
current_page_number = 1
else:
try:
current_page_number = int(cur_p)
except TypeError:
current_page_number = 1
except Exception as err:
print(err.args)
print(str(err))
print(repr(err))
current_page_number = 1 if current_page_number < 1 else current_page_number
num_of_cat = MCollect.count_of_user(self.userinfo.uid)
page_num = int(num_of_cat / CMS_CFG['list_num']) + 1
kwd = {'current_page': current_page_number}
self.render('misc/collect/list.html',
recs_collect=MCollect.query_pager_by_all(
self.userinfo.uid, current_page_number).objects(),
userinfo=self.userinfo,
cfg=CMS_CFG,
kwd=kwd)
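# Routing sketch (inferred from get() above; the actual paths depend on the
# application's URL spec, so treat them as assumptions):
#   .../list            -> show_list('list')        paginated collection list
#   .../<app_id>        -> add_or_update(app_id)    add or update a collection
#   .../remove/<id>     -> remove_collect(id)       drop a collection entry
#   .../<x>/<page>      -> show_list(x, page)       list at the given page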
|
{
"content_hash": "c81656e448a8834e9d2bf9dd32d8370e",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 83,
"avg_line_length": 29.285714285714285,
"alnum_prop": 0.5289198606271777,
"repo_name": "bukun/TorCMS",
"id": "a0a2d9a006ad4444cfe0382bb657c4f070e80752",
"size": "2893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "torcms/handlers/collect_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "75939"
},
{
"name": "Dockerfile",
"bytes": "2243"
},
{
"name": "HTML",
"bytes": "292427"
},
{
"name": "JavaScript",
"bytes": "34394"
},
{
"name": "Makefile",
"bytes": "1108"
},
{
"name": "Python",
"bytes": "747675"
},
{
"name": "Ruby",
"bytes": "926"
},
{
"name": "SCSS",
"bytes": "550"
},
{
"name": "Sass",
"bytes": "69221"
},
{
"name": "Shell",
"bytes": "1317"
}
],
"symlink_target": ""
}
|
"""
Track resources like memory and disk for a compute host. Provides the
scheduler with useful information about availability through the ComputeNode
model.
"""
import collections
import copy
from oslo_log import log as logging
from oslo_serialization import jsonutils
from nova.compute import claims
from nova.compute import monitors
from nova.compute import stats
from nova.compute import task_states
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
from nova.objects import migration as migration_obj
from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova.scheduler.client import report
from nova import utils
from nova.virt import hardware
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
COMPUTE_RESOURCE_SEMAPHORE = "compute_resources"
def _instance_in_resize_state(instance):
"""Returns True if the instance is in one of the resizing states.
:param instance: `nova.objects.Instance` object
"""
vm = instance.vm_state
task = instance.task_state
if vm == vm_states.RESIZED:
return True
if (vm in [vm_states.ACTIVE, vm_states.STOPPED]
and task in [task_states.RESIZE_PREP,
task_states.RESIZE_MIGRATING, task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH, task_states.REBUILDING]):
return True
return False
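# Example (sketch, derived from the checks above): ACTIVE/RESIZE_MIGRATING and
# STOPPED/RESIZE_FINISH count as resizing, RESIZED counts regardless of task
# state, while ACTIVE with no task state does not.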
def _is_trackable_migration(migration):
# Only look at resize/migrate migration and evacuation records
# NOTE(danms): RT should probably examine live migration
# records as well and do something smart. However, ignore
# those for now to avoid them being included in below calculations.
return migration.migration_type in ('resize', 'migration',
'evacuation')
def _normalize_inventory_from_cn_obj(inv_data, cn):
"""Helper function that injects various information from a compute node
object into the inventory dict returned from the virt driver's
get_inventory() method. This function allows us to marry information like
*_allocation_ratio and reserved memory amounts that are in the
compute_nodes DB table and that the virt driver doesn't know about with the
information the virt driver *does* know about.
Note that if the supplied inv_data contains allocation_ratio, reserved or
other fields, we DO NOT override the value with that of the compute node.
This is to ensure that the virt driver is the single source of truth
regarding inventory information. For instance, the Ironic virt driver will
always return a very specific inventory with allocation_ratios pinned to
1.0.
:param inv_data: Dict, keyed by resource class, of inventory information
returned from virt driver's get_inventory() method
    :param cn: `objects.ComputeNode` describing the compute node
"""
if fields.ResourceClass.VCPU in inv_data:
cpu_inv = inv_data[fields.ResourceClass.VCPU]
if 'allocation_ratio' not in cpu_inv:
cpu_inv['allocation_ratio'] = cn.cpu_allocation_ratio
if 'reserved' not in cpu_inv:
cpu_inv['reserved'] = CONF.reserved_host_cpus
if fields.ResourceClass.MEMORY_MB in inv_data:
mem_inv = inv_data[fields.ResourceClass.MEMORY_MB]
if 'allocation_ratio' not in mem_inv:
mem_inv['allocation_ratio'] = cn.ram_allocation_ratio
if 'reserved' not in mem_inv:
mem_inv['reserved'] = CONF.reserved_host_memory_mb
if fields.ResourceClass.DISK_GB in inv_data:
disk_inv = inv_data[fields.ResourceClass.DISK_GB]
if 'allocation_ratio' not in disk_inv:
disk_inv['allocation_ratio'] = cn.disk_allocation_ratio
if 'reserved' not in disk_inv:
# TODO(johngarbutt) We should either move to reserved_host_disk_gb
# or start tracking DISK_MB.
reserved_mb = CONF.reserved_host_disk_mb
reserved_gb = report.convert_mb_to_ceil_gb(reserved_mb)
disk_inv['reserved'] = reserved_gb
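# --- Editor's sketch (not part of upstream Nova; the inventory values below are
# illustrative). Shows how the helper above fills in allocation ratios and
# reserved amounts from a ComputeNode object only when the virt driver did not
# already supply them.
def _example_normalize_inventory(cn):
    """Return a hypothetical VCPU/MEMORY_MB inventory normalized against cn."""
    inv = {
        fields.ResourceClass.VCPU: {'total': 8},
        fields.ResourceClass.MEMORY_MB: {'total': 16384, 'reserved': 512},
    }
    _normalize_inventory_from_cn_obj(inv, cn)
    # VCPU now carries cn.cpu_allocation_ratio and CONF.reserved_host_cpus;
    # MEMORY_MB keeps its driver-supplied 'reserved' value untouched.
    return inv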
class ResourceTracker(object):
"""Compute helper class for keeping track of resource usage as instances
are built and destroyed.
"""
def __init__(self, host, driver):
self.host = host
self.driver = driver
self.pci_tracker = None
# Dict of objects.ComputeNode objects, keyed by nodename
self.compute_nodes = {}
self.stats = stats.Stats()
self.tracked_instances = {}
self.tracked_migrations = {}
monitor_handler = monitors.MonitorHandler(self)
self.monitors = monitor_handler.monitors
self.old_resources = collections.defaultdict(objects.ComputeNode)
self.scheduler_client = scheduler_client.SchedulerClient()
self.reportclient = self.scheduler_client.reportclient
self.ram_allocation_ratio = CONF.ram_allocation_ratio
self.cpu_allocation_ratio = CONF.cpu_allocation_ratio
self.disk_allocation_ratio = CONF.disk_allocation_ratio
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def instance_claim(self, context, instance, nodename, limits=None):
"""Indicate that some resources are needed for an upcoming compute
instance build operation.
This should be called before the compute node is about to perform
an instance build operation that will consume additional resources.
:param context: security context
:param instance: instance to reserve resources for.
:type instance: nova.objects.instance.Instance object
:param nodename: The Ironic nodename selected by the scheduler
:param limits: Dict of oversubscription limits for memory, disk,
and CPUs.
:returns: A Claim ticket representing the reserved resources. It can
be used to revert the resource usage if an error occurs
during the instance build.
"""
if self.disabled(nodename):
# instance_claim() was called before update_available_resource()
# (which ensures that a compute node exists for nodename). We
# shouldn't get here but in case we do, just set the instance's
# host and nodename attribute (probably incorrect) and return a
# NoopClaim.
# TODO(jaypipes): Remove all the disabled junk from the resource
# tracker. Servicegroup API-level active-checking belongs in the
# nova-compute manager.
self._set_instance_host_and_node(instance, nodename)
return claims.NopClaim()
# sanity checks:
if instance.host:
LOG.warning("Host field should not be set on the instance "
"until resources have been claimed.",
instance=instance)
if instance.node:
LOG.warning("Node field should not be set on the instance "
"until resources have been claimed.",
instance=instance)
# get the overhead required to build this instance:
overhead = self.driver.estimate_instance_overhead(instance)
LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
"MB", {'flavor': instance.flavor.memory_mb,
'overhead': overhead['memory_mb']})
LOG.debug("Disk overhead for %(flavor)d GB instance; %(overhead)d "
"GB", {'flavor': instance.flavor.root_gb,
'overhead': overhead.get('disk_gb', 0)})
LOG.debug("CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
"vCPU(s)", {'flavor': instance.flavor.vcpus,
'overhead': overhead.get('vcpus', 0)})
cn = self.compute_nodes[nodename]
pci_requests = objects.InstancePCIRequests.get_by_instance_uuid(
context, instance.uuid)
claim = claims.Claim(context, instance, nodename, self, cn,
pci_requests, overhead=overhead, limits=limits)
# self._set_instance_host_and_node() will save instance to the DB
# so set instance.numa_topology first. We need to make sure
# that numa_topology is saved while under COMPUTE_RESOURCE_SEMAPHORE
# so that the resource audit knows about any cpus we've pinned.
instance_numa_topology = claim.claimed_numa_topology
instance.numa_topology = instance_numa_topology
self._set_instance_host_and_node(instance, nodename)
if self.pci_tracker:
# NOTE(jaypipes): ComputeNode.pci_device_pools is set below
# in _update_usage_from_instance().
self.pci_tracker.claim_instance(context, pci_requests,
instance_numa_topology)
# Mark resources in-use and update stats
self._update_usage_from_instance(context, instance, nodename)
elevated = context.elevated()
# persist changes to the compute node:
self._update(elevated, cn)
return claim
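    def _example_claim_usage(self, ctxt, instance, nodename, limits, spawn):
        """Editor's usage sketch (hypothetical caller code, not upstream Nova).
        Illustrates how the Claim returned by instance_claim() is meant to be
        consumed as a context manager, so the claimed usage is rolled back if
        the build step raises. ``spawn`` is an assumed callable standing in for
        the driver's build step.
        """
        with self.instance_claim(ctxt, instance, nodename, limits):
            spawn(ctxt, instance)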
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def rebuild_claim(self, context, instance, nodename, limits=None,
image_meta=None, migration=None):
"""Create a claim for a rebuild operation."""
instance_type = instance.flavor
return self._move_claim(context, instance, instance_type, nodename,
move_type='evacuation', limits=limits,
image_meta=image_meta, migration=migration)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def resize_claim(self, context, instance, instance_type, nodename,
image_meta=None, limits=None):
"""Create a claim for a resize or cold-migration move."""
return self._move_claim(context, instance, instance_type, nodename,
image_meta=image_meta, limits=limits)
def _move_claim(self, context, instance, new_instance_type, nodename,
move_type=None, image_meta=None, limits=None,
migration=None):
"""Indicate that resources are needed for a move to this host.
Move can be either a migrate/resize, live-migrate or an
evacuate/rebuild operation.
:param context: security context
:param instance: instance object to reserve resources for
:param new_instance_type: new instance_type being resized to
:param nodename: The Ironic nodename selected by the scheduler
:param image_meta: instance image metadata
:param move_type: move type - can be one of 'migration', 'resize',
'live-migration', 'evacuate'
:param limits: Dict of oversubscription limits for memory, disk,
and CPUs
:param migration: A migration object if one was already created
elsewhere for this operation
        :returns: A Claim ticket representing the reserved resources. It
                  should be used to finalize the resource claim or to free
                  the resources after the compute operation is finished.
"""
image_meta = image_meta or {}
if migration:
self._claim_existing_migration(migration, nodename)
else:
migration = self._create_migration(context, instance,
new_instance_type,
nodename, move_type)
if self.disabled(nodename):
# compute_driver doesn't support resource tracking, just
# generate the migration record and continue the resize:
return claims.NopClaim(migration=migration)
# get memory overhead required to build this instance:
overhead = self.driver.estimate_instance_overhead(new_instance_type)
LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
"MB", {'flavor': new_instance_type.memory_mb,
'overhead': overhead['memory_mb']})
LOG.debug("Disk overhead for %(flavor)d GB instance; %(overhead)d "
"GB", {'flavor': instance.flavor.root_gb,
'overhead': overhead.get('disk_gb', 0)})
LOG.debug("CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
"vCPU(s)", {'flavor': instance.flavor.vcpus,
'overhead': overhead.get('vcpus', 0)})
cn = self.compute_nodes[nodename]
# TODO(moshele): we are recreating the pci requests even if
# there was no change on resize. This will cause allocating
# the old/new pci device in the resize phase. In the future
# we would like to optimise this.
new_pci_requests = pci_request.get_pci_requests_from_flavor(
new_instance_type)
new_pci_requests.instance_uuid = instance.uuid
# PCI requests come from two sources: instance flavor and
# SR-IOV ports. SR-IOV ports pci_request don't have an alias_name.
# On resize merge the SR-IOV ports pci_requests with the new
# instance flavor pci_requests.
if instance.pci_requests:
for request in instance.pci_requests.requests:
if request.alias_name is None:
new_pci_requests.requests.append(request)
claim = claims.MoveClaim(context, instance, nodename,
new_instance_type, image_meta, self, cn,
new_pci_requests, overhead=overhead,
limits=limits)
claim.migration = migration
claimed_pci_devices_objs = []
if self.pci_tracker:
# NOTE(jaypipes): ComputeNode.pci_device_pools is set below
# in _update_usage_from_instance().
claimed_pci_devices_objs = self.pci_tracker.claim_instance(
context, new_pci_requests, claim.claimed_numa_topology)
claimed_pci_devices = objects.PciDeviceList(
objects=claimed_pci_devices_objs)
# TODO(jaypipes): Move claimed_numa_topology out of the Claim's
# constructor flow so the Claim constructor only tests whether
# resources can be claimed, not consume the resources directly.
mig_context = objects.MigrationContext(
context=context, instance_uuid=instance.uuid,
migration_id=migration.id,
old_numa_topology=instance.numa_topology,
new_numa_topology=claim.claimed_numa_topology,
old_pci_devices=instance.pci_devices,
new_pci_devices=claimed_pci_devices,
old_pci_requests=instance.pci_requests,
new_pci_requests=new_pci_requests)
instance.migration_context = mig_context
instance.save()
# Mark the resources in-use for the resize landing on this
# compute host:
self._update_usage_from_migration(context, instance, migration,
nodename)
elevated = context.elevated()
self._update(elevated, cn)
return claim
def _create_migration(self, context, instance, new_instance_type,
nodename, move_type=None):
"""Create a migration record for the upcoming resize. This should
        be done while the COMPUTE_RESOURCE_SEMAPHORE is held so the resource
claim will not be lost if the audit process starts.
"""
migration = objects.Migration(context=context.elevated())
migration.dest_compute = self.host
migration.dest_node = nodename
migration.dest_host = self.driver.get_host_ip_addr()
migration.old_instance_type_id = instance.flavor.id
migration.new_instance_type_id = new_instance_type.id
migration.status = 'pre-migrating'
migration.instance_uuid = instance.uuid
migration.source_compute = instance.host
migration.source_node = instance.node
if move_type:
migration.migration_type = move_type
else:
migration.migration_type = migration_obj.determine_migration_type(
migration)
migration.create()
return migration
def _claim_existing_migration(self, migration, nodename):
"""Make an existing migration record count for resource tracking.
If a migration record was created already before the request made
it to this compute host, only set up the migration so it's included in
resource tracking. This should be done while the
        COMPUTE_RESOURCE_SEMAPHORE is held.
"""
migration.dest_compute = self.host
migration.dest_node = nodename
migration.dest_host = self.driver.get_host_ip_addr()
migration.status = 'pre-migrating'
migration.save()
def _set_instance_host_and_node(self, instance, nodename):
"""Tag the instance as belonging to this host. This should be done
        while the COMPUTE_RESOURCE_SEMAPHORE is held so the resource claim
will not be lost if the audit process starts.
"""
instance.host = self.host
instance.launched_on = self.host
instance.node = nodename
instance.save()
def _unset_instance_host_and_node(self, instance):
"""Untag the instance so it no longer belongs to the host.
        This should be done while the COMPUTE_RESOURCE_SEMAPHORE is held so
the resource claim will not be lost if the audit process starts.
"""
instance.host = None
instance.node = None
instance.save()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def abort_instance_claim(self, context, instance, nodename):
"""Remove usage from the given instance."""
self._update_usage_from_instance(context, instance, nodename,
is_removed=True)
instance.clear_numa_topology()
self._unset_instance_host_and_node(instance)
self._update(context.elevated(), self.compute_nodes[nodename])
def _drop_pci_devices(self, instance, nodename, prefix):
if self.pci_tracker:
# free old/new allocated pci devices
pci_devices = self._get_migration_context_resource(
'pci_devices', instance, prefix=prefix)
if pci_devices:
for pci_device in pci_devices:
self.pci_tracker.free_device(pci_device, instance)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
self.compute_nodes[nodename].pci_device_pools = dev_pools_obj
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def drop_move_claim(self, context, instance, nodename,
instance_type=None, prefix='new_'):
# Remove usage for an incoming/outgoing migration on the destination
# node.
if instance['uuid'] in self.tracked_migrations:
migration = self.tracked_migrations.pop(instance['uuid'])
if not instance_type:
ctxt = context.elevated()
instance_type = self._get_instance_type(ctxt, instance, prefix,
migration)
if instance_type is not None:
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix=prefix)
usage = self._get_usage_dict(
instance_type, numa_topology=numa_topology)
self._drop_pci_devices(instance, nodename, prefix)
self._update_usage(usage, nodename, sign=-1)
ctxt = context.elevated()
self._update(ctxt, self.compute_nodes[nodename])
# Remove usage for an instance that is not tracked in migrations (such
# as on the source node after a migration).
# NOTE(lbeliveau): On resize on the same node, the instance is
# included in both tracked_migrations and tracked_instances.
elif (instance['uuid'] in self.tracked_instances):
self.tracked_instances.pop(instance['uuid'])
self._drop_pci_devices(instance, nodename, prefix)
# TODO(lbeliveau): Validate if numa needs the same treatment.
ctxt = context.elevated()
self._update(ctxt, self.compute_nodes[nodename])
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def update_usage(self, context, instance, nodename):
"""Update the resource usage and stats after a change in an
instance
"""
if self.disabled(nodename):
return
uuid = instance['uuid']
# don't update usage for this instance unless it submitted a resource
# claim first:
if uuid in self.tracked_instances:
self._update_usage_from_instance(context, instance, nodename)
self._update(context.elevated(), self.compute_nodes[nodename])
def disabled(self, nodename):
return (nodename not in self.compute_nodes or
not self.driver.node_is_available(nodename))
def _init_compute_node(self, context, resources):
"""Initialize the compute node if it does not already exist.
The resource tracker will be inoperable if compute_node
is not defined. The compute_node will remain undefined if
we fail to create it or if there is no associated service
registered.
If this method has to create a compute node it needs initial
values - these come from resources.
:param context: security context
:param resources: initial values
"""
nodename = resources['hypervisor_hostname']
# if there is already a compute node just use resources
# to initialize
if nodename in self.compute_nodes:
cn = self.compute_nodes[nodename]
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
self._update(context, cn)
return
# now try to get the compute node record from the
# database. If we get one we use resources to initialize
cn = self._get_compute_node(context, nodename)
if cn:
self.compute_nodes[nodename] = cn
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
self._update(context, cn)
return
# there was no local copy and none in the database
# so we need to create a new compute node. This needs
# to be initialized with resource values.
cn = objects.ComputeNode(context)
cn.host = self.host
self._copy_resources(cn, resources)
self.compute_nodes[nodename] = cn
cn.create()
LOG.info('Compute node record created for '
'%(host)s:%(node)s with uuid: %(uuid)s',
{'host': self.host, 'node': nodename, 'uuid': cn.uuid})
self._setup_pci_tracker(context, cn, resources)
self._update(context, cn)
def _setup_pci_tracker(self, context, compute_node, resources):
if not self.pci_tracker:
n_id = compute_node.id
self.pci_tracker = pci_manager.PciDevTracker(context, node_id=n_id)
if 'pci_passthrough_devices' in resources:
dev_json = resources.pop('pci_passthrough_devices')
self.pci_tracker.update_devices_from_hypervisor_resources(
dev_json)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
compute_node.pci_device_pools = dev_pools_obj
def _copy_resources(self, compute_node, resources):
"""Copy resource values to supplied compute_node."""
# purge old stats and init with anything passed in by the driver
self.stats.clear()
self.stats.digest_stats(resources.get('stats'))
compute_node.stats = copy.deepcopy(self.stats)
# update the allocation ratios for the related ComputeNode object
compute_node.ram_allocation_ratio = self.ram_allocation_ratio
compute_node.cpu_allocation_ratio = self.cpu_allocation_ratio
compute_node.disk_allocation_ratio = self.disk_allocation_ratio
# now copy rest to compute_node
compute_node.update_from_virt_driver(resources)
def _get_host_metrics(self, context, nodename):
"""Get the metrics from monitors and
notify information to message bus.
"""
metrics = objects.MonitorMetricList()
metrics_info = {}
for monitor in self.monitors:
try:
monitor.populate_metrics(metrics)
except NotImplementedError:
LOG.debug("The compute driver doesn't support host "
"metrics for %(mon)s", {'mon': monitor})
except Exception as exc:
LOG.warning("Cannot get the metrics from %(mon)s; "
"error: %(exc)s",
{'mon': monitor, 'exc': exc})
# TODO(jaypipes): Remove this when compute_node.metrics doesn't need
# to be populated as a JSONified string.
metrics = metrics.to_list()
if len(metrics):
metrics_info['nodename'] = nodename
metrics_info['metrics'] = metrics
metrics_info['host'] = self.host
metrics_info['host_ip'] = CONF.my_ip
notifier = rpc.get_notifier(service='compute', host=nodename)
notifier.info(context, 'compute.metrics.update', metrics_info)
return metrics
def update_available_resource(self, context, nodename):
"""Override in-memory calculations of compute node resource usage based
on data audited from the hypervisor layer.
Add in resource claims in progress to account for operations that have
declared a need for resources, but not necessarily retrieved them from
the hypervisor layer yet.
:param nodename: Temporary parameter representing the Ironic resource
node. This parameter will be removed once Ironic
baremetal resource nodes are handled like any other
resource in the system.
"""
LOG.debug("Auditing locally available compute resources for "
"%(host)s (node: %(node)s)",
{'node': nodename,
'host': self.host})
resources = self.driver.get_available_resource(nodename)
# NOTE(jaypipes): The resources['hypervisor_hostname'] field now
# contains a non-None value, even for non-Ironic nova-compute hosts. It
# is this value that will be populated in the compute_nodes table.
resources['host_ip'] = CONF.my_ip
# We want the 'cpu_info' to be None from the POV of the
# virt driver, but the DB requires it to be non-null so
# just force it to empty string
if "cpu_info" not in resources or resources["cpu_info"] is None:
resources["cpu_info"] = ''
self._verify_resources(resources)
self._report_hypervisor_resource_view(resources)
self._update_available_resource(context, resources)
def _pair_instances_to_migrations(self, migrations, instances):
instance_by_uuid = {inst.uuid: inst for inst in instances}
for migration in migrations:
try:
migration.instance = instance_by_uuid[migration.instance_uuid]
except KeyError:
# NOTE(danms): If this happens, we don't set it here, and
# let the code either fail or lazy-load the instance later
# which is what happened before we added this optimization.
                # NOTE(tdurakov): this situation is possible for a resize/cold
                # migration when the migration is finished but not yet
                # confirmed/reverted; in that case the instance has already
                # changed host to the destination and no matching happens.
LOG.debug('Migration for instance %(uuid)s refers to '
'another host\'s instance!',
{'uuid': migration.instance_uuid})
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def _update_available_resource(self, context, resources):
# initialize the compute node object, creating it
# if it does not already exist.
self._init_compute_node(context, resources)
nodename = resources['hypervisor_hostname']
# if we could not init the compute node the tracker will be
# disabled and we should quit now
if self.disabled(nodename):
return
# Grab all instances assigned to this node:
instances = objects.InstanceList.get_by_host_and_node(
context, self.host, nodename,
expected_attrs=['system_metadata',
'numa_topology',
'flavor', 'migration_context'])
# Now calculate usage based on instance utilization:
self._update_usage_from_instances(context, instances, nodename)
# Grab all in-progress migrations:
migrations = objects.MigrationList.get_in_progress_by_host_and_node(
context, self.host, nodename)
self._pair_instances_to_migrations(migrations, instances)
self._update_usage_from_migrations(context, migrations, nodename)
# Detect and account for orphaned instances that may exist on the
# hypervisor, but are not in the DB:
orphans = self._find_orphaned_instances()
self._update_usage_from_orphans(orphans, nodename)
cn = self.compute_nodes[nodename]
# NOTE(yjiang5): Because pci device tracker status is not cleared in
# this periodic task, and also because the resource tracker is not
        # notified when instances are deleted, we need to remove all usages
# from deleted instances.
self.pci_tracker.clean_usage(instances, migrations, orphans)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = dev_pools_obj
self._report_final_resource_view(nodename)
metrics = self._get_host_metrics(context, nodename)
# TODO(pmurray): metrics should not be a json string in ComputeNode,
# but it is. This should be changed in ComputeNode
cn.metrics = jsonutils.dumps(metrics)
# update the compute_node
self._update(context, cn)
LOG.debug('Compute_service record updated for %(host)s:%(node)s',
{'host': self.host, 'node': nodename})
def _get_compute_node(self, context, nodename):
"""Returns compute node for the host and nodename."""
try:
return objects.ComputeNode.get_by_host_and_nodename(
context, self.host, nodename)
except exception.NotFound:
LOG.warning("No compute node record for %(host)s:%(node)s",
{'host': self.host, 'node': nodename})
def _report_hypervisor_resource_view(self, resources):
"""Log the hypervisor's view of free resources.
This is just a snapshot of resource usage recorded by the
virt driver.
The following resources are logged:
- free memory
- free disk
- free CPUs
- assignable PCI devices
"""
nodename = resources['hypervisor_hostname']
free_ram_mb = resources['memory_mb'] - resources['memory_mb_used']
free_disk_gb = resources['local_gb'] - resources['local_gb_used']
vcpus = resources['vcpus']
if vcpus:
free_vcpus = vcpus - resources['vcpus_used']
LOG.debug("Hypervisor: free VCPUs: %s", free_vcpus)
else:
free_vcpus = 'unknown'
LOG.debug("Hypervisor: VCPU information unavailable")
pci_devices = resources.get('pci_passthrough_devices')
LOG.debug("Hypervisor/Node resource view: "
"name=%(node)s "
"free_ram=%(free_ram)sMB "
"free_disk=%(free_disk)sGB "
"free_vcpus=%(free_vcpus)s "
"pci_devices=%(pci_devices)s",
{'node': nodename,
'free_ram': free_ram_mb,
'free_disk': free_disk_gb,
'free_vcpus': free_vcpus,
'pci_devices': pci_devices})
def _report_final_resource_view(self, nodename):
"""Report final calculate of physical memory, used virtual memory,
disk, usable vCPUs, used virtual CPUs and PCI devices,
including instance calculations and in-progress resource claims. These
values will be exposed via the compute node table to the scheduler.
"""
cn = self.compute_nodes[nodename]
vcpus = cn.vcpus
if vcpus:
tcpu = vcpus
ucpu = cn.vcpus_used
LOG.debug("Total usable vcpus: %(tcpu)s, "
"total allocated vcpus: %(ucpu)s",
{'tcpu': vcpus,
'ucpu': ucpu})
else:
tcpu = 0
ucpu = 0
pci_stats = (list(cn.pci_device_pools) if
cn.pci_device_pools else [])
LOG.info("Final resource view: "
"name=%(node)s "
"phys_ram=%(phys_ram)sMB "
"used_ram=%(used_ram)sMB "
"phys_disk=%(phys_disk)sGB "
"used_disk=%(used_disk)sGB "
"total_vcpus=%(total_vcpus)s "
"used_vcpus=%(used_vcpus)s "
"pci_stats=%(pci_stats)s",
{'node': nodename,
'phys_ram': cn.memory_mb,
'used_ram': cn.memory_mb_used,
'phys_disk': cn.local_gb,
'used_disk': cn.local_gb_used,
'total_vcpus': tcpu,
'used_vcpus': ucpu,
'pci_stats': pci_stats})
def _resource_change(self, compute_node):
"""Check to see if any resources have changed."""
nodename = compute_node.hypervisor_hostname
old_compute = self.old_resources[nodename]
if not obj_base.obj_equal_prims(
compute_node, old_compute, ['updated_at']):
self.old_resources[nodename] = copy.deepcopy(compute_node)
return True
return False
def _update(self, context, compute_node):
"""Update partial stats locally and populate them to Scheduler."""
if not self._resource_change(compute_node):
return
nodename = compute_node.hypervisor_hostname
compute_node.save()
# Persist the stats to the Scheduler
try:
inv_data = self.driver.get_inventory(nodename)
_normalize_inventory_from_cn_obj(inv_data, compute_node)
self.scheduler_client.set_inventory_for_provider(
compute_node.uuid,
compute_node.hypervisor_hostname,
inv_data,
)
except NotImplementedError:
# Eventually all virt drivers will return an inventory dict in the
# format that the placement API expects and we'll be able to remove
# this code branch
self.scheduler_client.update_compute_node(compute_node)
if self.pci_tracker:
self.pci_tracker.save(context)
def _update_usage(self, usage, nodename, sign=1):
mem_usage = usage['memory_mb']
disk_usage = usage.get('root_gb', 0)
vcpus_usage = usage.get('vcpus', 0)
overhead = self.driver.estimate_instance_overhead(usage)
mem_usage += overhead['memory_mb']
disk_usage += overhead.get('disk_gb', 0)
vcpus_usage += overhead.get('vcpus', 0)
cn = self.compute_nodes[nodename]
cn.memory_mb_used += sign * mem_usage
cn.local_gb_used += sign * disk_usage
cn.local_gb_used += sign * usage.get('ephemeral_gb', 0)
cn.vcpus_used += sign * vcpus_usage
# free ram and disk may be negative, depending on policy:
cn.free_ram_mb = cn.memory_mb - cn.memory_mb_used
cn.free_disk_gb = cn.local_gb - cn.local_gb_used
cn.running_vms = self.stats.num_instances
# Calculate the numa usage
free = sign == -1
updated_numa_topology = hardware.get_host_numa_usage_from_instance(
cn, usage, free)
cn.numa_topology = updated_numa_topology
def _get_migration_context_resource(self, resource, instance,
prefix='new_'):
migration_context = instance.migration_context
resource = prefix + resource
if migration_context and resource in migration_context:
return getattr(migration_context, resource)
return None
def _update_usage_from_migration(self, context, instance, migration,
nodename):
"""Update usage for a single migration. The record may
represent an incoming or outbound migration.
"""
if not _is_trackable_migration(migration):
return
uuid = migration.instance_uuid
LOG.info("Updating from migration %s", uuid)
incoming = (migration.dest_compute == self.host and
migration.dest_node == nodename)
outbound = (migration.source_compute == self.host and
migration.source_node == nodename)
same_node = (incoming and outbound)
record = self.tracked_instances.get(uuid, None)
itype = None
numa_topology = None
sign = 0
if same_node:
# Same node resize. Record usage for the 'new_' resources. This
# is executed on resize_claim().
if (instance['instance_type_id'] ==
migration.old_instance_type_id):
itype = self._get_instance_type(context, instance, 'new_',
migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance)
# Allocate pci device(s) for the instance.
sign = 1
else:
# The instance is already set to the new flavor (this is done
# by the compute manager on finish_resize()), hold space for a
# possible revert to the 'old_' resources.
# NOTE(lbeliveau): When the periodic audit timer gets
# triggered, the compute usage gets reset. The usage for an
# instance that is migrated to the new flavor but not yet
# confirmed/reverted will first get accounted for by
# _update_usage_from_instances(). This method will then be
            # called, and we need to account for the 'old_' resources
# (just in case).
itype = self._get_instance_type(context, instance, 'old_',
migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix='old_')
elif incoming and not record:
# instance has not yet migrated here:
itype = self._get_instance_type(context, instance, 'new_',
migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance)
# Allocate pci device(s) for the instance.
sign = 1
elif outbound and not record:
# instance migrated, but record usage for a possible revert:
itype = self._get_instance_type(context, instance, 'old_',
migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix='old_')
if itype:
cn = self.compute_nodes[nodename]
usage = self._get_usage_dict(
itype, numa_topology=numa_topology)
if self.pci_tracker and sign:
self.pci_tracker.update_pci_for_instance(
context, instance, sign=sign)
self._update_usage(usage, nodename)
if self.pci_tracker:
obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = obj
else:
obj = objects.PciDevicePoolList()
cn.pci_device_pools = obj
self.tracked_migrations[uuid] = migration
def _update_usage_from_migrations(self, context, migrations, nodename):
filtered = {}
instances = {}
self.tracked_migrations.clear()
# do some defensive filtering against bad migrations records in the
# database:
for migration in migrations:
uuid = migration.instance_uuid
try:
if uuid not in instances:
instances[uuid] = migration.instance
except exception.InstanceNotFound as e:
# migration referencing deleted instance
LOG.debug('Migration instance not found: %s', e)
continue
# skip migration if instance isn't in a resize state:
if not _instance_in_resize_state(instances[uuid]):
LOG.warning("Instance not resizing, skipping migration.",
instance_uuid=uuid)
continue
# filter to most recently updated migration for each instance:
other_migration = filtered.get(uuid, None)
# NOTE(claudiub): In Python 3, you cannot compare NoneTypes.
if other_migration:
om = other_migration
other_time = om.updated_at or om.created_at
migration_time = migration.updated_at or migration.created_at
if migration_time > other_time:
filtered[uuid] = migration
else:
filtered[uuid] = migration
for migration in filtered.values():
instance = instances[migration.instance_uuid]
try:
self._update_usage_from_migration(context, instance, migration,
nodename)
except exception.FlavorNotFound:
LOG.warning("Flavor could not be found, skipping migration.",
instance_uuid=instance.uuid)
continue
def _update_usage_from_instance(self, context, instance, nodename,
is_removed=False):
"""Update usage for a single instance."""
uuid = instance['uuid']
is_new_instance = uuid not in self.tracked_instances
# NOTE(sfinucan): Both brand new instances as well as instances that
# are being unshelved will have is_new_instance == True
is_removed_instance = not is_new_instance and (is_removed or
instance['vm_state'] in vm_states.ALLOW_RESOURCE_REMOVAL)
if is_new_instance:
self.tracked_instances[uuid] = obj_base.obj_to_primitive(instance)
sign = 1
if is_removed_instance:
self.tracked_instances.pop(uuid)
sign = -1
cn = self.compute_nodes[nodename]
self.stats.update_stats_for_instance(instance, is_removed_instance)
cn.stats = copy.deepcopy(self.stats)
# if it's a new or deleted instance:
if is_new_instance or is_removed_instance:
if self.pci_tracker:
self.pci_tracker.update_pci_for_instance(context,
instance,
sign=sign)
self.reportclient.update_instance_allocation(cn, instance, sign)
# new instance, update compute node resource usage:
self._update_usage(self._get_usage_dict(instance), nodename,
sign=sign)
cn.current_workload = self.stats.calculate_workload()
if self.pci_tracker:
obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = obj
else:
cn.pci_device_pools = objects.PciDevicePoolList()
def _update_usage_from_instances(self, context, instances, nodename):
"""Calculate resource usage based on instance utilization. This is
different than the hypervisor's view as it will account for all
instances assigned to the local compute host, even if they are not
currently powered on.
"""
self.tracked_instances.clear()
cn = self.compute_nodes[nodename]
# set some initial values, reserve room for host/hypervisor:
cn.local_gb_used = CONF.reserved_host_disk_mb / 1024
cn.memory_mb_used = CONF.reserved_host_memory_mb
cn.vcpus_used = CONF.reserved_host_cpus
cn.free_ram_mb = (cn.memory_mb - cn.memory_mb_used)
cn.free_disk_gb = (cn.local_gb - cn.local_gb_used)
cn.current_workload = 0
cn.running_vms = 0
for instance in instances:
if instance.vm_state not in vm_states.ALLOW_RESOURCE_REMOVAL:
self._update_usage_from_instance(context, instance, nodename)
# Remove allocations for instances that have been removed.
self._remove_deleted_instances_allocations(context, cn)
def _remove_deleted_instances_allocations(self, context, cn):
tracked_keys = set(self.tracked_instances.keys())
allocations = self.reportclient.get_allocations_for_resource_provider(
cn.uuid) or {}
allocations_to_delete = set(allocations.keys()) - tracked_keys
for instance_uuid in allocations_to_delete:
# Allocations related to instances being scheduled should not be
# deleted if we already wrote the allocation previously.
try:
instance = objects.Instance.get_by_uuid(context, instance_uuid,
expected_attrs=[])
if not instance.host:
continue
except exception.InstanceNotFound:
# The instance is gone, so we definitely want to
# remove allocations associated with it.
pass
LOG.debug('Deleting stale allocation for instance %s',
instance_uuid)
self.reportclient.delete_allocation_for_instance(instance_uuid)
def _find_orphaned_instances(self):
"""Given the set of instances and migrations already account for
by resource tracker, sanity check the hypervisor to determine
if there are any "orphaned" instances left hanging around.
Orphans could be consuming memory and should be accounted for in
usage calculations to guard against potential out of memory
errors.
"""
uuids1 = frozenset(self.tracked_instances.keys())
uuids2 = frozenset(self.tracked_migrations.keys())
uuids = uuids1 | uuids2
usage = self.driver.get_per_instance_usage()
vuuids = frozenset(usage.keys())
orphan_uuids = vuuids - uuids
orphans = [usage[uuid] for uuid in orphan_uuids]
return orphans
def _update_usage_from_orphans(self, orphans, nodename):
"""Include orphaned instances in usage."""
for orphan in orphans:
memory_mb = orphan['memory_mb']
LOG.warning("Detected running orphan instance: %(uuid)s "
"(consuming %(memory_mb)s MB memory)",
{'uuid': orphan['uuid'], 'memory_mb': memory_mb})
# just record memory usage for the orphan
usage = {'memory_mb': memory_mb}
self._update_usage(usage, nodename)
def _verify_resources(self, resources):
resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
"vcpus_used", "memory_mb_used", "local_gb_used",
"numa_topology"]
missing_keys = [k for k in resource_keys if k not in resources]
if missing_keys:
reason = _("Missing keys: %s") % missing_keys
raise exception.InvalidInput(reason=reason)
def _get_instance_type(self, context, instance, prefix, migration):
"""Get the instance type from instance."""
stashed_flavors = migration.migration_type in ('resize',)
if stashed_flavors:
return getattr(instance, '%sflavor' % prefix)
else:
# NOTE(ndipanov): Certain migration types (all but resize)
# do not change flavors so there is no need to stash
# them. In that case - just get the instance flavor.
return instance.flavor
def _get_usage_dict(self, object_or_dict, **updates):
"""Make a usage dict _update methods expect.
Accepts a dict or an Instance or Flavor object, and a set of updates.
Converts the object to a dict and applies the updates.
:param object_or_dict: instance or flavor as an object or just a dict
:param updates: key-value pairs to update the passed object.
Currently only considers 'numa_topology', all other
keys are ignored.
:returns: a dict with all the information from object_or_dict updated
with updates
"""
usage = {}
if isinstance(object_or_dict, objects.Instance):
usage = {'memory_mb': object_or_dict.flavor.memory_mb,
'vcpus': object_or_dict.flavor.vcpus,
'root_gb': object_or_dict.flavor.root_gb,
'ephemeral_gb': object_or_dict.flavor.ephemeral_gb,
'numa_topology': object_or_dict.numa_topology}
elif isinstance(object_or_dict, objects.Flavor):
usage = obj_base.obj_to_primitive(object_or_dict)
else:
usage.update(object_or_dict)
for key in ('numa_topology',):
if key in updates:
usage[key] = updates[key]
return usage
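    def _example_usage_dict(self, instance, claimed_numa_topology):
        """Editor's sketch (not upstream Nova): shows how a flavor-backed
        instance plus a single override is folded into the plain dict consumed
        by _update_usage(). ``claimed_numa_topology`` is an assumed value.
        """
        usage = self._get_usage_dict(
            instance, numa_topology=claimed_numa_topology)
        # e.g. {'memory_mb': 2048, 'vcpus': 2, 'root_gb': 20,
        #       'ephemeral_gb': 0, 'numa_topology': claimed_numa_topology}
        return usage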
|
{
"content_hash": "bcef6122998cec4450f53270ad9f4115",
"timestamp": "",
"source": "github",
"line_count": 1139,
"max_line_length": 79,
"avg_line_length": 44.57682177348551,
"alnum_prop": 0.6044551237862643,
"repo_name": "Juniper/nova",
"id": "e5a57101d809f8ff67458fa61efc805afc48574a",
"size": "51413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/compute/resource_tracker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "23962"
},
{
"name": "Python",
"bytes": "19816434"
},
{
"name": "Shell",
"bytes": "27717"
},
{
"name": "Smarty",
"bytes": "339635"
}
],
"symlink_target": ""
}
|
"""
Functions for conic geometry implemented for
use with the `sympy` symbolic math module.
Generally, for use in testing and validation.
"""
from sympy import Matrix
def center(conic):
ec = conic[:-1,:-1].inv()
eo = -conic[:-1,-1]
return ec*eo
def dual(conic):
return conic.inv()
def polar_plane(ell, point=None):
if point is None:
point = [0]*(ell.shape[0]-1)
pt_ = Matrix(list(point)+[1])
return ell*pt_
def origin_distance(polar_plane):
return polar_plane[-1]/Matrix(polar_plane[:-1]).norm()
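# --- Usage sketch (editor addition, not part of the original module). The unit
# circle x**2 + y**2 - 1 = 0 corresponds to the homogeneous matrix diag(1, 1, -1);
# the helpers used below are the functions defined above.
def _example_unit_circle():
    from sympy import diag
    circle = diag(1, 1, -1)
    # The center of the unit circle is the origin.
    assert center(circle) == Matrix([0, 0])
    # Polar plane (here a polar line) of the external point (2, 0):
    line = polar_plane(circle, point=(2, 0))   # Matrix([2, 0, -1]), i.e. 2*x - 1 = 0
    # Returns Rational(-1, 2): the line x = 1/2, signed distance 1/2 from the origin.
    return origin_distance(line)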
|
{
"content_hash": "c1fcf887a239ada9f2fb27073e505f84",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 58,
"avg_line_length": 22.541666666666668,
"alnum_prop": 0.6451016635859519,
"repo_name": "davenquinn/Attitude",
"id": "84782206801cb8685d9324c916bf593c93539cf4",
"size": "541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "attitude/geom/symbolic_math.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2746"
},
{
"name": "Dockerfile",
"bytes": "605"
},
{
"name": "HTML",
"bytes": "4884"
},
{
"name": "JavaScript",
"bytes": "3752148"
},
{
"name": "Makefile",
"bytes": "799"
},
{
"name": "Python",
"bytes": "147146"
},
{
"name": "Shell",
"bytes": "1529"
},
{
"name": "Stylus",
"bytes": "5662"
},
{
"name": "TypeScript",
"bytes": "96170"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/components/reactor/shared_rct_kessel_rebel_mandal_modified_gorax.iff"
result.attribute_template_id = 8
result.stfName("space/space_item","rct_kessel_rebel_mandal_modified_gorax_n")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "876692ca217e9360e1b8ea99161cd549",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 110,
"avg_line_length": 29,
"alnum_prop": 0.726790450928382,
"repo_name": "anhstudios/swganh",
"id": "93082f959db67ec832e91e869655215bdb2afcbd",
"size": "522",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/ship/components/reactor/shared_rct_kessel_rebel_mandal_modified_gorax.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
"""Fetches the Universal Dependencies.
Visit: http://universaldependencies.org"""
import os
import anna.data.utils as utils
UD_URL = "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/"
UD_LINKS = {
"1.0": UD_URL + "1-1464/universal-dependencies-1.0.tgz",
"1.1": UD_URL + "LRT-1478/ud-treebanks-v1.1.tgz",
"1.2": UD_URL + "1-1548/ud-treebanks-v1.2.tgz",
"1.3": UD_URL + "1-1699/ud-treebanks-v1.3.tgz",
"1.4": UD_URL + "1-1827/ud-treebanks-v1.4.tgz"
}
def fetch(data_dir, dest="universal-dependencies", versions=None):
"""
Fetches and extracts the requested versions of the universal
dependencies, and saves them in the given 'folder'.
Creates the `dest` if it doesn't exist.
Args:
data_dir (str): absolute path to the folder where datasets are stored
dest (str): name for dir where UD will be extracted
versions (list[str]): list of UD versions to fetch
Returns:
final_dir (str): absolute path where UD was extracted
"""
# Create folder
ud_dir = os.path.join(data_dir, dest)
utils.create_folder(ud_dir)
if versions is None:
versions = ["1.4"]
for ver in versions:
        if ver not in UD_LINKS:
            print("Version not supported: " + ver)
            continue
        url = UD_LINKS[ver]
path = os.path.join(ud_dir, "ud-" + ver + ".tgz")
if not os.path.exists(path):
utils.urlretrieve(url, path)
return ud_dir
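# --- Usage sketch (editor addition): "/tmp/anna-data" is an arbitrary example
# path, not a project default.
if __name__ == "__main__":
    ud_dir = fetch("/tmp/anna-data", versions=["1.3", "1.4"])
    print("UD archives stored in", ud_dir)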
|
{
"content_hash": "11cc409700ffe57e9d845b43cf14c5c6",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 78,
"avg_line_length": 29.34,
"alnum_prop": 0.6271301976823449,
"repo_name": "jpbottaro/anna",
"id": "97724cb07b87fc1e3c67135a25cabcb071dff18d",
"size": "1467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anna/data/dataset/ud.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1203644"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "122148"
}
],
"symlink_target": ""
}
|
import sys
import logging
import requests
import time
import traceback
import html.parser
import urllib.parse
from ray.new_dashboard.tests.conftest import * # noqa
import pytest
import ray
from ray.test_utils import (
format_web_url,
wait_until_server_available,
)
logger = logging.getLogger(__name__)
class LogUrlParser(html.parser.HTMLParser):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._urls = []
def handle_starttag(self, tag, attrs):
if tag == "a":
self._urls.append(dict(attrs)["href"])
def error(self, message):
logger.error(message)
def get_urls(self):
return self._urls
def test_log(disable_aiohttp_cache, ray_start_with_dashboard):
@ray.remote
def write_log(s):
print(s)
test_log_text = "test_log_text"
ray.get(write_log.remote(test_log_text))
assert (wait_until_server_available(ray_start_with_dashboard["webui_url"])
is True)
webui_url = ray_start_with_dashboard["webui_url"]
webui_url = format_web_url(webui_url)
node_id = ray_start_with_dashboard["node_id"]
timeout_seconds = 10
start_time = time.time()
last_ex = None
while True:
time.sleep(1)
try:
response = requests.get(webui_url + "/log_index")
response.raise_for_status()
parser = LogUrlParser()
parser.feed(response.text)
all_nodes_log_urls = parser.get_urls()
assert len(all_nodes_log_urls) == 1
response = requests.get(all_nodes_log_urls[0])
response.raise_for_status()
parser = LogUrlParser()
parser.feed(response.text)
# Search test_log_text from all worker logs.
parsed_url = urllib.parse.urlparse(all_nodes_log_urls[0])
paths = parser.get_urls()
urls = []
for p in paths:
if "worker" in p:
urls.append(parsed_url._replace(path=p).geturl())
for u in urls:
response = requests.get(u)
response.raise_for_status()
if test_log_text in response.text:
break
else:
raise Exception(f"Can't find {test_log_text} from {urls}")
# Test range request.
response = requests.get(
webui_url + "/logs/dashboard.log",
headers={"Range": "bytes=43-51"})
response.raise_for_status()
assert response.text == "Dashboard"
# Test logUrl in node info.
response = requests.get(webui_url + f"/nodes/{node_id}")
response.raise_for_status()
node_info = response.json()
assert node_info["result"] is True
node_info = node_info["data"]["detail"]
assert "logUrl" in node_info
assert node_info["logUrl"] in all_nodes_log_urls
break
except Exception as ex:
last_ex = ex
finally:
if time.time() > start_time + timeout_seconds:
ex_stack = traceback.format_exception(
type(last_ex), last_ex,
last_ex.__traceback__) if last_ex else []
ex_stack = "".join(ex_stack)
raise Exception(f"Timed out while testing, {ex_stack}")
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
|
{
"content_hash": "26f99cc5b64b03e8583b0b565ee2e25a",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 78,
"avg_line_length": 31.34234234234234,
"alnum_prop": 0.5536073584363322,
"repo_name": "richardliaw/ray",
"id": "f92e11032bb5094a97feb15df73f9192d9cb74ef",
"size": "3479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dashboard/modules/log/tests/test_log.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "62178"
},
{
"name": "C++",
"bytes": "4258483"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Dockerfile",
"bytes": "6292"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1263157"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "7515224"
},
{
"name": "Shell",
"bytes": "117425"
},
{
"name": "Starlark",
"bytes": "200955"
},
{
"name": "TypeScript",
"bytes": "149068"
}
],
"symlink_target": ""
}
|
# Form implementation generated from reading ui file '.\ImageLoad.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import FileDialog
import ImDiffMod
import cv2
import ImDiff
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(700, 389)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.textEdit = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit.setGeometry(QtCore.QRect(10, 310, 361, 31))
self.textEdit.setObjectName("textEdit")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(370, 310, 75, 31))
self.pushButton.setObjectName("pushButton")
self.pushButton.clicked.connect(self.openFile)
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(450, 310, 81, 31))
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_2.clicked.connect(self.showDiff)
self.pushButton_2.setEnabled(False)
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setGeometry(QtCore.QRect(535, 310, 81, 31))
self.pushButton_3.setObjectName("pushButton_3")
self.pushButton_3.clicked.connect(self.hideDiff)
self.pushButton_3.setEnabled(False)
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(0, 0, 0, 0))
self.label.setObjectName("label")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(350, 0, 0, 0))
self.label_3.setObjectName("label_3")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 532, 21))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.fileOptions = FileDialog.App()
self.safeFiles = []
def ShowImage(self, im1, im2):
im1 = cv2.resize(im1, dsize=None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
im2 = cv2.resize(im2, dsize=None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
# image = QtGui.QImage(fileName)
# if image.isNull():
# QtWidgets.QMessageBox.information(self, "Image Viewer", "Cannot load %s." % fileName)
# return
height, width, channels = im2.shape
# height, width = im2.shape
# print(im2.shape)
bytesPerLine = 3*width
try:
qImg1 = QtGui.QImage(im1.data, width, height, bytesPerLine, QtGui.QImage.Format_RGB888)
# qImg2 = QtGui.QImage(im2.data, width, height, bytesPerLine, QtGui.QImage.Format_RGB888)
qImg3 = QtGui.QImage(im2.data, width, height, bytesPerLine, QtGui.QImage.Format_RGB888)
except TypeError:
return
print('Images converted')
leftPixelMap = QtGui.QPixmap(qImg1)
# rightPixelMap = QtGui.QPixmap(qImg2)
diffPixelMap = QtGui.QPixmap(qImg3)
print('PixelMaps drawn')
self.label.setPixmap(leftPixelMap)
self.label.resize(leftPixelMap.width(), leftPixelMap.height())
self.label.show()
# self.label_2.setPixmap(rightPixelMap)
# self.label_2.resize(rightPixelMap.width(), rightPixelMap.height())
# self.label_2.show()
self.label_3.setPixmap(diffPixelMap)
self.label_3.resize(diffPixelMap.width(), diffPixelMap.height())
self.label_3.show()
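    def _toQImage(self, im):
        """Editor's helper sketch (not generated by pyuic): spells out the
        3-channel uint8 conversion used in ShowImage so the stride (3 * width)
        appears in one place. Assumes a contiguous BGR/RGB image from OpenCV.
        """
        height, width, channels = im.shape
        return QtGui.QImage(im.data, width, height, 3 * width,
                            QtGui.QImage.Format_RGB888)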
def openFile(self):
fileName = self.fileOptions.openFileNameDialog()
print('Opened')
if fileName:
self.safeFiles.append(fileName)
self.im1, self.im2 = ImDiffMod.PrcessImage(fileName)
# cv2.imwrite('CutUpImage1.png', self.im1)
# cv2.imwrite('CutUpImage2.png', self.im2)
self.ShowImage(self.im1, self.im2)
self.scaleFactor = 1.0
self.pushButton_2.setEnabled(True)
self.pushButton_3.setEnabled(True)
def showDiff(self):
print('Showing differences')
temp1 = self.im1.copy()
temp2 = self.im2.copy()
imP, imN = ImDiff.imDiff(temp1, temp2)
# cv2.imwrite('Differences1.png', imP)
# cv2.imwrite('Differences2.png', imN)
self.ShowImage(imP, imN)
def hideDiff(self):
print('Hiding differences')
self.ShowImage(self.im1, self.im2)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.pushButton.setText(_translate("MainWindow", "Browse"))
self.pushButton_2.setText(_translate("MainWindow", "Show"))
self.pushButton_3.setText(_translate("MainWindow", "Hide"))
self.label.setText(_translate("MainWindow", "Image A"))
self.label_3.setText(_translate("MainWindow", "Image C"))
def SaveFile(files):
with open('SafeFiles.txt', 'w') as f:
for name in files:
            # Write one file name per line; the with-block closes the file.
            f.write(str(name) + '\n')
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
ret = app.exec_()
# print('Saving safe files')
# SaveFile(ui.safeFiles)
sys.exit(ret)
|
{
"content_hash": "a96f68b3fc73d4f78dda3e64865f3dcb",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 101,
"avg_line_length": 37.77018633540373,
"alnum_prop": 0.6341062325275448,
"repo_name": "SoumyajitPal/YinYang",
"id": "12fbda67529fc6566bc419384a332909b60ecb7b",
"size": "6106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "YinYang.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "17703"
}
],
"symlink_target": ""
}
|
__all__ = ['ArpWatchLogging', 'ArpData','RosAPI']
|
{
"content_hash": "df0ec671a61d90c24423b9da990a75f1",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 49,
"avg_line_length": 50,
"alnum_prop": 0.62,
"repo_name": "davidnutter/mikrotik-arpwatch",
"id": "76bf930f9bcb9a4e975d4c576705c162d7d39818",
"size": "1700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ArpWatch/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "50912"
},
{
"name": "Shell",
"bytes": "1846"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class SchedulerConfig(AppConfig):
name = 'scheduler'
|
{
"content_hash": "e27c254c829b4932e93ce4e006cd93c8",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 18.6,
"alnum_prop": 0.7634408602150538,
"repo_name": "saketbairoliya2/salescheduler",
"id": "60fb286a921fd77afb7998faa0e3ce26da4478f9",
"size": "93",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scheduler/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "18772"
},
{
"name": "Python",
"bytes": "20995"
}
],
"symlink_target": ""
}
|
__author__ = 'Traphix'
import sys
import time
error_log_file = '/ms/msc/logs/msc_error.log'
class ErrorLogger(object):
def __init__(self):
self.terminal = sys.stderr
self.log = file(error_log_file, "a+")
self.log.write('\n*********************************\n' + \
'MSC start at %s\n*********************************\n' % \
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
def write(self, message):
self.terminal.write(message)
self.log.write(message)
sys.stderr = ErrorLogger()
# Import a module that does not exist so the redirected stderr receives a
# traceback and it ends up in the error log.
import sdoiafjoa
|
{
"content_hash": "d904ae05c133034fb93cfd714c141295",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 87,
"avg_line_length": 27.818181818181817,
"alnum_prop": 0.49836601307189543,
"repo_name": "halexan/msc",
"id": "24c744f2344cc598a1a08563383f2200b98eef5b",
"size": "637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/foo_test/error_redirect.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "255750"
},
{
"name": "Shell",
"bytes": "30804"
}
],
"symlink_target": ""
}
|
import itertools
import json
import logging
import re
from django.utils.translation import ugettext as _
from desktop.lib.django_util import JsonResponse
from desktop.lib.exceptions_renderable import PopupException
from search.models import Collection
from controller import CollectionManagerController
from utils import fields_from_log, field_values_from_separated_file, get_type_from_morphline_type, get_field_types
LOG = logging.getLogger(__name__)
def parse_fields(request):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
result = {'status': -1}
source_type = request.POST.get('source')
if source_type == 'file':
content_type = request.POST.get('type')
try:
if content_type == 'separated':
delimiter = request.POST.get('separator', ',')
quote = request.POST.get('quote', '"')
file_obj = request.fs.open(request.POST.get('path'))
field_list = field_values_from_separated_file(file_obj, delimiter, quote)
row = next(field_list)
field_names = row.keys()
field_types = get_field_types((row.values() for row in itertools.chain([row], field_list)), iterations=51)
file_obj.close()
result['data'] = zip(field_names, field_types)
result['status'] = 0
elif content_type == 'morphlines':
morphlines = json.loads(request.POST.get('morphlines'))
# Look for entries that take on the form %{SYSLOGTIMESTAMP:timestamp}
field_results = re.findall(r'\%\{(?P<type>\w+)\:(?P<name>\w+)\}', morphlines['expression'])
if field_results:
result['data'] = []
for field_result in field_results:
result['data'].append( (field_result[1], get_type_from_morphline_type(field_result[0])) )
result['status'] = 0
else:
result['status'] = 1
result['message'] = _('Could not detect any fields.')
elif content_type == 'log':
file_obj = request.fs.open(request.POST.get('path'))
result['data'] = fields_from_log(file_obj)
file_obj.close()
result['status'] = 0
else:
result['status'] = 1
result['message'] = _('Type %s not supported.') % content_type
except Exception, e:
LOG.exception(e.message)
result['message'] = e.message
else:
result['message'] = _('Source type %s not supported.') % source_type
return JsonResponse(result)
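# --- Editor's illustration (hypothetical helper, not part of the Hue API):
# shows what the morphline regex in parse_fields() extracts from a sample
# expression string.
def _example_morphline_fields():
  # Yields [('SYSLOGTIMESTAMP', 'timestamp'), ('INT', 'pid')].
  return re.findall(r'\%\{(?P<type>\w+)\:(?P<name>\w+)\}',
                    '%{SYSLOGTIMESTAMP:timestamp} %{INT:pid}')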
def collections(request):
searcher = CollectionManagerController(request.user)
solr_collections = searcher.get_collections()
massaged_collections = []
for collection in solr_collections:
massaged_collections.append({
'name': collection,
'isCoreOnly': solr_collections[collection]['isCoreOnly'],
'isAlias': solr_collections[collection].get('isAlias', False),
'collections': solr_collections[collection].get('collections', []),
})
response = {
'status': 0,
'collections': massaged_collections
}
return JsonResponse(response)
def collections_create(request):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
response = {'status': -1}
collection = json.loads(request.POST.get('collection', '{}'))
if collection:
searcher = CollectionManagerController(request.user)
# Create instance directory, collection, and add fields
searcher.create_collection(collection.get('name'), collection.get('fields', []), collection.get('uniqueKeyField'), collection.get('df'))
try:
if request.POST.get('source') == 'file':
# Index data
searcher.update_data_from_hdfs(request.fs,
collection.get('name'),
collection.get('fields', []),
request.POST.get('path'),
request.POST.get('type'),
separator=request.POST.get('separator'),
quote_character=request.POST.get('quote'))
elif request.POST.get('source') == 'hive':
# Run a custom hive query and post data to collection
from beeswax.server import dbms
db = dbms.get(request.user)
database = request.POST.get('database')
table = request.POST.get('table')
columns = [field['name'] for field in collection.get('fields', [])]
searcher.update_data_from_hive(db, collection.get('name'), database, table, columns)
response['status'] = 0
response['message'] = _('Collection created!')
except Exception, e:
LOG.error(e)
raise
else:
response['message'] = _('Collection missing.')
return JsonResponse(response)
def collections_import(request):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
response = {'status': -1}
collection = json.loads(request.POST.get('collection', '{}'))
if collection:
searcher = CollectionManagerController(request.user)
unique_key, fields = searcher.get_fields(collection.get('name'))
# Create collection and metadata.
hue_collection, created = Collection.objects.get_or_create(name=collection.get('name'), solr_properties='{}', is_enabled=True, user=request.user)
properties_dict = hue_collection.properties_dict
properties_dict['data_type'] = 'separated'
properties_dict['field_order'] = [field_name for field_name in fields]
hue_collection.properties = json.dumps(properties_dict)
hue_collection.save()
response['status'] = 0
response['message'] = _('Collection created!')
else:
response['message'] = _('Collection missing.')
return JsonResponse(response)
def collections_remove(request):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
response = {'status': -1}
collections = json.loads(request.POST.get('collections', '[]'))
if not collections:
response['message'] = _('No collections to remove.')
if response.get('message', None) is None:
searcher = CollectionManagerController(request.user)
solr_collections = searcher.get_collections()
for collection in collections:
if collection.get('name') in solr_collections:
# Remove collection and instancedir
searcher.delete_collection(collection.get('name'), collection.get('isCoreOnly'))
response['status'] = 0
response['message'] = _('Collections removed!')
return JsonResponse(response)
def collections_fields(request, collection):
if request.method != 'GET':
raise PopupException(_('GET request required.'))
response = {}
searcher = CollectionManagerController(request.user)
unique_key, fields = searcher.get_fields(collection)
response['status'] = 0
response['fields'] = [(field, fields[field]['type'], fields[field].get('indexed', None), fields[field].get('stored', None)) for field in fields]
response['unique_key'] = unique_key
return JsonResponse(response)
def collections_update(request, collection):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
response = {'status': -1}
collection = json.loads(request.POST.get('collection', '{}'))
if not collection:
response['message'] = _('No collection to update.')
if response.get('message', None) is None:
searcher = CollectionManagerController(request.user)
searcher.update_collection(collection.get('name'), collection.get('fields', []))
response['status'] = 0
response['message'] = _('Collection updated!')
return JsonResponse(response)
def collections_data(request, collection):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
response = {'status': -1}
source = request.POST.get('source')
if source == 'file':
searcher = CollectionManagerController(request.user)
searcher.update_data_from_hdfs(request.fs,
collection,
None,
request.POST.get('path'),
request.POST.get('type'),
separator=request.POST.get('separator'),
quote_character=request.POST.get('quote'))
response['status'] = 0
response['message'] = _('Index imported!')
else:
response['message'] = _('Unsupported source %s') % source
return JsonResponse(response)
|
{
"content_hash": "d04fdbce066cf1769ef18db6b5ef0be4",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 149,
"avg_line_length": 32.883720930232556,
"alnum_prop": 0.6333097595473833,
"repo_name": "mapr/hue",
"id": "d4e6acd9b2967768e64362792b942b254c3f5887",
"size": "9276",
"binary": false,
"copies": "7",
"ref": "refs/heads/hue-3.9.0-mapr",
"path": "desktop/libs/indexer/src/indexer/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "9984620"
},
{
"name": "C++",
"bytes": "196076"
},
{
"name": "CSS",
"bytes": "374307"
},
{
"name": "Emacs Lisp",
"bytes": "3171"
},
{
"name": "Java",
"bytes": "3682996"
},
{
"name": "JavaScript",
"bytes": "963632"
},
{
"name": "Perl",
"bytes": "138710"
},
{
"name": "Python",
"bytes": "21427931"
},
{
"name": "Shell",
"bytes": "33699"
},
{
"name": "TeX",
"bytes": "126420"
},
{
"name": "XSLT",
"bytes": "190688"
}
],
"symlink_target": ""
}
|
from unittest import main
from qiita_pet.test.tornado_test_base import TestHandlerBase
class TestLogEntryViewerHandler(TestHandlerBase):
def test_get(self):
response = self.get('/admin/error/')
self.assertEqual(response.code, 403)
def test_post(self):
response = self.post('/admin/error/', {'numrecords': -5})
self.assertEqual(response.code, 403)
response = self.post('/admin/error/', {'numrecords': 20})
self.assertEqual(response.code, 403)
if __name__ == "__main__":
main()
|
{
"content_hash": "6a41304c96fcca276ff1f6c2fcd59320",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 65,
"avg_line_length": 28.526315789473685,
"alnum_prop": 0.6494464944649446,
"repo_name": "biocore/qiita",
"id": "803f521d3144d4b8bff3b1c4927ec375d7a0032d",
"size": "893",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "qiita_pet/test/test_logger.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2334"
},
{
"name": "HTML",
"bytes": "552473"
},
{
"name": "JavaScript",
"bytes": "93567"
},
{
"name": "Makefile",
"bytes": "6838"
},
{
"name": "PLpgSQL",
"bytes": "84875"
},
{
"name": "Python",
"bytes": "2469738"
},
{
"name": "SQLPL",
"bytes": "2805"
},
{
"name": "Shell",
"bytes": "3232"
},
{
"name": "TSQL",
"bytes": "202297"
}
],
"symlink_target": ""
}
|
import numba
import numpy as np
@numba.jit(nopython=True, parallel=True)
def _average_visibilities(vis, weight, flag, timeav, chanav, flagav):
# Workaround for https://github.com/numba/numba/issues/2921
flag_u8 = flag.view(np.uint8)
# Compute shapes
n_time, n_chans, n_bl = vis.shape
av_n_time = n_time // timeav
av_n_chans = n_chans // chanav
av_shape = (av_n_time, av_n_chans, n_bl)
# Allocate output buffers
av_vis = np.empty(av_shape, vis.dtype)
av_weight = np.empty(av_shape, weight.dtype)
av_flag = np.empty(av_shape, flag.dtype)
scale = weight.dtype.type(1.0 / (timeav * chanav))
wzero = weight.dtype.type(0) # Zero constant of correct type
bl_step = 128 # Want a chunk to be multiple cache lines but into L1
# We put channel as the outer loop just because it's more likely than
# time to get parallel speedup with prange (since the time axis is often
# short e.g. 1).
for av_c in numba.prange(0, av_n_chans):
cstart = av_c * chanav
vis_sum = np.empty(bl_step, vis.dtype)
vis_weight_sum = np.empty(bl_step, vis.dtype)
weight_sum = np.empty(bl_step, weight.dtype)
flag_any = np.empty(bl_step, np.bool_)
flag_all = np.empty(bl_step, np.bool_)
for av_t in range(0, av_n_time):
tstart = av_t * timeav
for bstart in range(0, n_bl, bl_step):
bstop = min(n_bl, bstart + bl_step)
vis_sum[:] = 0
vis_weight_sum[:] = 0
weight_sum[:] = 0
flag_any[:] = False
flag_all[:] = True
for t in range(tstart, tstart + timeav):
for c in range(cstart, cstart + chanav):
for b in range(bstop - bstart):
b1 = b + bstart
v = vis[t, c, b1]
w = weight[t, c, b1]
f = (flag_u8[t, c, b1] != 0)
if f:
# Don't simply use 0 here: it causes numba's type
# inference to upgrade w from float32 to float64.
w = wzero
flag_any[b] |= f
flag_all[b] &= f
vis_sum[b] += v
vis_weight_sum[b] += w * v
weight_sum[b] += w
for b in range(bstop - bstart):
b1 = b + bstart
w = np.float32(weight_sum[b])
# If everything is flagged/zero-weighted, use an unweighted average
if not w:
v = vis_sum[b] * scale
else:
v = vis_weight_sum[b] / w
f = flag_any[b] if flagav else flag_all[b]
av_vis[av_t, av_c, b1] = v
av_weight[av_t, av_c, b1] = w
av_flag[av_t, av_c, b1] = f
return av_vis, av_weight, av_flag
def average_visibilities(vis, weight, flag, timestamps, channel_freqs, timeav=10, chanav=8, flagav=False):
"""Average visibilities, flags and weights.
Visibilities are weight-averaged using the weights in the `weight` array
with flagged data set to weight zero. The averaged weights are the sum of
the input weights for each average block. An average flag is retained if
all of the data in an averaging block is flagged (the averaged visibility
in this case is the unweighted average of the input visibilities). In cases
where the averaging size in channel or time does not evenly divide the size
of the input data, the remaining channels or timestamps at the end of the
array after averaging are discarded. Channels are averaged first and the
timestamps are second. An array of timestamps and frequencies corresponding
to each channel is also directly averaged and returned.
Parameters
----------
vis: array(numtimestamps,numchannels,numbaselines) of complex64.
The input visibilities to be averaged.
weight: array(numtimestamps,numchannels,numbaselines) of float32.
The input weights (used for weighted averaging).
flag: array(numtimestamps,numchannels,numbaselines) of boolean.
Input flags (flagged data have weight zero before averaging).
timestamps: array(numtimestamps) of int.
The timestamps (in mjd seconds) corresponding to the input data.
channel_freqs: array(numchannels) of int.
The frequencies (in Hz) corresponding to the input channels.
timeav: int.
The desired averaging size in timestamps.
chanav: int.
The desired averaging size in channels.
flagav: bool
        Flag the averaged data when there is at least one flag in the bin if True.
Only flag averaged data when all data in the bin is flagged if false.
Returns
-------
    av_vis: array(int(numtimestamps/timeav),int(numchannels/chanav),numbaselines) of complex64.
    av_weight: array(int(numtimestamps/timeav),int(numchannels/chanav),numbaselines) of float32.
    av_flag: array(int(numtimestamps/timeav),int(numchannels/chanav),numbaselines) of boolean.
av_mjd: array(int(numtimestamps/timeav)) of int.
av_freq: array(int(numchannels)/chanav) of int.
"""
# Trim data to integer multiples of the averaging factors
n_time, n_chans, n_bl = vis.shape
timeav = min(timeav, n_time)
    chanav = min(chanav, n_chans)
n_time = n_time // timeav * timeav
n_chans = n_chans // chanav * chanav
vis = vis[:n_time, :n_chans]
weight = weight[:n_time, :n_chans]
flag = flag[:n_time, :n_chans]
timestamps = timestamps[:n_time]
channel_freqs = channel_freqs[:n_chans]
# Average the data (using a numba-accelerated function)
av_vis, av_weight, av_flag = \
_average_visibilities(vis, weight, flag, timeav, chanav, flagav)
# Average the metadata
av_freq = np.mean(channel_freqs.reshape(-1, chanav), axis=-1)
av_timestamps = np.mean(timestamps.reshape(-1, timeav), axis=-1)
return av_vis, av_weight, av_flag, av_timestamps, av_freq
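# Illustrative usage sketch (not part of the original module): average a tiny
# synthetic data set by a factor of 2 in time and 4 in frequency. All shapes
# and values below are made up purely for demonstration.
if __name__ == '__main__':
    n_time, n_chans, n_bl = 4, 8, 3
    vis = (np.random.randn(n_time, n_chans, n_bl)
           + 1j * np.random.randn(n_time, n_chans, n_bl)).astype(np.complex64)
    weight = np.ones((n_time, n_chans, n_bl), np.float32)
    flag = np.zeros((n_time, n_chans, n_bl), np.bool_)
    timestamps = 1234567890.0 + 8.0 * np.arange(n_time)
    channel_freqs = 1.4e9 + 1.0e6 * np.arange(n_chans)
    av_vis, av_weight, av_flag, av_time, av_freq = average_visibilities(
        vis, weight, flag, timestamps, channel_freqs, timeav=2, chanav=4)
    # Expect 2 averaged dumps, 2 averaged channels and the original 3 baselines.
    assert av_vis.shape == (2, 2, 3) and av_freq.shape == (2,)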
|
{
"content_hash": "9989b27a579497100e396ee12ac0cdff",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 106,
"avg_line_length": 44.70503597122302,
"alnum_prop": 0.586578693273254,
"repo_name": "ska-sa/katdal",
"id": "22aecea2f50f176712d2168c2ba82a3997dab775",
"size": "6980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "katdal/averager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "806486"
}
],
"symlink_target": ""
}
|
"""
[1] - Attention Is All You Need - Vaswani, Jones, Shazeer, Parmar,
Uszkoreit, Gomez, Kaiser - Google Brain/Research, U Toronto - 2017.
https://arxiv.org/pdf/1706.03762.pdf
[2] - Stabilizing Transformers for Reinforcement Learning - E. Parisotto
et al. - DeepMind - 2019. https://arxiv.org/pdf/1910.06764.pdf
[3] - Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context.
Z. Dai, Z. Yang, et al. - Carnegie Mellon U - 2019.
https://www.aclweb.org/anthology/P19-1285.pdf
"""
import gym
from gym.spaces import Box, Discrete, MultiDiscrete
import numpy as np
from typing import Dict, Optional, Union
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.torch.misc import SlimFC
from ray.rllib.models.torch.modules import GRUGate, \
RelativeMultiHeadAttention, SkipConnection
from ray.rllib.models.torch.recurrent_net import RecurrentNetwork
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.view_requirement import ViewRequirement
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.torch_ops import one_hot
from ray.rllib.utils.typing import ModelConfigDict, TensorType, List
torch, nn = try_import_torch()
class GTrXLNet(RecurrentNetwork, nn.Module):
"""A GTrXL net Model described in [2].
This is still in an experimental phase.
Can be used as a drop-in replacement for LSTMs in PPO and IMPALA.
For an example script, see: `ray/rllib/examples/attention_net.py`.
To use this network as a replacement for an RNN, configure your Trainer
as follows:
Examples:
>> config["model"]["custom_model"] = GTrXLNet
>> config["model"]["max_seq_len"] = 10
>> config["model"]["custom_model_config"] = {
>> num_transformer_units=1,
>> attention_dim=32,
>> num_heads=2,
    >>     memory_inference=50,
    >>     memory_training=50,
>> etc..
>> }
"""
def __init__(self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
num_outputs: Optional[int],
model_config: ModelConfigDict,
name: str,
*,
num_transformer_units: int = 1,
attention_dim: int = 64,
num_heads: int = 2,
memory_inference: int = 50,
memory_training: int = 50,
head_dim: int = 32,
position_wise_mlp_dim: int = 32,
init_gru_gate_bias: float = 2.0):
"""Initializes a GTrXLNet.
Args:
num_transformer_units (int): The number of Transformer repeats to
use (denoted L in [2]).
attention_dim (int): The input and output dimensions of one
Transformer unit.
num_heads (int): The number of attention heads to use in parallel.
Denoted as `H` in [3].
memory_inference (int): The number of timesteps to concat (time
axis) and feed into the next transformer unit as inference
input. The first transformer unit will receive this number of
past observations (plus the current one), instead.
memory_training (int): The number of timesteps to concat (time
axis) and feed into the next transformer unit as training
input (plus the actual input sequence of len=max_seq_len).
The first transformer unit will receive this number of
past observations (plus the input sequence), instead.
head_dim (int): The dimension of a single(!) attention head within
a multi-head attention unit. Denoted as `d` in [3].
position_wise_mlp_dim (int): The dimension of the hidden layer
within the position-wise MLP (after the multi-head attention
block within one Transformer unit). This is the size of the
first of the two layers within the PositionwiseFeedforward. The
second layer always has size=`attention_dim`.
init_gru_gate_bias (float): Initial bias values for the GRU gates
(two GRUs per Transformer unit, one after the MHA, one after
the position-wise MLP).
"""
super().__init__(observation_space, action_space, num_outputs,
model_config, name)
nn.Module.__init__(self)
self.num_transformer_units = num_transformer_units
self.attention_dim = attention_dim
self.num_heads = num_heads
self.memory_inference = memory_inference
self.memory_training = memory_training
self.head_dim = head_dim
self.max_seq_len = model_config["max_seq_len"]
self.obs_dim = observation_space.shape[0]
self.linear_layer = SlimFC(
in_size=self.obs_dim, out_size=self.attention_dim)
self.layers = [self.linear_layer]
attention_layers = []
# 2) Create L Transformer blocks according to [2].
for i in range(self.num_transformer_units):
# RelativeMultiHeadAttention part.
MHA_layer = SkipConnection(
RelativeMultiHeadAttention(
in_dim=self.attention_dim,
out_dim=self.attention_dim,
num_heads=num_heads,
head_dim=head_dim,
input_layernorm=True,
output_activation=nn.ReLU),
fan_in_layer=GRUGate(self.attention_dim, init_gru_gate_bias))
# Position-wise MultiLayerPerceptron part.
E_layer = SkipConnection(
nn.Sequential(
torch.nn.LayerNorm(self.attention_dim),
SlimFC(
in_size=self.attention_dim,
out_size=position_wise_mlp_dim,
use_bias=False,
activation_fn=nn.ReLU),
SlimFC(
in_size=position_wise_mlp_dim,
out_size=self.attention_dim,
use_bias=False,
activation_fn=nn.ReLU)),
fan_in_layer=GRUGate(self.attention_dim, init_gru_gate_bias))
            # Build a list of all attention layers in order.
attention_layers.extend([MHA_layer, E_layer])
# Create a Sequential such that all parameters inside the attention
# layers are automatically registered with this top-level model.
self.attention_layers = nn.Sequential(*attention_layers)
self.layers.extend(attention_layers)
# Final layers if num_outputs not None.
self.logits = None
self.values_out = None
# Last value output.
self._value_out = None
# Postprocess GTrXL output with another hidden layer.
if self.num_outputs is not None:
self.logits = SlimFC(
in_size=self.attention_dim,
out_size=self.num_outputs,
activation_fn=nn.ReLU)
# Value function used by all RLlib Torch RL implementations.
self.values_out = SlimFC(
in_size=self.attention_dim, out_size=1, activation_fn=None)
else:
self.num_outputs = self.attention_dim
# Setup trajectory views (`memory-inference` x past memory outs).
for i in range(self.num_transformer_units):
space = Box(-1.0, 1.0, shape=(self.attention_dim, ))
self.view_requirements["state_in_{}".format(i)] = \
ViewRequirement(
"state_out_{}".format(i),
shift="-{}:-1".format(self.memory_inference),
# Repeat the incoming state every max-seq-len times.
batch_repeat_value=self.max_seq_len,
space=space)
self.view_requirements["state_out_{}".format(i)] = \
ViewRequirement(
space=space,
used_for_training=False)
@override(ModelV2)
def forward(self, input_dict, state: List[TensorType],
seq_lens: TensorType) -> (TensorType, List[TensorType]):
assert seq_lens is not None
# Add the needed batch rank (tf Models' Input requires this).
observations = input_dict[SampleBatch.OBS]
# Add the time dim to observations.
B = len(seq_lens)
T = observations.shape[0] // B
observations = torch.reshape(observations,
[-1, T] + list(observations.shape[1:]))
all_out = observations
memory_outs = []
for i in range(len(self.layers)):
# MHA layers which need memory passed in.
if i % 2 == 1:
all_out = self.layers[i](all_out, memory=state[i // 2])
# Either self.linear_layer (initial obs -> attn. dim layer) or
# MultiLayerPerceptrons. The output of these layers is always the
# memory for the next forward pass.
else:
all_out = self.layers[i](all_out)
memory_outs.append(all_out)
# Discard last output (not needed as a memory since it's the last
# layer).
memory_outs = memory_outs[:-1]
if self.logits is not None:
out = self.logits(all_out)
self._value_out = self.values_out(all_out)
out_dim = self.num_outputs
else:
out = all_out
out_dim = self.attention_dim
return torch.reshape(out, [-1, out_dim]), [
torch.reshape(m, [-1, self.attention_dim]) for m in memory_outs
]
# TODO: (sven) Deprecate this once trajectory view API has fully matured.
@override(RecurrentNetwork)
def get_initial_state(self) -> List[np.ndarray]:
return []
@override(ModelV2)
def value_function(self) -> TensorType:
assert self._value_out is not None,\
"Must call forward first AND must have value branch!"
return torch.reshape(self._value_out, [-1])
class AttentionWrapper(TorchModelV2, nn.Module):
"""GTrXL wrapper serving as interface for ModelV2s that set use_attention.
"""
def __init__(self, obs_space: gym.spaces.Space,
action_space: gym.spaces.Space, num_outputs: int,
model_config: ModelConfigDict, name: str):
nn.Module.__init__(self)
super().__init__(obs_space, action_space, None, model_config, name)
self.use_n_prev_actions = model_config["attention_use_n_prev_actions"]
self.use_n_prev_rewards = model_config["attention_use_n_prev_rewards"]
if isinstance(action_space, Discrete):
self.action_dim = action_space.n
elif isinstance(action_space, MultiDiscrete):
self.action_dim = np.product(action_space.nvec)
elif action_space.shape is not None:
self.action_dim = int(np.product(action_space.shape))
else:
self.action_dim = int(len(action_space))
        # Add prev-action/reward nodes to the input of the attention net.
if self.use_n_prev_actions:
self.num_outputs += self.use_n_prev_actions * self.action_dim
if self.use_n_prev_rewards:
self.num_outputs += self.use_n_prev_rewards
cfg = model_config
self.attention_dim = cfg["attention_dim"]
if self.num_outputs is not None:
in_space = gym.spaces.Box(
float("-inf"),
float("inf"),
shape=(self.num_outputs, ),
dtype=np.float32)
else:
in_space = obs_space
# Construct GTrXL sub-module w/ num_outputs=None (so it does not
# create a logits/value output; we'll do this ourselves in this wrapper
# here).
self.gtrxl = GTrXLNet(
in_space,
action_space,
None,
model_config,
"gtrxl",
num_transformer_units=cfg["attention_num_transformer_units"],
attention_dim=self.attention_dim,
num_heads=cfg["attention_num_heads"],
head_dim=cfg["attention_head_dim"],
memory_inference=cfg["attention_memory_inference"],
memory_training=cfg["attention_memory_training"],
position_wise_mlp_dim=cfg["attention_position_wise_mlp_dim"],
init_gru_gate_bias=cfg["attention_init_gru_gate_bias"],
)
# Set final num_outputs to correct value (depending on action space).
self.num_outputs = num_outputs
# Postprocess GTrXL output with another hidden layer and compute
# values.
self._logits_branch = SlimFC(
in_size=self.attention_dim,
out_size=self.num_outputs,
activation_fn=None,
initializer=torch.nn.init.xavier_uniform_)
self._value_branch = SlimFC(
in_size=self.attention_dim,
out_size=1,
activation_fn=None,
initializer=torch.nn.init.xavier_uniform_)
self.view_requirements = self.gtrxl.view_requirements
self.view_requirements["obs"].space = self.obs_space
# Add prev-a/r to this model's view, if required.
if self.use_n_prev_actions:
self.view_requirements[SampleBatch.PREV_ACTIONS] = \
ViewRequirement(
SampleBatch.ACTIONS,
space=self.action_space,
shift="-{}:-1".format(self.use_n_prev_actions))
if self.use_n_prev_rewards:
self.view_requirements[SampleBatch.PREV_REWARDS] = \
ViewRequirement(
SampleBatch.REWARDS,
shift="-{}:-1".format(self.use_n_prev_rewards))
@override(RecurrentNetwork)
def forward(self, input_dict: Dict[str, TensorType],
state: List[TensorType],
seq_lens: TensorType) -> (TensorType, List[TensorType]):
assert seq_lens is not None
# Push obs through "unwrapped" net's `forward()` first.
wrapped_out, _ = self._wrapped_forward(input_dict, [], None)
# Concat. prev-action/reward if required.
prev_a_r = []
if self.use_n_prev_actions:
if isinstance(self.action_space, Discrete):
for i in range(self.use_n_prev_actions):
prev_a_r.append(
one_hot(
input_dict[SampleBatch.PREV_ACTIONS][:, i].float(),
self.action_space))
elif isinstance(self.action_space, MultiDiscrete):
                for i in range(
                        0, self.use_n_prev_actions,
                        self.action_space.shape[0]):
prev_a_r.append(
one_hot(
input_dict[SampleBatch.PREV_ACTIONS]
[:, i:i + self.action_space.shape[0]].float(),
self.action_space))
else:
prev_a_r.append(
torch.reshape(
input_dict[SampleBatch.PREV_ACTIONS].float(),
[-1, self.use_n_prev_actions * self.action_dim]))
if self.use_n_prev_rewards:
prev_a_r.append(
torch.reshape(input_dict[SampleBatch.PREV_REWARDS].float(),
[-1, self.use_n_prev_rewards]))
if prev_a_r:
wrapped_out = torch.cat([wrapped_out] + prev_a_r, dim=1)
# Then through our GTrXL.
input_dict["obs_flat"] = input_dict["obs"] = wrapped_out
self._features, memory_outs = self.gtrxl(input_dict, state, seq_lens)
model_out = self._logits_branch(self._features)
return model_out, memory_outs
@override(ModelV2)
def get_initial_state(self) -> Union[List[np.ndarray], List[TensorType]]:
return []
@override(ModelV2)
def value_function(self) -> TensorType:
assert self._features is not None, "Must call forward() first!"
return torch.reshape(self._value_branch(self._features), [-1])
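# Illustrative sketch (not part of the original module): a model config dict
# wired the way AttentionWrapper above reads it. Only the attention-related
# keys accessed in this file are listed; the values mirror GTrXLNet's defaults
# and the trainer/environment setup around it is left as an assumption.
EXAMPLE_ATTENTION_MODEL_CONFIG = {
    "use_attention": True,
    "max_seq_len": 10,
    "attention_num_transformer_units": 1,
    "attention_dim": 64,
    "attention_num_heads": 2,
    "attention_head_dim": 32,
    "attention_memory_inference": 50,
    "attention_memory_training": 50,
    "attention_position_wise_mlp_dim": 32,
    "attention_init_gru_gate_bias": 2.0,
    "attention_use_n_prev_actions": 0,
    "attention_use_n_prev_rewards": 0,
}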
|
{
"content_hash": "d196197a8ad0430321ee87853665cb50",
"timestamp": "",
"source": "github",
"line_count": 389,
"max_line_length": 79,
"avg_line_length": 42.095115681233935,
"alnum_prop": 0.5725801526717558,
"repo_name": "pcmoritz/ray-1",
"id": "0a48bd91e8ce3040f764616961ce273c6399d1ca",
"size": "16375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/models/torch/attention_net.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "70670"
},
{
"name": "C++",
"bytes": "4670851"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Dockerfile",
"bytes": "14159"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1338604"
},
{
"name": "JavaScript",
"bytes": "914"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "10523389"
},
{
"name": "Shell",
"bytes": "117557"
},
{
"name": "Smarty",
"bytes": "239"
},
{
"name": "Starlark",
"bytes": "238506"
},
{
"name": "TypeScript",
"bytes": "259269"
}
],
"symlink_target": ""
}
|
"""Library with common functions for training and eval."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import tensorflow as tf
from tensorflow.contrib.slim.nets import resnet_v2
def default_hparams():
"""Returns default hyperparameters."""
return tf.contrib.training.HParams(
# Batch size for training and evaluation.
batch_size=32,
eval_batch_size=50,
# General training parameters.
weight_decay=0.0001,
label_smoothing=0.1,
# Parameters of the adversarial training.
train_adv_method='clean', # adversarial training method
train_lp_weight=0.0, # Weight of adversarial logit pairing loss
# Parameters of the optimizer.
optimizer='rms', # possible values are: 'rms', 'momentum', 'adam'
momentum=0.9, # momentum
rmsprop_decay=0.9, # Decay term for RMSProp
rmsprop_epsilon=1.0, # Epsilon term for RMSProp
# Parameters of learning rate schedule.
lr_schedule='exp_decay', # Possible values: 'exp_decay', 'step', 'fixed'
learning_rate=0.045,
lr_decay_factor=0.94, # Learning exponential decay
lr_num_epochs_per_decay=2.0, # Number of epochs per lr decay
lr_list=[1.0 / 6, 2.0 / 6, 3.0 / 6,
4.0 / 6, 5.0 / 6, 1.0, 0.1, 0.01,
0.001, 0.0001],
lr_decay_epochs=[1, 2, 3, 4, 5, 30, 60, 80,
90])
def get_lr_schedule(hparams, examples_per_epoch, replicas_to_aggregate=1):
"""Returns TensorFlow op which compute learning rate.
Args:
hparams: hyper parameters.
examples_per_epoch: number of training examples per epoch.
replicas_to_aggregate: number of training replicas running in parallel.
Raises:
ValueError: if learning rate schedule specified in hparams is incorrect.
Returns:
learning_rate: tensor with learning rate.
steps_per_epoch: number of training steps per epoch.
"""
global_step = tf.train.get_or_create_global_step()
steps_per_epoch = float(examples_per_epoch) / float(hparams.batch_size)
if replicas_to_aggregate > 0:
steps_per_epoch /= replicas_to_aggregate
if hparams.lr_schedule == 'exp_decay':
decay_steps = long(steps_per_epoch * hparams.lr_num_epochs_per_decay)
learning_rate = tf.train.exponential_decay(
hparams.learning_rate,
global_step,
decay_steps,
hparams.lr_decay_factor,
staircase=True)
elif hparams.lr_schedule == 'step':
lr_decay_steps = [long(epoch * steps_per_epoch)
for epoch in hparams.lr_decay_epochs]
learning_rate = tf.train.piecewise_constant(
global_step, lr_decay_steps, hparams.lr_list)
elif hparams.lr_schedule == 'fixed':
learning_rate = hparams.learning_rate
else:
raise ValueError('Invalid value of lr_schedule: %s' % hparams.lr_schedule)
if replicas_to_aggregate > 0:
learning_rate *= replicas_to_aggregate
return learning_rate, steps_per_epoch
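# Worked example (illustrative numbers, not from this file): with the default
# batch_size=32 and examples_per_epoch=1281167 (ImageNet-sized, an assumption),
# steps_per_epoch ~= 40036. Under the default 'exp_decay' schedule with
# lr_num_epochs_per_decay=2.0, decay_steps = long(2.0 * 40036.47) ~= 80072,
# i.e. the learning rate is multiplied by lr_decay_factor=0.94 roughly every
# 80k global steps.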
def get_optimizer(hparams, learning_rate):
"""Returns optimizer.
Args:
hparams: hyper parameters.
learning_rate: learning rate tensor.
Raises:
ValueError: if type of optimizer specified in hparams is incorrect.
Returns:
Instance of optimizer class.
"""
if hparams.optimizer == 'rms':
optimizer = tf.train.RMSPropOptimizer(learning_rate,
hparams.rmsprop_decay,
hparams.momentum,
hparams.rmsprop_epsilon)
elif hparams.optimizer == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate,
hparams.momentum)
elif hparams.optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
else:
raise ValueError('Invalid value of optimizer: %s' % hparams.optimizer)
return optimizer
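# e.g. (illustrative call) get_optimizer(default_hparams(), learning_rate=0.045)
# returns an RMSPropOptimizer with decay=0.9, momentum=0.9 and epsilon=1.0.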
RESNET_MODELS = {'resnet_v2_50': resnet_v2.resnet_v2_50}
def get_model(model_name, num_classes):
"""Returns function which creates model.
Args:
model_name: Name of the model.
num_classes: Number of classes.
Raises:
ValueError: If model_name is invalid.
Returns:
Function, which creates model when called.
"""
if model_name.startswith('resnet'):
def resnet_model(images, is_training, reuse=tf.AUTO_REUSE):
with tf.contrib.framework.arg_scope(resnet_v2.resnet_arg_scope()):
resnet_fn = RESNET_MODELS[model_name]
logits, _ = resnet_fn(images, num_classes, is_training=is_training,
reuse=reuse)
logits = tf.reshape(logits, [-1, num_classes])
return logits
return resnet_model
else:
raise ValueError('Invalid model: %s' % model_name)
def filter_trainable_variables(trainable_scopes):
"""Keep only trainable variables which are prefixed with given scopes.
Args:
trainable_scopes: either list of trainable scopes or string with comma
separated list of trainable scopes.
This function removes all variables which are not prefixed with given
trainable_scopes from collection of trainable variables.
Useful during network fine tuning, when you only need to train subset of
variables.
"""
if not trainable_scopes:
return
if isinstance(trainable_scopes, six.string_types):
trainable_scopes = [scope.strip() for scope in trainable_scopes.split(',')]
trainable_scopes = {scope for scope in trainable_scopes if scope}
if not trainable_scopes:
return
trainable_collection = tf.get_collection_ref(
tf.GraphKeys.TRAINABLE_VARIABLES)
non_trainable_vars = [
v for v in trainable_collection
if not any([v.op.name.startswith(s) for s in trainable_scopes])
]
for v in non_trainable_vars:
trainable_collection.remove(v)
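# Illustrative fine-tuning sketch (not part of the original library): build the
# ResNet factory and keep only variables under an assumed scope trainable. The
# scope name 'resnet_v2_50/logits' and the input shape are assumptions made for
# this example rather than values defined elsewhere in the codebase.
def _example_finetune_setup():
  images = tf.placeholder(tf.float32, [None, 224, 224, 3])
  model_fn = get_model('resnet_v2_50', num_classes=1001)
  logits = model_fn(images, is_training=True)
  filter_trainable_variables('resnet_v2_50/logits')
  return logits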
|
{
"content_hash": "ca051df764591566db8a5a2e64c9e66f",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 79,
"avg_line_length": 33.47126436781609,
"alnum_prop": 0.6639766483516484,
"repo_name": "tombstone/models",
"id": "1499a378ea1ba6511122ebe54ceed1226d38d649",
"size": "6502",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "research/adversarial_logit_pairing/model_lib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1365199"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "1858048"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Python",
"bytes": "7241242"
},
{
"name": "Shell",
"bytes": "102270"
},
{
"name": "TypeScript",
"bytes": "6515"
}
],
"symlink_target": ""
}
|
'''
RGZcatalog is a pipeline that takes all of the completed RGZ subjects and creates a Mongo database containing
consensus matching information, radio morphology, IR counterpart location, and data from corresponding
AllWISE and SDSS catalogs.
'''
import logging, urllib2, time, json, os, datetime
import pymongo
import numpy as np
import StringIO, gzip
from astropy.io import fits
from astropy import wcs, coordinates as coord, units as u
#custom modules for the RGZ catalog pipeline
import catalog_functions as fn #contains miscellaneous helper functions
import processing as p #contains functions that process the data
from find_duplicates import find_duplicates #finds and marks any radio components that are duplicated between sources
from consensus import rgz_path, data_path, db, version, logfile
in_progress_file = '%s/subject_in_progress.txt' % rgz_path
def RGZcatalog():
#start timer
starttime = time.time()
#begin logging even if not run from command line
logging.basicConfig(filename='{}/{}'.format(rgz_path,logfile), level=logging.DEBUG, format='%(asctime)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logging.captureWarnings(True)
#connect to database of subjects
subjects = db['radio_subjects']
consensus = db['consensus{}'.format(version)]
catalog = db['catalog{}'.format(version)] #this is being populated by this program
if catalog.count():
logging.info('Catalog contains entries; appending')
else:
catalog.create_index('catalog_id', unique=True)
#get dictionary for finding the path to FITS files and WCS headers
with open('%s/first_fits.txt' % rgz_path) as f:
lines = f.readlines()
pathdict = {}
for l in lines:
spl = l.split(' ')
pathdict[spl[1].strip()] = '%s/rgz/raw_images/RGZ-full.%i/FIRST-IMGS/%s.fits' % (data_path, int(spl[0]), spl[1].strip())
#count the number of entries from this run and how many entries are in the catalog total
count = 0
if catalog.count() != 0:
for entry in catalog.find().sort('catalog_id', -1).limit(1):
IDnumber = entry['catalog_id']
else:
IDnumber = 0
#find completed catalog entries so they can be skipped
consensus_set = set()
for source in consensus.find():
consensus_set.add(source['zooniverse_id'])
catalog_set = set()
for entry in catalog.find():
catalog_set.add(entry['zooniverse_id'])
to_be_completed = consensus_set.difference(catalog_set)
if os.path.exists(in_progress_file):
with open(in_progress_file, 'r') as f:
in_progress_zid = f.read()
to_be_completed = to_be_completed.union(in_progress_zid)
to_be_completed = list(to_be_completed)
#iterate through all noncompleted subjects
for subject in subjects.find({'zooniverse_id': {'$in':to_be_completed} }).batch_size(10):
#for subject in subjects.find({'zooniverse_id': {'$in': ['ARG00000sl', 'ARG0003f9l']} }):
#for subject in subjects.find({'zooniverse_id':'ARG00000sl'}): #sample subject with distinct sources
#for subject in subjects.find({'zooniverse_id':'ARG0003f9l'}): #sample subject with multiple-component source
#mark subject as being in-progress
with open(in_progress_file, 'w') as f:
f.write(subject['zooniverse_id'])
#iterate through all consensus groupings
for source in consensus.find({'zooniverse_id':subject['zooniverse_id'], 'first_id':{'$exists':True}}):
#do not process if this object in this source is already in the catalog
process = True
for i in catalog.find({'zooniverse_id':subject['zooniverse_id']}):
if i['consensus']['label'] == source['label']:
process = False
if process:
logging.info('Processing consensus object %s within subject field %s', source['label'], subject['zooniverse_id'])
count += 1
IDnumber += 1
#display which entry is being processed to see how far the program is
print 'Processing entry %i (consensus %s in subject %s)' % (IDnumber, source['label'], subject['zooniverse_id'])
entry = {'catalog_id':IDnumber, 'zooniverse_id':str(subject['zooniverse_id'])}
#find location of FITS file; once non-FIRST sources are included, modify this
fid = source['first_id']
#if fid[0] == 'F':
fits_loc = pathdict[fid]
entry.update({'first_id':str(fid)})
#else:
# raise RuntimeError('Not expecting non-FIRST data')
# fits_loc = '%s/rgz/raw_images/ATLAS/2x2/%s_radio.fits' % (data_path, fid)
# entry.update({'atlas_id':str(fid)})
#find IR counterpart from consensus data, if present
w = wcs.WCS(fits.getheader(fits_loc, 0)) #gets pixel-to-WCS conversion from header
ir_coords = source['ir_peak']
if ir_coords[0] == -99:
ir_pos = None
wise_match = None
sdss_match = None
else:
#this only works for FIRST images; will need changing when ATLAS is added
p2w = w.wcs_pix2world
ir_ra_pixels = ir_coords[0]*w._naxis1/500.
ir_dec_pixels = 1 + w._naxis2 - ir_coords[1]*w._naxis2/500.
ir_peak = p2w( np.array([[ir_ra_pixels, ir_dec_pixels]]), 1)
ir_pos = coord.SkyCoord(ir_peak[0][0], ir_peak[0][1], unit=(u.deg,u.deg), frame='icrs')
entry.update({'consensus':{'n_radio':source['n_votes'], 'n_total':source['n_total'], 'n_ir':source['n_ir'], 'ir_flag':source['ir_flag'], \
'ir_level':source['ir_level'], 'radio_level':source['consensus_level'], 'label':source['label']}})
if ir_pos:
logging.info('IR counterpart found')
entry['consensus'].update({'ir_ra':ir_pos.ra.deg, 'ir_dec':ir_pos.dec.deg})
else:
logging.info('No IR counterpart found')
#if an IR peak exists, search AllWISE and SDSS for counterparts
if ir_pos:
wise_match = p.getWISE(entry)
if wise_match:
designation = wise_match['designation'][5:]
pz = db['wise_pz'].find_one({'wiseX':designation})
if pz is not None:
wise_match['photo_redshift'] = pz['zPhoto_Corr']
entry.update({'AllWISE':wise_match})
'''tryCount = 0
while(True):
tryCount += 1
try:
sdss_match = p.getSDSS(entry)
if sdss_match:
entry.update({'SDSS':sdss_match})
break
except KeyError as e:
if tryCount>5:
output('Bad response from SkyServer; trying again in 10 min', logging.exception)
raise fn.DataAccessError(message)
elif e.message == 'ra':
#unable to reproduce; no error when I try again, so let's just do that
logging.exception(e)
time.sleep(10)
else:
raise e'''
sdss_match = None
#try block attempts to read JSON from web; if it exists, calculate data
try:
link = subject['location']['contours'] #gets url as Unicode string
# Use local file if available
jsonfile = link.split("/")[-1]
jsonfile_path = "{0}/rgz/contours/{1}".format(data_path,jsonfile)
if os.path.exists(jsonfile_path):
with open(jsonfile_path,'r') as jf:
data = json.load(jf)
# Otherwise, read from web
else:
# Reform weblink to point to the direct S3 URL, which will work even with older SSLv3
link_s3 = "http://zooniverse-static.s3.amazonaws.com/"+link.split('http://')[-1]
tryCount = 0
while(True): #in case of error, wait 10 sec and try again; give up after 5 tries
tryCount += 1
try:
compressed = urllib2.urlopen(str(link_s3)).read() #reads contents of url to str
break
except (urllib2.URLError, urllib2.HTTPError) as e:
if tryCount>5:
                                    message = 'Unable to connect to Amazon Web Services; trying again in 10 min'
                                    output(message, logging.exception)
                                    raise fn.DataAccessError(message)
logging.exception(e)
time.sleep(10)
tempfile = StringIO.StringIO(compressed) #temporarily stores contents as file (emptied after unzipping)
uncompressed = gzip.GzipFile(fileobj=tempfile, mode='r').read() #unzips contents to str
data = json.loads(uncompressed) #loads JSON object
radio_data = p.getRadio(data, fits_loc, source)
entry.update(radio_data)
#check if a component is straddling the edge of the image
entry.update({'overedge':0})
source_bbox = np.array(source['bbox'])
for c in data['contours']:
bbox = np.array(c[0]['bbox'])
if bbox in source_bbox:
vertices = []
for pos in c[0]['arr']:
vertices.append([pos['x'], pos['y']])
vertices = np.array(vertices)
diff = vertices[0] - vertices[-1]
if np.sqrt(diff[0]**2 + diff[1]**2) > 1 and (np.any(vertices[0] <= 4) or np.any(vertices[0] >= 128)):
entry.update({'overedge':1})
break
#use WISE catalog name if available
if wise_match:
entry.update({'rgz_name':'RGZ{}{}'.format(wise_match['designation'][5:14], wise_match['designation'][15:22])})
else:
#if not, try consensus IR position
if ir_pos:
ra = ir_pos.ra.deg
dec = ir_pos.dec.deg
#finally, just use radio center
else:
ra = radio_data['radio']['ra']
dec = radio_data['radio']['dec']
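                        # e.g. (hypothetical values) ra=187.5, dec=12.5 convert to
                        # 12h30m00.0s +12d30m00s, giving rgz_name 'RGZJ123000.0+123000'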
ra_h = int(ra/15.)
ra_m = int((ra - ra_h*15)*4)
ra_s = (ra - ra_h*15 - ra_m/4.)*240
dec_d = int(dec)
dec_m = int((dec - dec_d)*60)
dec_s = int((dec - dec_d - dec_m/60.)*3600)
entry.update({'rgz_name':'RGZJ{:0=2}{:0=2}{:0=4.1f}{:0=+3}{:0=2}{:0=2}'.format(ra_h, ra_m, ra_s, dec_d, dec_m, dec_s)})
#calculate physical parameters using redshift from SDSS
if sdss_match:
z = 0
if 'spec_redshift' in sdss_match:
z = sdss_match['spec_redshift']
elif 'photo_redshift' in sdss_match:
z = sdss_match['photo_redshift']
if z>0:
physical = p.getPhysical(z, radio_data)
entry['radio'].update(physical)
logging.info('Radio data added')
#if the link doesn't have a JSON, no data can be determined
except urllib2.HTTPError as e:
if e.code == 404:
logging.info('No radio JSON detected')
else:
logging.exception(e)
raise
catalog.insert(entry)
find_duplicates(entry['zooniverse_id'])
logging.info('Entry %i added to catalog', IDnumber)
with open(in_progress_file, 'w') as f:
f.write('')
#end timer
endtime = time.time()
output('Time taken: %f' % (endtime-starttime))
return count
def output(string, fn=logging.info):
'''
Print a string to screen and the logfile
'''
fn(string)
print string
if __name__ == '__main__':
logging.basicConfig(filename='{}/{}'.format(rgz_path,logfile), level=logging.DEBUG, format='%(asctime)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logging.captureWarnings(True)
logging.info('Catalog run from command line')
assert db['radio_subjects'].count()>0, 'RGZ subjects collection not in Mongo database'
assert db['consensus{}'.format(version)].count()>0, 'RGZ consensus{} collection not in Mongo database'.format(version)
assert db['wise_pz'].count()>0, 'WISExSCOSPZ catalog not in Mongo database'
done = False
while not done:
try:
output('%i entries added.' % RGZcatalog())
done = True
except pymongo.errors.CursorNotFound as c:
time.sleep(10)
output('Cursor timed out; starting again.')
except fn.DataAccessError as d:
resume = datetime.datetime.now() + datetime.timedelta(minutes=10)
output("RGZcatalog.py can't connect to external server; will resume at {:%H:%M}".format(resume))
time.sleep(600)
except BaseException as e:
logging.exception(e)
raise
|
{
"content_hash": "08eb9300bda31fd5e2ce446c3729805f",
"timestamp": "",
"source": "github",
"line_count": 306,
"max_line_length": 148,
"avg_line_length": 37.48692810457516,
"alnum_prop": 0.6460639874466044,
"repo_name": "willettk/rgz-analysis",
"id": "533cf6669d5e46d70924ee05b9da94b72bc11f0a",
"size": "11471",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/RGZcatalog.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "147317"
},
{
"name": "Python",
"bytes": "691021"
},
{
"name": "Ruby",
"bytes": "3598"
},
{
"name": "Shell",
"bytes": "6723"
},
{
"name": "TeX",
"bytes": "40897"
}
],
"symlink_target": ""
}
|
"""
Django settings for compass project.
Generated by 'django-admin startproject' using Django 1.8.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9bbtp$+w_n)dp!my4&!213m!re8ctq9f^@ydm!tfzdxw6aa8+w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
# 'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_jinja',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'compass.urls'
TEMPLATES = [
{
"BACKEND": "django_jinja.backend.Jinja2",
# "APP_DIRS": True,
"OPTIONS": {
"match_extension": ".jinja2",
}
},
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": ['templates'],
"APP_DIRS": True
},
]
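# Note on the setup above (behaviour as intended by django_jinja's
# match_extension): template names ending in '.jinja2' are rendered by the
# Jinja2 backend, while other names fall through to the DjangoTemplates
# backend listed after it.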
# TEMPLATES = [
# {
# "BACKEND": "django_jinja.backend.Jinja2",
# "APP_DIRS": True,
# "OPTIONS": {
# "match_extension": ".jinja2",
# }
# },
# {
# 'BACKEND': 'django.template.backends.django.DjangoTemplates',
# 'DIRS': [os.path.join(BASE_DIR,'templates')],
# 'APP_DIRS': True,
# 'OPTIONS': {
# 'context_processors': [
# 'django.template.context_processors.debug',
# 'django.template.context_processors.request',
# 'django.contrib.auth.context_processors.auth',
# 'django.contrib.messages.context_processors.messages',
# ],
# },
# },
# ]
WSGI_APPLICATION = 'compass.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
|
{
"content_hash": "e3fefaed742a043a890654bdcb60c228",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 72,
"avg_line_length": 25.784,
"alnum_prop": 0.6472230840831523,
"repo_name": "patrickspencer/django_jinja_test_repo",
"id": "2e8c440f6d64b887751ddea17a49383f4e4d0319",
"size": "3223",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compass/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4762"
}
],
"symlink_target": ""
}
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
import pytest
from gremlin_python.process.traversal import Bytecode, P
from gremlin_python.process.graph_traversal import (
GraphTraversalSource, GraphTraversal)
from gremlin_python.process.graph_traversal import __ as AnonymousTraversal
from gremlin_python.structure.graph import Graph
__author__ = 'David M. Brown (davebshow@gmail.com)'
class SocialTraversal(GraphTraversal):
def knows(self, person_name):
return self.out("knows").hasLabel("person").has("name", person_name)
def youngestFriendsAge(self):
return self.out("knows").hasLabel("person").values("age").min()
def createdAtLeast(self, number):
return self.outE("created").count().is_(P.gte(number))
class __(AnonymousTraversal):
graph_traversal = SocialTraversal
@classmethod
def knows(cls, *args):
return cls.graph_traversal(None, None, Bytecode()).knows(*args)
@classmethod
def youngestFriendsAge(cls, *args):
return cls.graph_traversal(None, None, Bytecode()).youngestFriendsAge(*args)
@classmethod
def createdAtLeast(cls, *args):
return cls.graph_traversal(None, None, Bytecode()).createdAtLeast(*args)
class SocialTraversalSource(GraphTraversalSource):
def __init__(self, *args, **kwargs):
super(SocialTraversalSource, self).__init__(*args, **kwargs)
self.graph_traversal = SocialTraversal
def persons(self, *args):
traversal = self.get_graph_traversal().V().hasLabel("person")
if len(args) > 0:
traversal = traversal.has("name", P.within(*args))
return traversal
def test_dsl(remote_connection):
social = Graph().traversal(SocialTraversalSource).withRemote(remote_connection)
assert social.persons("marko").knows("josh").next()
assert social.persons("marko").youngestFriendsAge().next() == 27
assert social.persons().count().next() == 4
assert social.persons("marko", "josh").count().next() == 2
assert social.persons().filter(__.createdAtLeast(2)).count().next() == 1
|
{
"content_hash": "0a771c56a0fe804d712c2ddfdb2d2156",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 84,
"avg_line_length": 35.35443037974684,
"alnum_prop": 0.7178660938059435,
"repo_name": "pluradj/incubator-tinkerpop",
"id": "6ec3b46a7377d73d49d4bf490309fd1934a2a7a2",
"size": "2793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gremlin-python/src/main/jython/tests/process/test_dsl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4544"
},
{
"name": "Groovy",
"bytes": "369370"
},
{
"name": "Java",
"bytes": "6508842"
},
{
"name": "Python",
"bytes": "1481"
},
{
"name": "Shell",
"bytes": "24104"
}
],
"symlink_target": ""
}
|
from pandas.compat import range
import numpy as np
from pandas.core.api import Series, Categorical
import pandas as pd
import pandas.core.algorithms as algos
import pandas.util.testing as tm
import pandas.hashtable as hashtable
class TestMatch(tm.TestCase):
_multiprocess_can_split_ = True
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0])
self.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = pd.Series(np.arange(5),dtype=np.float32)
result = algos.match(s, [2,4])
expected = np.array([-1, -1, 0, -1, 1])
self.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2,4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result,expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1])
self.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result,expected)
class TestFactorize(tm.TestCase):
_multiprocess_can_split_ = True
def test_warn(self):
s = Series([1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
algos.factorize(s, order='A')
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'])
# self.assert_numpy_array_equal(labels, np.array([ 0, 1, 1, 0, 0, 2, 2, 2],dtype=np.int64))
self.assert_numpy_array_equal(uniques, np.array(['a','b','c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
self.assert_numpy_array_equal(labels, np.array([ 0, 1, 1, 0, 0, 2, 2, 2],dtype=np.int64))
self.assert_numpy_array_equal(uniques, np.array(['a','b','c'], dtype=object))
labels, uniques = algos.factorize(list(reversed(range(5))))
self.assert_numpy_array_equal(labels, np.array([0, 1, 2, 3, 4], dtype=np.int64))
self.assert_numpy_array_equal(uniques, np.array([ 4, 3, 2, 1, 0],dtype=np.int64))
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
self.assert_numpy_array_equal(labels, np.array([ 4, 3, 2, 1, 0],dtype=np.int64))
self.assert_numpy_array_equal(uniques, np.array([0, 1, 2, 3, 4], dtype=np.int64))
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
self.assert_numpy_array_equal(labels, np.array([0., 1., 2., 3., 4.], dtype=np.float64))
self.assert_numpy_array_equal(uniques, np.array([ 4, 3, 2, 1, 0],dtype=np.int64))
labels, uniques = algos.factorize(list(reversed(np.arange(5.))), sort=True)
self.assert_numpy_array_equal(labels, np.array([ 4, 3, 2, 1, 0],dtype=np.int64))
self.assert_numpy_array_equal(uniques, np.array([0., 1., 2., 3., 4.], dtype=np.float64))
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
self.assert_numpy_array_equal(labels, np.array([ 0, 0, -1, 1, 2, 3],dtype=np.int64))
self.assert_numpy_array_equal(uniques, np.array(['A', 'B', 3.14, np.inf], dtype=object))
labels, uniques = algos.factorize(x, sort=True)
self.assert_numpy_array_equal(labels, np.array([ 2, 2, -1, 3, 0, 1],dtype=np.int64))
self.assert_numpy_array_equal(uniques, np.array([3.14, np.inf, 'A', 'B'], dtype=object))
def test_datelike(self):
# M8
v1 = pd.Timestamp('20130101 09:00:00.00004')
v2 = pd.Timestamp('20130101')
x = Series([v1,v1,v1,v2,v2,v1])
labels, uniques = algos.factorize(x)
self.assert_numpy_array_equal(labels, np.array([ 0,0,0,1,1,0],dtype=np.int64))
self.assert_numpy_array_equal(uniques, np.array([v1.value,v2.value],dtype='M8[ns]'))
labels, uniques = algos.factorize(x, sort=True)
self.assert_numpy_array_equal(labels, np.array([ 1,1,1,0,0,1],dtype=np.int64))
self.assert_numpy_array_equal(uniques, np.array([v2.value,v1.value],dtype='M8[ns]'))
# period
v1 = pd.Period('201302',freq='M')
v2 = pd.Period('201303',freq='M')
x = Series([v1,v1,v1,v2,v2,v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
self.assert_numpy_array_equal(labels, np.array([ 0,0,0,1,1,0],dtype=np.int64))
self.assert_numpy_array_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x,sort=True)
self.assert_numpy_array_equal(labels, np.array([ 0,0,0,1,1,0],dtype=np.int64))
self.assert_numpy_array_equal(uniques, pd.PeriodIndex([v1, v2]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = hashtable.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
self.assertEqual(len(set(key)), len(set(expected)))
self.assertTrue(np.array_equal(pd.isnull(key), expected == na_sentinel))
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel)
expected = np.array([ 2, -1, 0], dtype='int32')
self.assertEqual(len(set(key)), len(set(expected)))
self.assertTrue(np.array_equal(pd.isnull(key), expected == na_sentinel))
def test_vector_resize(self):
# Test for memory errors after internal vector
# reallocations (pull request #7157)
def _test_vector_resize(htable, uniques, dtype, nvals):
vals = np.array(np.random.randn(1000), dtype=dtype)
# get_labels appends to the vector
htable.get_labels(vals[:nvals], uniques, 0, -1)
# to_array resizes the vector
uniques.to_array()
htable.get_labels(vals, uniques, 0, -1)
test_cases = [
(hashtable.PyObjectHashTable, hashtable.ObjectVector, 'object'),
(hashtable.Float64HashTable, hashtable.Float64Vector, 'float64'),
(hashtable.Int64HashTable, hashtable.Int64Vector, 'int64')]
for (tbl, vect, dtype) in test_cases:
# resizing to empty is a special case
_test_vector_resize(tbl(), vect(), dtype, 0)
_test_vector_resize(tbl(), vect(), dtype, 10)
class TestUnique(tm.TestCase):
_multiprocess_can_split_ = True
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
tm.assert_isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
tm.assert_isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5),
np.tile(np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
class TestValueCounts(tm.TestCase):
_multiprocess_can_split_ = True
def test_value_counts(self):
np.random.seed(1234)
from pandas.tools.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
tm.assert_isinstance(factor, Categorical)
result = algos.value_counts(factor)
expected = algos.value_counts(np.asarray(factor))
tm.assert_series_equal(result.sort_index(), expected.sort_index())
def test_value_counts_bins(self):
s = [1, 2, 3, 4]
result = algos.value_counts(s, bins=1)
self.assertEqual(result.tolist(), [4])
self.assertEqual(result.index[0], 0.997)
result = algos.value_counts(s, bins=2, sort=False)
self.assertEqual(result.tolist(), [2, 2])
self.assertEqual(result.index[0], 0.997)
self.assertEqual(result.index[1], 2.5)
def test_value_counts_dtypes(self):
result = algos.value_counts([1, 1.])
self.assertEqual(len(result), 1)
result = algos.value_counts([1, 1.], bins=1)
self.assertEqual(len(result), 1)
result = algos.value_counts(Series([1, 1., '1'])) # object
self.assertEqual(len(result), 2)
self.assertRaises(TypeError, lambda s: algos.value_counts(s, bins=1), ['1', 1])
def test_value_counts_nat(self):
td = Series([np.timedelta64(10000), pd.NaT], dtype='timedelta64[ns]')
dt = pd.to_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.value_counts(s)
vc_with_na = algos.value_counts(s, dropna=False)
self.assertEqual(len(vc), 1)
self.assertEqual(len(vc_with_na), 2)
exp_dt = pd.Series({pd.Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_series_equal(algos.value_counts(dt), exp_dt)
# TODO same for (timedelta)
def test_dropna(self):
# https://github.com/pydata/pandas/issues/9443#issuecomment-73719328
tm.assert_series_equal(
pd.Series([True, True, False]).value_counts(dropna=True),
pd.Series([2, 1], index=[True, False]))
tm.assert_series_equal(
pd.Series([True, True, False]).value_counts(dropna=False),
pd.Series([2, 1], index=[True, False]))
tm.assert_series_equal(
pd.Series([True, True, False, None]).value_counts(dropna=True),
pd.Series([2, 1], index=[True, False]))
tm.assert_series_equal(
pd.Series([True, True, False, None]).value_counts(dropna=False),
pd.Series([2, 1, 1], index=[True, False, np.nan]))
tm.assert_series_equal(
pd.Series([10.3, 5., 5.]).value_counts(dropna=True),
pd.Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
pd.Series([10.3, 5., 5.]).value_counts(dropna=False),
pd.Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
pd.Series([10.3, 5., 5., None]).value_counts(dropna=True),
pd.Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
pd.Series([10.3, 5., 5., None]).value_counts(dropna=False),
pd.Series([2, 1, 1], index=[5., 10.3, np.nan]))
def test_quantile():
s = Series(np.random.randn(100))
result = algos.quantile(s, [0, .25, .5, .75, 1.])
expected = algos.quantile(s.values, [0, .25, .5, .75, 1.])
tm.assert_almost_equal(result, expected)
def test_unique_label_indices():
from pandas.hashtable import unique_label_indices
a = np.random.randint(1, 1 << 10, 1 << 15).astype('i8')
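    # 2 ** 15 labels drawn from only 2 ** 10 - 1 distinct values, so duplicates are guaranteed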
left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1]
tm.assert_array_equal(left, right)
a[np.random.choice(len(a), 10)] = -1
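    # -1 marks NA labels that unique_label_indices skips, hence the [1:] slice below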
    left = unique_label_indices(a)
right = np.unique(a, return_index=True)[1][1:]
tm.assert_array_equal(left, right)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
{
"content_hash": "23c5ce76dd9713716f5b11c0e8e521bd",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 99,
"avg_line_length": 39.72115384615385,
"alnum_prop": 0.5862987170176712,
"repo_name": "bdh1011/wau",
"id": "c80cea3ab7a7da18c59dae3f979af9b49e208a12",
"size": "12417",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/pandas/tests/test_algos.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1176"
},
{
"name": "C",
"bytes": "5022853"
},
{
"name": "C++",
"bytes": "43676"
},
{
"name": "CSS",
"bytes": "10359"
},
{
"name": "D",
"bytes": "1841"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "Groff",
"bytes": "7236"
},
{
"name": "HTML",
"bytes": "1709320"
},
{
"name": "JavaScript",
"bytes": "1200059"
},
{
"name": "Jupyter Notebook",
"bytes": "310219"
},
{
"name": "Lua",
"bytes": "11887"
},
{
"name": "Makefile",
"bytes": "112163"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Perl",
"bytes": "171375"
},
{
"name": "Python",
"bytes": "49407229"
},
{
"name": "Ruby",
"bytes": "58403"
},
{
"name": "Shell",
"bytes": "47672"
},
{
"name": "Smarty",
"bytes": "22599"
},
{
"name": "Tcl",
"bytes": "426334"
},
{
"name": "XSLT",
"bytes": "153073"
}
],
"symlink_target": ""
}
|
"""Test class for common methods used by iLO modules."""
import tempfile
import mock
from oslo_config import cfg
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common import image_service
from ironic.common import images
from ironic.common import keystone
from ironic.common import states
from ironic.common import swift
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import agent
from ironic.drivers.modules import agent_base_vendor
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules.ilo import common as ilo_common
from ironic.drivers.modules.ilo import deploy as ilo_deploy
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules import pxe
from ironic.drivers import utils as driver_utils
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.objects import utils as obj_utils
INFO_DICT = db_utils.get_test_ilo_info()
CONF = cfg.CONF
class IloDeployPrivateMethodsTestCase(db_base.DbTestCase):
def setUp(self):
super(IloDeployPrivateMethodsTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="iscsi_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='iscsi_ilo', driver_info=INFO_DICT)
def test__get_boot_iso_object_name(self):
boot_iso_actual = ilo_deploy._get_boot_iso_object_name(self.node)
boot_iso_expected = "boot-%s" % self.node.uuid
self.assertEqual(boot_iso_expected, boot_iso_actual)
@mock.patch.object(image_service.HttpImageService, 'validate_href')
def test__get_boot_iso_http_url(self, service_mock):
url = 'http://abc.org/image/qcow2'
i_info = self.node.instance_info
i_info['ilo_boot_iso'] = url
self.node.instance_info = i_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
boot_iso_actual = ilo_deploy._get_boot_iso(task, 'root-uuid')
service_mock.assert_called_once_with(url)
self.assertEqual(url, boot_iso_actual)
@mock.patch.object(image_service.HttpImageService, 'validate_href')
def test__get_boot_iso_url(self, mock_validate):
url = 'http://aaa/bbb'
i_info = self.node.instance_info
i_info['ilo_boot_iso'] = url
self.node.instance_info = i_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
boot_iso_actual = ilo_deploy._get_boot_iso(task, 'root-uuid')
self.assertEqual(url, boot_iso_actual)
        mock_validate.assert_called_once_with(url)
@mock.patch.object(image_service.HttpImageService, 'validate_href')
def test__get_boot_iso_unsupported_url(self, validate_href_mock):
validate_href_mock.side_effect = exception.ImageRefValidationFailed(
image_href='file://img.qcow2', reason='fail')
url = 'file://img.qcow2'
i_info = self.node.instance_info
i_info['ilo_boot_iso'] = url
self.node.instance_info = i_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.ImageRefValidationFailed,
ilo_deploy._get_boot_iso, task, 'root-uuid')
@mock.patch.object(images, 'get_image_properties')
@mock.patch.object(ilo_deploy, '_parse_deploy_info')
def test__get_boot_iso_glance_image(self, deploy_info_mock,
image_props_mock):
deploy_info_mock.return_value = {'image_source': 'image-uuid',
'ilo_deploy_iso': 'deploy_iso_uuid'}
image_props_mock.return_value = {'boot_iso': 'boot-iso-uuid',
'kernel_id': None,
'ramdisk_id': None}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
boot_iso_actual = ilo_deploy._get_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
image_props_mock.assert_called_once_with(
task.context, 'image-uuid',
['boot_iso', 'kernel_id', 'ramdisk_id'])
boot_iso_expected = 'boot-iso-uuid'
self.assertEqual(boot_iso_expected, boot_iso_actual)
@mock.patch.object(deploy_utils, 'get_boot_mode_for_deploy')
@mock.patch.object(images, 'get_image_properties')
@mock.patch.object(ilo_deploy, '_parse_deploy_info')
def test__get_boot_iso_uefi_no_glance_image(self,
deploy_info_mock,
image_props_mock,
boot_mode_mock):
deploy_info_mock.return_value = {'image_source': 'image-uuid',
'ilo_deploy_iso': 'deploy_iso_uuid'}
image_props_mock.return_value = {'boot_iso': None,
'kernel_id': None,
'ramdisk_id': None}
properties = {'capabilities': 'boot_mode:uefi'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties = properties
boot_iso_result = ilo_deploy._get_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
image_props_mock.assert_called_once_with(
task.context, 'image-uuid',
['boot_iso', 'kernel_id', 'ramdisk_id'])
self.assertFalse(boot_mode_mock.called)
self.assertIsNone(boot_iso_result)
@mock.patch.object(tempfile, 'NamedTemporaryFile')
@mock.patch.object(images, 'create_boot_iso')
@mock.patch.object(swift, 'SwiftAPI')
@mock.patch.object(ilo_deploy, '_get_boot_iso_object_name')
@mock.patch.object(driver_utils, 'get_node_capability')
@mock.patch.object(images, 'get_image_properties')
@mock.patch.object(ilo_deploy, '_parse_deploy_info')
def test__get_boot_iso_create(self, deploy_info_mock, image_props_mock,
capability_mock, boot_object_name_mock,
swift_api_mock,
create_boot_iso_mock, tempfile_mock):
CONF.keystone_authtoken.auth_uri = 'http://authurl'
CONF.ilo.swift_ilo_container = 'ilo-cont'
CONF.pxe.pxe_append_params = 'kernel-params'
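        # config values read while building and uploading the boot ISO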
swift_obj_mock = swift_api_mock.return_value
fileobj_mock = mock.MagicMock()
fileobj_mock.name = 'tmpfile'
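        # mimic the NamedTemporaryFile context manager (Python 2 file)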
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = fileobj_mock
tempfile_mock.return_value = mock_file_handle
deploy_info_mock.return_value = {'image_source': 'image-uuid',
'ilo_deploy_iso': 'deploy_iso_uuid'}
image_props_mock.return_value = {'boot_iso': None,
'kernel_id': 'kernel_uuid',
'ramdisk_id': 'ramdisk_uuid'}
boot_object_name_mock.return_value = 'abcdef'
create_boot_iso_mock.return_value = '/path/to/boot-iso'
capability_mock.return_value = 'uefi'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
boot_iso_actual = ilo_deploy._get_boot_iso(task, 'root-uuid')
deploy_info_mock.assert_called_once_with(task.node)
image_props_mock.assert_called_once_with(task.context,
'image-uuid', ['boot_iso', 'kernel_id', 'ramdisk_id'])
boot_object_name_mock.assert_called_once_with(task.node)
create_boot_iso_mock.assert_called_once_with(task.context,
'tmpfile',
'kernel_uuid',
'ramdisk_uuid',
'deploy_iso_uuid',
'root-uuid',
'kernel-params',
'uefi')
swift_obj_mock.create_object.assert_called_once_with('ilo-cont',
'abcdef',
'tmpfile')
boot_iso_expected = 'swift:abcdef'
self.assertEqual(boot_iso_expected, boot_iso_actual)
@mock.patch.object(ilo_deploy, '_get_boot_iso_object_name')
@mock.patch.object(swift, 'SwiftAPI')
def test__clean_up_boot_iso_for_instance(self, swift_mock,
boot_object_name_mock):
swift_obj_mock = swift_mock.return_value
CONF.ilo.swift_ilo_container = 'ilo-cont'
boot_object_name_mock.return_value = 'boot-object'
i_info = self.node.instance_info
i_info['ilo_boot_iso'] = 'swift:bootiso'
self.node.instance_info = i_info
self.node.save()
ilo_deploy._clean_up_boot_iso_for_instance(self.node)
swift_obj_mock.delete_object.assert_called_once_with('ilo-cont',
'boot-object')
@mock.patch.object(ilo_deploy, '_get_boot_iso_object_name')
def test__clean_up_boot_iso_for_instance_no_boot_iso(
self, boot_object_name_mock):
ilo_deploy._clean_up_boot_iso_for_instance(self.node)
self.assertFalse(boot_object_name_mock.called)
@mock.patch.object(deploy_utils, 'check_for_missing_params')
def test__parse_driver_info(self, check_params_mock):
self.node.driver_info['ilo_deploy_iso'] = 'deploy-iso-uuid'
driver_info_expected = {'ilo_deploy_iso': 'deploy-iso-uuid'}
driver_info_actual = ilo_deploy._parse_driver_info(self.node)
error_msg = ("Error validating iLO virtual media deploy. Some"
" parameters were missing in node's driver_info")
check_params_mock.assert_called_once_with(driver_info_expected,
error_msg)
self.assertEqual(driver_info_expected, driver_info_actual)
@mock.patch.object(ilo_deploy, '_parse_driver_info')
@mock.patch.object(iscsi_deploy, 'parse_instance_info')
def test__parse_deploy_info(self, instance_info_mock, driver_info_mock):
instance_info_mock.return_value = {'a': 'b'}
driver_info_mock.return_value = {'c': 'd'}
expected_info = {'a': 'b', 'c': 'd'}
actual_info = ilo_deploy._parse_deploy_info(self.node)
self.assertEqual(expected_info, actual_info)
@mock.patch.object(manager_utils, 'node_power_action')
@mock.patch.object(manager_utils, 'node_set_boot_device')
@mock.patch.object(ilo_common, 'setup_vmedia_for_boot')
def test__reboot_into(self, setup_vmedia_mock, set_boot_device_mock,
node_power_action_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
opts = {'a': 'b'}
ilo_deploy._reboot_into(task, 'iso', opts)
setup_vmedia_mock.assert_called_once_with(task, 'iso', opts)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.CDROM)
node_power_action_mock.assert_called_once_with(task, states.REBOOT)
@mock.patch.object(ilo_deploy, '_reboot_into')
@mock.patch.object(agent, 'build_agent_options')
def test__prepare_agent_vmedia_boot(self, build_options_mock,
reboot_into_mock):
deploy_opts = {'a': 'b'}
build_options_mock.return_value = deploy_opts
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_info['ilo_deploy_iso'] = 'deploy-iso-uuid'
ilo_deploy._prepare_agent_vmedia_boot(task)
build_options_mock.assert_called_once_with(task.node)
reboot_into_mock.assert_called_once_with(task,
'deploy-iso-uuid',
deploy_opts)
@mock.patch.object(deploy_utils, 'is_secure_boot_requested')
@mock.patch.object(ilo_common, 'set_secure_boot_mode')
def test__update_secure_boot_mode_passed_true(self,
func_set_secure_boot_mode,
func_is_secure_boot_req):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
func_is_secure_boot_req.return_value = True
ilo_deploy._update_secure_boot_mode(task, True)
func_set_secure_boot_mode.assert_called_once_with(task, True)
@mock.patch.object(deploy_utils, 'is_secure_boot_requested')
@mock.patch.object(ilo_common, 'set_secure_boot_mode')
def test__update_secure_boot_mode_passed_False(self,
func_set_secure_boot_mode,
func_is_secure_boot_req):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
func_is_secure_boot_req.return_value = False
ilo_deploy._update_secure_boot_mode(task, False)
self.assertFalse(func_set_secure_boot_mode.called)
@mock.patch.object(ilo_common, 'set_secure_boot_mode')
@mock.patch.object(ilo_common, 'get_secure_boot_mode')
def test__disable_secure_boot_false(self,
func_get_secure_boot_mode,
func_set_secure_boot_mode):
func_get_secure_boot_mode.return_value = False
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
returned_state = ilo_deploy._disable_secure_boot(task)
func_get_secure_boot_mode.assert_called_once_with(task)
self.assertFalse(func_set_secure_boot_mode.called)
self.assertFalse(returned_state)
@mock.patch.object(ilo_common, 'set_secure_boot_mode')
@mock.patch.object(ilo_common, 'get_secure_boot_mode')
def test__disable_secure_boot_true(self,
func_get_secure_boot_mode,
func_set_secure_boot_mode):
func_get_secure_boot_mode.return_value = True
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
returned_state = ilo_deploy._disable_secure_boot(task)
func_get_secure_boot_mode.assert_called_once_with(task)
func_set_secure_boot_mode.assert_called_once_with(task, False)
self.assertTrue(returned_state)
@mock.patch.object(ilo_deploy.LOG, 'debug')
@mock.patch.object(ilo_deploy, 'exception')
@mock.patch.object(ilo_common, 'get_secure_boot_mode')
def test__disable_secure_boot_exception(self,
func_get_secure_boot_mode,
exception_mock,
mock_log):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
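            # ilo_deploy.exception is mocked; bind IloOperationNotSupported
            # to a real class so the driver's except clause can catch it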
exception_mock.IloOperationNotSupported = Exception
func_get_secure_boot_mode.side_effect = Exception
returned_state = ilo_deploy._disable_secure_boot(task)
func_get_secure_boot_mode.assert_called_once_with(task)
self.assertTrue(mock_log.called)
self.assertFalse(returned_state)
@mock.patch.object(ilo_common, 'update_boot_mode')
@mock.patch.object(ilo_deploy, '_disable_secure_boot')
@mock.patch.object(manager_utils, 'node_power_action')
def test__prepare_node_for_deploy(self,
func_node_power_action,
func_disable_secure_boot,
func_update_boot_mode):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
func_disable_secure_boot.return_value = False
ilo_deploy._prepare_node_for_deploy(task)
func_node_power_action.assert_called_once_with(task,
states.POWER_OFF)
func_disable_secure_boot.assert_called_once_with(task)
func_update_boot_mode.assert_called_once_with(task)
bootmode = driver_utils.get_node_capability(task.node, "boot_mode")
self.assertIsNone(bootmode)
@mock.patch.object(ilo_common, 'update_boot_mode')
@mock.patch.object(ilo_deploy, '_disable_secure_boot')
@mock.patch.object(manager_utils, 'node_power_action')
def test__prepare_node_for_deploy_sec_boot_on(self,
func_node_power_action,
func_disable_secure_boot,
func_update_boot_mode):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
func_disable_secure_boot.return_value = True
ilo_deploy._prepare_node_for_deploy(task)
func_node_power_action.assert_called_once_with(task,
states.POWER_OFF)
func_disable_secure_boot.assert_called_once_with(task)
self.assertFalse(func_update_boot_mode.called)
ret_boot_mode = task.node.instance_info['deploy_boot_mode']
self.assertEqual('uefi', ret_boot_mode)
bootmode = driver_utils.get_node_capability(task.node, "boot_mode")
self.assertIsNone(bootmode)
@mock.patch.object(ilo_common, 'update_boot_mode')
@mock.patch.object(ilo_deploy, '_disable_secure_boot')
@mock.patch.object(manager_utils, 'node_power_action')
def test__prepare_node_for_deploy_inst_info(self,
func_node_power_action,
func_disable_secure_boot,
func_update_boot_mode):
instance_info = {'capabilities': '{"secure_boot": "true"}'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
func_disable_secure_boot.return_value = False
task.node.instance_info = instance_info
ilo_deploy._prepare_node_for_deploy(task)
func_node_power_action.assert_called_once_with(task,
states.POWER_OFF)
func_disable_secure_boot.assert_called_once_with(task)
func_update_boot_mode.assert_called_once_with(task)
bootmode = driver_utils.get_node_capability(task.node, "boot_mode")
self.assertIsNone(bootmode)
deploy_boot_mode = task.node.instance_info.get('deploy_boot_mode')
self.assertIsNone(deploy_boot_mode)
@mock.patch.object(ilo_common, 'update_boot_mode')
@mock.patch.object(ilo_deploy, '_disable_secure_boot')
@mock.patch.object(manager_utils, 'node_power_action')
def test__prepare_node_for_deploy_sec_boot_on_inst_info(self,
func_node_power_action,
func_disable_secure_boot,
func_update_boot_mode):
instance_info = {'capabilities': '{"secure_boot": "true"}'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
func_disable_secure_boot.return_value = True
task.node.instance_info = instance_info
ilo_deploy._prepare_node_for_deploy(task)
func_node_power_action.assert_called_once_with(task,
states.POWER_OFF)
func_disable_secure_boot.assert_called_once_with(task)
self.assertFalse(func_update_boot_mode.called)
bootmode = driver_utils.get_node_capability(task.node, "boot_mode")
self.assertIsNone(bootmode)
deploy_boot_mode = task.node.instance_info.get('deploy_boot_mode')
self.assertIsNone(deploy_boot_mode)
class IloVirtualMediaIscsiDeployTestCase(db_base.DbTestCase):
def setUp(self):
super(IloVirtualMediaIscsiDeployTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="iscsi_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='iscsi_ilo', driver_info=INFO_DICT)
@mock.patch.object(driver_utils, 'validate_secure_boot_capability')
@mock.patch.object(driver_utils, 'validate_boot_mode_capability')
@mock.patch.object(iscsi_deploy, 'validate_image_properties')
@mock.patch.object(ilo_deploy, '_parse_deploy_info')
@mock.patch.object(iscsi_deploy, 'validate')
def _test_validate(self, validate_mock,
deploy_info_mock,
validate_prop_mock,
validate_boot_mode_mock,
validate_secure_boot_mock,
props_expected):
d_info = {'image_source': 'uuid'}
deploy_info_mock.return_value = d_info
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.validate(task)
validate_mock.assert_called_once_with(task)
deploy_info_mock.assert_called_once_with(task.node)
validate_prop_mock.assert_called_once_with(task.context,
d_info, props_expected)
validate_boot_mode_mock.assert_called_once_with(task.node)
validate_secure_boot_mock.assert_called_once_with(task.node)
@mock.patch.object(iscsi_deploy, 'validate_image_properties')
@mock.patch.object(ilo_deploy, '_parse_deploy_info')
@mock.patch.object(iscsi_deploy, 'validate')
def test_validate_invalid_boot_option(self,
validate_mock,
deploy_info_mock,
validate_prop_mock):
d_info = {'image_source': 'uuid'}
properties = {'capabilities': 'boot_mode:uefi,boot_option:foo'}
deploy_info_mock.return_value = d_info
props = ['kernel_id', 'ramdisk_id']
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties = properties
exc = self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate,
task)
validate_mock.assert_called_once_with(task)
deploy_info_mock.assert_called_once_with(task.node)
validate_prop_mock.assert_called_once_with(task.context,
d_info, props)
self.assertIn('boot_option', str(exc))
@mock.patch.object(iscsi_deploy, 'validate_image_properties')
@mock.patch.object(ilo_deploy, '_parse_deploy_info')
@mock.patch.object(iscsi_deploy, 'validate')
def test_validate_invalid_boot_mode(self,
validate_mock,
deploy_info_mock,
validate_prop_mock):
d_info = {'image_source': 'uuid'}
properties = {'capabilities': 'boot_mode:foo,boot_option:local'}
deploy_info_mock.return_value = d_info
props = ['kernel_id', 'ramdisk_id']
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties = properties
exc = self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.validate,
task)
validate_mock.assert_called_once_with(task)
deploy_info_mock.assert_called_once_with(task.node)
validate_prop_mock.assert_called_once_with(task.context,
d_info, props)
self.assertIn('boot_mode', str(exc))
@mock.patch.object(service_utils, 'is_glance_image')
def test_validate_glance_partition_image(self, is_glance_image_mock):
is_glance_image_mock.return_value = True
self._test_validate(props_expected=['kernel_id', 'ramdisk_id'])
def test_validate_whole_disk_image(self):
self.node.driver_internal_info = {'is_whole_disk_image': True}
self.node.save()
self._test_validate(props_expected=[])
@mock.patch.object(service_utils, 'is_glance_image')
def test_validate_non_glance_partition_image(self, is_glance_image_mock):
is_glance_image_mock.return_value = False
self._test_validate(props_expected=['kernel', 'ramdisk'])
@mock.patch.object(ilo_deploy, '_reboot_into')
@mock.patch.object(deploy_utils, 'get_single_nic_with_vif_port_id')
@mock.patch.object(agent, 'build_agent_options')
@mock.patch.object(iscsi_deploy, 'build_deploy_ramdisk_options')
@mock.patch.object(iscsi_deploy, 'check_image_size')
@mock.patch.object(iscsi_deploy, 'cache_instance_image')
def test_deploy(self,
cache_instance_image_mock,
check_image_size_mock,
build_opts_mock,
agent_options_mock,
get_nic_mock,
reboot_into_mock):
deploy_opts = {'a': 'b'}
agent_options_mock.return_value = {
'ipa-api-url': 'http://1.2.3.4:6385'}
build_opts_mock.return_value = deploy_opts
get_nic_mock.return_value = '12:34:56:78:90:ab'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_info['ilo_deploy_iso'] = 'deploy-iso'
returned_state = task.driver.deploy.deploy(task)
cache_instance_image_mock.assert_called_once_with(task.context,
task.node)
check_image_size_mock.assert_called_once_with(task)
expected_ramdisk_opts = {'a': 'b', 'BOOTIF': '12:34:56:78:90:ab',
'ipa-api-url': 'http://1.2.3.4:6385'}
build_opts_mock.assert_called_once_with(task.node)
get_nic_mock.assert_called_once_with(task)
reboot_into_mock.assert_called_once_with(task, 'deploy-iso',
expected_ramdisk_opts)
self.assertEqual(states.DEPLOYWAIT, returned_state)
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode')
@mock.patch.object(manager_utils, 'node_power_action')
def test_tear_down(self,
node_power_action_mock,
update_secure_boot_mode_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
returned_state = task.driver.deploy.tear_down(task)
node_power_action_mock.assert_called_once_with(task,
states.POWER_OFF)
update_secure_boot_mode_mock.assert_called_once_with(task, False)
self.assertEqual(states.DELETED, returned_state)
@mock.patch.object(ilo_deploy.LOG, 'warn')
@mock.patch.object(ilo_deploy, 'exception')
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode')
@mock.patch.object(manager_utils, 'node_power_action')
def test_tear_down_handle_exception(self,
node_power_action_mock,
update_secure_boot_mode_mock,
exception_mock,
mock_log):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
exception_mock.IloOperationNotSupported = Exception
update_secure_boot_mode_mock.side_effect = Exception
returned_state = task.driver.deploy.tear_down(task)
node_power_action_mock.assert_called_once_with(task,
states.POWER_OFF)
update_secure_boot_mode_mock.assert_called_once_with(task, False)
self.assertTrue(mock_log.called)
self.assertEqual(states.DELETED, returned_state)
@mock.patch.object(ilo_deploy, '_clean_up_boot_iso_for_instance')
@mock.patch.object(iscsi_deploy, 'destroy_images')
def test_clean_up(self, destroy_images_mock, clean_up_boot_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.clean_up(task)
destroy_images_mock.assert_called_once_with(task.node.uuid)
clean_up_boot_mock.assert_called_once_with(task.node)
@mock.patch.object(ilo_deploy, '_prepare_node_for_deploy')
def test_prepare(self, func_prepare_node_for_deploy):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.prepare(task)
func_prepare_node_for_deploy.assert_called_once_with(task)
class IloVirtualMediaAgentDeployTestCase(db_base.DbTestCase):
def setUp(self):
super(IloVirtualMediaAgentDeployTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="agent_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='agent_ilo', driver_info=INFO_DICT)
@mock.patch.object(driver_utils, 'validate_secure_boot_capability')
@mock.patch.object(driver_utils, 'validate_boot_mode_capability')
@mock.patch.object(ilo_deploy, '_parse_driver_info')
def test_validate(self,
parse_driver_info_mock,
validate_boot_mode_mock,
validate_secure_boot_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.validate(task)
parse_driver_info_mock.assert_called_once_with(task.node)
validate_boot_mode_mock.assert_called_once_with(task.node)
validate_secure_boot_mock.assert_called_once_with(task.node)
@mock.patch.object(ilo_deploy, '_prepare_agent_vmedia_boot')
def test_deploy(self, vmedia_boot_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
returned_state = task.driver.deploy.deploy(task)
vmedia_boot_mock.assert_called_once_with(task)
self.assertEqual(states.DEPLOYWAIT, returned_state)
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode')
@mock.patch.object(manager_utils, 'node_power_action')
def test_tear_down(self,
node_power_action_mock,
update_secure_boot_mode_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
returned_state = task.driver.deploy.tear_down(task)
node_power_action_mock.assert_called_once_with(task,
states.POWER_OFF)
update_secure_boot_mode_mock.assert_called_once_with(task, False)
self.assertEqual(states.DELETED, returned_state)
@mock.patch.object(ilo_deploy.LOG, 'warn')
@mock.patch.object(ilo_deploy, 'exception')
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode')
@mock.patch.object(manager_utils, 'node_power_action')
def test_tear_down_handle_exception(self,
node_power_action_mock,
update_secure_boot_mode_mock,
exception_mock,
mock_log):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
exception_mock.IloOperationNotSupported = Exception
update_secure_boot_mode_mock.side_effect = Exception
returned_state = task.driver.deploy.tear_down(task)
node_power_action_mock.assert_called_once_with(task,
states.POWER_OFF)
update_secure_boot_mode_mock.assert_called_once_with(task, False)
self.assertTrue(mock_log.called)
self.assertEqual(states.DELETED, returned_state)
@mock.patch.object(ilo_deploy, '_prepare_node_for_deploy')
@mock.patch.object(agent, 'build_instance_info_for_deploy')
def test_prepare(self,
build_instance_info_mock,
func_prepare_node_for_deploy):
deploy_opts = {'a': 'b'}
build_instance_info_mock.return_value = deploy_opts
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.prepare(task)
self.assertEqual(deploy_opts, task.node.instance_info)
func_prepare_node_for_deploy.assert_called_once_with(task)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.delete_cleaning_ports')
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.create_cleaning_ports')
@mock.patch.object(ilo_deploy, '_prepare_agent_vmedia_boot')
def test_prepare_cleaning(self, vmedia_boot_mock, create_port_mock,
delete_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
returned_state = task.driver.deploy.prepare_cleaning(task)
vmedia_boot_mock.assert_called_once_with(task)
self.assertEqual(states.CLEANING, returned_state)
create_port_mock.assert_called_once_with(task)
delete_mock.assert_called_once_with(task)
@mock.patch('ironic.dhcp.neutron.NeutronDHCPApi.delete_cleaning_ports')
@mock.patch.object(manager_utils, 'node_power_action')
def test_tear_down_cleaning(self, power_mock, delete_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
task.driver.deploy.tear_down_cleaning(task)
power_mock.assert_called_once_with(task, states.POWER_OFF)
delete_mock.assert_called_once_with(task)
@mock.patch.object(deploy_utils, 'agent_execute_clean_step')
def test_execute_clean_step(self, execute_mock):
with task_manager.acquire(
self.context, self.node['uuid'], shared=False) as task:
task.driver.deploy.execute_clean_step(task, 'fake-step')
execute_mock.assert_called_once_with(task, 'fake-step')
@mock.patch.object(deploy_utils, 'agent_get_clean_steps')
def test_get_clean_steps_with_conf_option(self, get_clean_step_mock):
self.config(clean_priority_erase_devices=20, group='ilo')
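        # the conf option should override the priority reported by the agent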
get_clean_step_mock.return_value = [{
'step': 'erase_devices',
'priority': 10,
'interface': 'deploy',
'reboot_requested': False
}]
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
step = task.driver.deploy.get_clean_steps(task)
get_clean_step_mock.assert_called_once_with(task)
self.assertEqual(step[0].get('priority'),
CONF.ilo.clean_priority_erase_devices)
@mock.patch.object(deploy_utils, 'agent_get_clean_steps')
def test_get_clean_steps_without_conf_option(self, get_clean_step_mock):
get_clean_step_mock.return_value = [{
'step': 'erase_devices',
'priority': 10,
'interface': 'deploy',
'reboot_requested': False
}]
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
step = task.driver.deploy.get_clean_steps(task)
get_clean_step_mock.assert_called_once_with(task)
self.assertEqual(step[0].get('priority'), 10)
class VendorPassthruTestCase(db_base.DbTestCase):
def setUp(self):
super(VendorPassthruTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="iscsi_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='iscsi_ilo',
driver_info=INFO_DICT)
@mock.patch.object(iscsi_deploy, 'get_deploy_info')
def test_validate_pass_deploy_info(self, get_deploy_info_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
vendor = ilo_deploy.VendorPassthru()
vendor.validate(task, method='pass_deploy_info', foo='bar')
get_deploy_info_mock.assert_called_once_with(task.node,
foo='bar')
@mock.patch.object(iscsi_deploy, 'validate_pass_bootloader_info_input',
autospec=True)
def test_validate_pass_bootloader_install_info(self,
validate_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
kwargs = {'address': '1.2.3.4', 'key': 'fake-key',
'status': 'SUCCEEDED', 'error': ''}
task.driver.vendor.validate(
task, method='pass_bootloader_install_info', **kwargs)
validate_mock.assert_called_once_with(task, kwargs)
@mock.patch.object(iscsi_deploy, 'get_deploy_info')
def test_validate_heartbeat(self, get_deploy_info_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
vendor = ilo_deploy.VendorPassthru()
vendor.validate(task, method='heartbeat', foo='bar')
self.assertFalse(get_deploy_info_mock.called)
@mock.patch.object(iscsi_deploy, 'validate_bootloader_install_status',
autospec=True)
@mock.patch.object(iscsi_deploy, 'finish_deploy', autospec=True)
def test_pass_bootloader_install_info(self, finish_deploy_mock,
validate_input_mock):
kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.pass_bootloader_install_info(task, **kwargs)
finish_deploy_mock.assert_called_once_with(task, '123456')
validate_input_mock.assert_called_once_with(task, kwargs)
@mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed')
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode')
@mock.patch.object(ilo_common, 'update_boot_mode')
@mock.patch.object(manager_utils, 'node_set_boot_device')
@mock.patch.object(ilo_common, 'setup_vmedia_for_boot')
@mock.patch.object(ilo_deploy, '_get_boot_iso')
@mock.patch.object(iscsi_deploy, 'continue_deploy')
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot')
def test_pass_deploy_info_good(self, cleanup_vmedia_boot_mock,
continue_deploy_mock, get_boot_iso_mock,
setup_vmedia_mock, set_boot_device_mock,
func_update_boot_mode,
func_update_secure_boot_mode,
notify_ramdisk_to_proceed_mock):
kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
continue_deploy_mock.return_value = {'root uuid': 'root-uuid'}
get_boot_iso_mock.return_value = 'boot-iso'
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.pass_deploy_info(task, **kwargs)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
continue_deploy_mock.assert_called_once_with(task, **kwargs)
get_boot_iso_mock.assert_called_once_with(task, 'root-uuid')
setup_vmedia_mock.assert_called_once_with(task, 'boot-iso')
self.assertEqual(states.ACTIVE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.CDROM,
persistent=True)
func_update_boot_mode.assert_called_once_with(task)
func_update_secure_boot_mode.assert_called_once_with(task, True)
self.assertEqual('boot-iso',
task.node.instance_info['ilo_boot_iso'])
notify_ramdisk_to_proceed_mock.assert_called_once_with('123456')
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot')
def test_pass_deploy_info_bad(self, cleanup_vmedia_boot_mock):
kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
self.node.provision_state = states.AVAILABLE
self.node.target_provision_state = states.NOSTATE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
vendor = task.driver.vendor
self.assertRaises(exception.InvalidState,
vendor.pass_deploy_info,
task, **kwargs)
self.assertEqual(states.AVAILABLE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
self.assertFalse(cleanup_vmedia_boot_mock.called)
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode', autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', autospec=True)
@mock.patch.object(manager_utils, 'node_power_action')
@mock.patch.object(iscsi_deploy, 'continue_deploy')
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot')
@mock.patch.object(ilo_deploy, '_get_boot_iso')
def test_pass_deploy_info_create_boot_iso_fail(self, get_iso_mock,
cleanup_vmedia_boot_mock, continue_deploy_mock, node_power_mock,
update_boot_mode_mock, update_secure_boot_mode_mock):
kwargs = {'address': '123456'}
continue_deploy_mock.return_value = {'root uuid': 'root-uuid'}
get_iso_mock.side_effect = exception.ImageCreationFailed(
image_type='iso', error="error")
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.pass_deploy_info(task, **kwargs)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
update_boot_mode_mock.assert_called_once_with(task)
update_secure_boot_mode_mock.assert_called_once_with(task, True)
continue_deploy_mock.assert_called_once_with(task, **kwargs)
get_iso_mock.assert_called_once_with(task, 'root-uuid')
node_power_mock.assert_called_once_with(task, states.POWER_OFF)
self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertIsNotNone(task.node.last_error)
@mock.patch.object(iscsi_deploy, 'finish_deploy', autospec=True)
@mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed',
autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device')
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode')
@mock.patch.object(ilo_common, 'update_boot_mode')
@mock.patch.object(iscsi_deploy, 'continue_deploy')
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot')
def test_pass_deploy_info_boot_option_local(
self, cleanup_vmedia_boot_mock, continue_deploy_mock,
func_update_boot_mode, func_update_secure_boot_mode,
set_boot_device_mock, notify_ramdisk_to_proceed_mock,
finish_deploy_mock):
kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
continue_deploy_mock.return_value = {'root uuid': '<some-uuid>'}
self.node.instance_info = {'capabilities': '{"boot_option": "local"}'}
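        # with boot_option local the node boots from disk and the deploy
        # finishes only after the bootloader install callback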
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
vendor = task.driver.vendor
vendor.pass_deploy_info(task, **kwargs)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
continue_deploy_mock.assert_called_once_with(task, **kwargs)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.DISK,
persistent=True)
func_update_boot_mode.assert_called_once_with(task)
func_update_secure_boot_mode.assert_called_once_with(task, True)
notify_ramdisk_to_proceed_mock.assert_called_once_with('123456')
self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertFalse(finish_deploy_mock.called)
@mock.patch.object(iscsi_deploy, 'finish_deploy', autospec=True)
@mock.patch.object(manager_utils, 'node_set_boot_device', autospec=True)
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode', autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', autospec=True)
@mock.patch.object(iscsi_deploy, 'continue_deploy', autospec=True)
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot', autospec=True)
def _test_pass_deploy_info_whole_disk_image(
self, cleanup_vmedia_boot_mock, continue_deploy_mock,
func_update_boot_mode, func_update_secure_boot_mode,
set_boot_device_mock, notify_ramdisk_to_proceed_mock):
kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
continue_deploy_mock.return_value = {'root uuid': '<some-uuid>'}
self.node.driver_internal_info = {'is_whole_disk_image': True}
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
vendor = task.driver.vendor
vendor.pass_deploy_info(task, **kwargs)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
continue_deploy_mock.assert_called_once_with(task, **kwargs)
set_boot_device_mock.assert_called_once_with(task,
boot_devices.DISK,
persistent=True)
func_update_boot_mode.assert_called_once_with(task)
func_update_secure_boot_mode.assert_called_once_with(task, True)
iscsi_deploy.finish_deploy.assert_called_once_with(task, '123456')
def test_pass_deploy_info_whole_disk_image_local(self):
self.node.instance_info = {'capabilities': '{"boot_option": "local"}'}
self.node.save()
self._test_pass_deploy_info_whole_disk_image()
def test_pass_deploy_info_whole_disk_image(self):
self._test_pass_deploy_info_whole_disk_image()
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode')
@mock.patch.object(ilo_common, 'update_boot_mode')
@mock.patch.object(keystone, 'get_admin_auth_token')
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'reboot_and_finish_deploy')
@mock.patch.object(ilo_deploy.VendorPassthru, '_configure_vmedia_boot')
@mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy')
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot')
def test_continue_deploy_netboot(self, cleanup_vmedia_boot_mock,
do_agent_iscsi_deploy_mock,
configure_vmedia_boot_mock,
reboot_and_finish_deploy_mock,
keystone_mock,
boot_mode_cap_mock,
update_secure_boot_mock):
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.DEPLOYING
self.node.save()
do_agent_iscsi_deploy_mock.return_value = {
'root uuid': 'some-root-uuid'}
keystone_mock.return_value = 'admin-token'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.continue_deploy(task)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
do_agent_iscsi_deploy_mock.assert_called_once_with(task,
mock.ANY)
configure_vmedia_boot_mock.assert_called_once_with(
task, 'some-root-uuid')
boot_mode_cap_mock.assert_called_once_with(task)
update_secure_boot_mock.assert_called_once_with(task, True)
reboot_and_finish_deploy_mock.assert_called_once_with(task)
# Ensure that admin token is populated in task
self.assertEqual('admin-token', task.context.auth_token)
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode')
@mock.patch.object(ilo_common, 'update_boot_mode')
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'reboot_and_finish_deploy')
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'configure_local_boot')
@mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy')
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot')
def test_continue_deploy_localboot(self, cleanup_vmedia_boot_mock,
do_agent_iscsi_deploy_mock,
configure_local_boot_mock,
reboot_and_finish_deploy_mock,
boot_mode_cap_mock,
update_secure_boot_mock):
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.DEPLOYING
self.node.instance_info = {
'capabilities': {'boot_option': 'local'}}
self.node.save()
do_agent_iscsi_deploy_mock.return_value = {
'root uuid': 'some-root-uuid'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.continue_deploy(task)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
do_agent_iscsi_deploy_mock.assert_called_once_with(task,
mock.ANY)
configure_local_boot_mock.assert_called_once_with(
task, root_uuid='some-root-uuid',
efi_system_part_uuid=None)
boot_mode_cap_mock.assert_called_once_with(task)
update_secure_boot_mock.assert_called_once_with(task, True)
reboot_and_finish_deploy_mock.assert_called_once_with(task)
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode')
@mock.patch.object(ilo_common, 'update_boot_mode')
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'reboot_and_finish_deploy')
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'configure_local_boot')
@mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy')
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot')
def test_continue_deploy_whole_disk_image(
self, cleanup_vmedia_boot_mock, do_agent_iscsi_deploy_mock,
configure_local_boot_mock, reboot_and_finish_deploy_mock,
boot_mode_cap_mock, update_secure_boot_mock):
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.DEPLOYING
self.node.driver_internal_info = {'is_whole_disk_image': True}
self.node.save()
do_agent_iscsi_deploy_mock.return_value = {
'disk identifier': 'some-disk-id'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.continue_deploy(task)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
do_agent_iscsi_deploy_mock.assert_called_once_with(task,
mock.ANY)
configure_local_boot_mock.assert_called_once_with(
task, root_uuid=None, efi_system_part_uuid=None)
reboot_and_finish_deploy_mock.assert_called_once_with(task)
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode')
@mock.patch.object(ilo_common, 'update_boot_mode')
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'reboot_and_finish_deploy')
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'configure_local_boot')
@mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy')
@mock.patch.object(ilo_common, 'cleanup_vmedia_boot')
def test_continue_deploy_localboot_uefi(self, cleanup_vmedia_boot_mock,
do_agent_iscsi_deploy_mock,
configure_local_boot_mock,
reboot_and_finish_deploy_mock,
boot_mode_cap_mock,
update_secure_boot_mock):
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.DEPLOYING
self.node.instance_info = {
'capabilities': {'boot_option': 'local'}}
self.node.save()
do_agent_iscsi_deploy_mock.return_value = {
'root uuid': 'some-root-uuid',
'efi system partition uuid': 'efi-system-part-uuid'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.continue_deploy(task)
cleanup_vmedia_boot_mock.assert_called_once_with(task)
do_agent_iscsi_deploy_mock.assert_called_once_with(task,
mock.ANY)
configure_local_boot_mock.assert_called_once_with(
task, root_uuid='some-root-uuid',
efi_system_part_uuid='efi-system-part-uuid')
boot_mode_cap_mock.assert_called_once_with(task)
update_secure_boot_mock.assert_called_once_with(task, True)
reboot_and_finish_deploy_mock.assert_called_once_with(task)
class IloPXEDeployTestCase(db_base.DbTestCase):
def setUp(self):
super(IloPXEDeployTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="pxe_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='pxe_ilo', driver_info=INFO_DICT)
@mock.patch.object(pxe.PXEDeploy, 'validate')
def test_validate(self, pxe_validate_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.validate(task)
pxe_validate_mock.assert_called_once_with(task)
@mock.patch.object(pxe.PXEDeploy, 'prepare')
@mock.patch.object(ilo_common, 'update_boot_mode')
def test_prepare(self,
update_boot_mode_mock,
pxe_prepare_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties['capabilities'] = 'boot_mode:uefi'
task.driver.deploy.prepare(task)
update_boot_mode_mock.assert_called_once_with(task)
pxe_prepare_mock.assert_called_once_with(task)
@mock.patch.object(pxe.PXEDeploy, 'prepare')
@mock.patch.object(ilo_common, 'update_boot_mode')
def test_prepare_uefi_whole_disk_image_fail(self,
update_boot_mode_mock,
pxe_prepare_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties['capabilities'] = 'boot_mode:uefi'
task.node.driver_internal_info['is_whole_disk_image'] = True
self.assertRaises(exception.InvalidParameterValue,
task.driver.deploy.prepare, task)
update_boot_mode_mock.assert_called_once_with(task)
self.assertFalse(pxe_prepare_mock.called)
@mock.patch.object(pxe.PXEDeploy, 'deploy')
@mock.patch.object(manager_utils, 'node_set_boot_device')
def test_deploy_boot_mode_exists(self, set_persistent_mock,
pxe_deploy_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.deploy.deploy(task)
set_persistent_mock.assert_called_with(task, boot_devices.PXE)
pxe_deploy_mock.assert_called_once_with(task)
class IloPXEVendorPassthruTestCase(db_base.DbTestCase):
def setUp(self):
super(IloPXEVendorPassthruTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="pxe_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='pxe_ilo', driver_info=INFO_DICT)
def test_vendor_routes(self):
expected = ['heartbeat', 'pass_deploy_info',
'pass_bootloader_install_info']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
vendor_routes = task.driver.vendor.vendor_routes
self.assertIsInstance(vendor_routes, dict)
self.assertEqual(sorted(expected), sorted(list(vendor_routes)))
def test_driver_routes(self):
expected = ['lookup']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
driver_routes = task.driver.vendor.driver_routes
self.assertIsInstance(driver_routes, dict)
self.assertEqual(sorted(expected), sorted(list(driver_routes)))
@mock.patch.object(pxe.VendorPassthru, 'pass_deploy_info')
@mock.patch.object(manager_utils, 'node_set_boot_device')
def test_vendorpassthru_pass_deploy_info(self, set_boot_device_mock,
pxe_vendorpassthru_mock):
kwargs = {'address': '123456'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.provision_state = states.DEPLOYWAIT
task.node.target_provision_state = states.ACTIVE
task.driver.vendor.pass_deploy_info(task, **kwargs)
set_boot_device_mock.assert_called_with(task, boot_devices.PXE,
True)
pxe_vendorpassthru_mock.assert_called_once_with(task, **kwargs)
class IloVirtualMediaAgentVendorInterfaceTestCase(db_base.DbTestCase):
def setUp(self):
super(IloVirtualMediaAgentVendorInterfaceTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="agent_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='agent_ilo', driver_info=INFO_DICT)
@mock.patch.object(agent.AgentVendorInterface, 'reboot_to_instance')
@mock.patch.object(agent.AgentVendorInterface, 'check_deploy_success')
@mock.patch.object(ilo_common, 'update_boot_mode')
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode')
def test_reboot_to_instance(self, func_update_secure_boot_mode,
func_update_boot_mode,
check_deploy_success_mock,
agent_reboot_to_instance_mock):
kwargs = {'address': '123456'}
check_deploy_success_mock.return_value = None
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.reboot_to_instance(task, **kwargs)
            check_deploy_success_mock.assert_called_once_with(task.node)
func_update_boot_mode.assert_called_once_with(task)
func_update_secure_boot_mode.assert_called_once_with(task, True)
agent_reboot_to_instance_mock.assert_called_once_with(task,
**kwargs)
@mock.patch.object(agent.AgentVendorInterface, 'reboot_to_instance')
@mock.patch.object(agent.AgentVendorInterface, 'check_deploy_success')
@mock.patch.object(ilo_common, 'update_boot_mode')
@mock.patch.object(ilo_deploy, '_update_secure_boot_mode')
def test_reboot_to_instance_deploy_fail(self, func_update_secure_boot_mode,
func_update_boot_mode,
check_deploy_success_mock,
agent_reboot_to_instance_mock):
kwargs = {'address': '123456'}
check_deploy_success_mock.return_value = "Error"
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.reboot_to_instance(task, **kwargs)
            check_deploy_success_mock.assert_called_once_with(task.node)
self.assertFalse(func_update_boot_mode.called)
self.assertFalse(func_update_secure_boot_mode.called)
agent_reboot_to_instance_mock.assert_called_once_with(task,
**kwargs)
|
{
"content_hash": "8ec7cab3558a4ad54f46cba24d6a14f7",
"timestamp": "",
"source": "github",
"line_count": 1207,
"max_line_length": 79,
"avg_line_length": 53.40016570008285,
"alnum_prop": 0.5802587892140131,
"repo_name": "rdo-management/ironic",
"id": "f44e532b933dd7aa6edf15c82cd0216e87bea33c",
"size": "65111",
"binary": false,
"copies": "3",
"ref": "refs/heads/mgt-master",
"path": "ironic/tests/drivers/ilo/test_deploy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "3067721"
}
],
"symlink_target": ""
}
|
import os
from sys import path
from os.path import join
from django.core.urlresolvers import reverse_lazy
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
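# make apps/ importable so LOCAL_APPS can be referenced without a package prefix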
path.append(join(BASE_DIR, 'apps'))
ALLOWED_HOSTS = []
DJANGO_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
THIRD_PARTY_APPS = [
"django_extensions",
"rest_framework",
]
LOCAL_APPS = [
"users",
"heroes",
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE_CLASSES = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.auth.middleware.SessionAuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "backend.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
os.path.join(BASE_DIR, "templates")
],
"APP_DIRS": True,
"OPTIONS": {
"builtins": [
],
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "backend.wsgi.application"
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
LANGUAGE_CODE = "en-us"
TIME_ZONE = "America/Indianapolis"
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = "/static/"
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
LOGIN_REDIRECT_URL = reverse_lazy("app")
LOGIN_URL = reverse_lazy("login")
REST_FRAMEWORK = {
"DEFAULT_RENDERER_CLASSES": (
"rest_framework.renderers.BrowsableAPIRenderer",
"rest_framework.renderers.JSONRenderer",
),
"DEFAULT_PARSER_CLASSES": (
"rest_framework.parsers.JSONParser",
),
"DEFAULT_PERMISSION_CLASSES": (
"rest_framework.permissions.IsAuthenticated",
"backend.permissions.DjangoModelViewPermissions",
),
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.SessionAuthentication",
),
"DEFAULT_PAGINATION_CLASS": "backend.pagination.StandardPagination",
"DEFAULT_FILTER_BACKENDS": (
"rest_framework.filters.SearchFilter",
"rest_framework.filters.DjangoFilterBackend",
),
}
AUTH_USER_MODEL = 'users.EmailUser'
|
{
"content_hash": "8b51623653e28746fa4fab9cc60a6825",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 91,
"avg_line_length": 27.10655737704918,
"alnum_prop": 0.66253401874811,
"repo_name": "migcruz/dota2analytics",
"id": "94af59d64c9db787bd1909221bcca32fe7cbacd9",
"size": "3307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/settings/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "614881"
},
{
"name": "HTML",
"bytes": "6040"
},
{
"name": "JavaScript",
"bytes": "1042028"
},
{
"name": "Python",
"bytes": "32962"
}
],
"symlink_target": ""
}
|
from awxkit.api import pages, client, resources # NOQA
from awxkit.config import config # NOQA
from awxkit import awx # NOQA
from awxkit.ws import WSClient # NOQA
|
{
"content_hash": "dbe8db5bace4e6ef85ab4432ca853a6e",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 54,
"avg_line_length": 40.75,
"alnum_prop": 0.7852760736196319,
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"id": "23e059823727ea67da6ad34bf28131b42ee6bfc1",
"size": "163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/github.com/ansible/awx/awxkit/awxkit/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import sublime
import sublime_plugin
from Vintageous.state import State
from Vintageous.vi.utils import IrreversibleTextCommand
class ViCommandMixin(object):
'''
Provides functionality needed by most vim commands.
Intended to be used with TextCommand and WindowCommand classes.
'''
@property
def _view(self):
'''
Returns the view that should receive any actions.
'''
view = None
try:
view = self.view
except AttributeError:
try:
view = self.window.active_view()
except AttributeError:
raise AttributeError(
'ViCommandMixin must be used with a TextCommand or a WindowCommand class')
return view
@property
def _window(self):
'''
Returns the window that should receive any actions.
'''
window = None
try:
window = self.window
except AttributeError:
try:
window = self.view.window()
except AttributeError:
raise AttributeError(
'ViCommandMixin must be used with a TextCommand or a WindowCommand class')
return window
@property
def state(self):
return State(self._view)
def save_sel(self):
"""
Saves the current .sel() for later reference.
"""
self.old_sel = tuple(self._view.sel())
def is_equal_to_old_sel(self, new_sel):
try:
return (tuple((s.a, s.b) for s in self.old_sel) ==
tuple((s.a, s.b) for s in tuple(self._view.sel())))
except AttributeError:
raise AttributeError('have you forgotten to call .save_sel()?')
def has_sel_changed(self):
"""
`True` if the current selection is different from .old_sel as recorded
by .save_sel().
"""
return not self.is_equal_to_old_sel(self._view.sel())
def enter_normal_mode(self, mode):
"""
Calls the command to enter normal mode.
@mode: The mode the state was in before calling this method.
"""
self._window.run_command('_enter_normal_mode', {'mode': mode})
def enter_insert_mode(self, mode):
"""
Calls the command to enter insert mode.
@mode: The mode the state was in before calling this method.
"""
self._window.run_command('_enter_insert_mode', {'mode': mode})
def set_xpos(self, state):
try:
xpos = self._view.rowcol(self._view.sel()[0].b)[1]
except Exception as e:
print(e)
raise ValueError('could not set xpos')
state.xpos = xpos
def outline_target(self):
prefs = sublime.load_settings('Preferences.sublime-settings')
if prefs.get('vintageous_visualyank') is False:
return
sels = list(self._view.sel())
sublime.set_timeout(
lambda: self._view.erase_regions('vi_yy_target'), 350)
self._view.add_regions('vi_yy_target', sels, 'comment', '', sublime.DRAW_NO_FILL)
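# Illustrative sketch (not part of the original plugin): a hypothetical
# TextCommand built on the mixin, showing the save_sel()/has_sel_changed()
# protocol. The command name and its body are assumptions for demonstration
# only and are kept commented out so the plugin's behaviour is unchanged.
#
# class ExampleNoopIfUnchangedCommand(sublime_plugin.TextCommand, ViCommandMixin):
#     def run(self, edit):
#         self.save_sel()                    # snapshot .sel() before acting
#         self._view.run_command('move', {'by': 'words', 'forward': True})
#         if not self.has_sel_changed():     # compare against the snapshot
#             self.enter_normal_mode(self.state.mode)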
class ViTextCommandBase(sublime_plugin.TextCommand, ViCommandMixin):
"""
Base class for motion and action commands.
Not all commands need to derive from this base class, but it's
recommended they do if there isn't any good reason they shouldn't.
"""
# Yank config data is controlled through class attributes. ===============
_can_yank = False
_synthetize_new_line_at_eof = False
_yanks_linewise = False
_populates_small_delete_register = False
#=========================================================================
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Due to MRO in Python subclasses, IrreversibleTextCommand must come first so
# that the modified .run_() method is found first.
class ViMotionCommand(IrreversibleTextCommand, ViTextCommandBase):
"""
Motions should bypass the undo stack.
"""
pass
class ViWindowCommandBase(sublime_plugin.WindowCommand, ViCommandMixin):
"""
Base class for some window commands.
Not all window commands need to derive from this base class, but it's
recommended they do if there isn't any good reason they shouldn't.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
|
{
"content_hash": "246268f06e3a38d8f4aee1a7b5444017",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 94,
"avg_line_length": 30.583333333333332,
"alnum_prop": 0.5887829246139873,
"repo_name": "gerardroche/Vintageous",
"id": "8d56db51d8b1eea77ba5962bf180e55b40e06a98",
"size": "4404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vi/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "999051"
}
],
"symlink_target": ""
}
|
import burnGPIO as IO
from time import sleep
import sys, termios, atexit
from intelhex import IntelHex
from select import select
from CpuPIC18F import PIC18F
from mydelay import mydelay
class PIC18FXXK80(PIC18F):
WriteBufferSize = 64
EraseBufferSize = 64
#cpu list dict. CpuId -> [Name, Program (code) Size]
DataSize = 1024
ListName = 0
ListProgramSize = 1
CpuList = {
# ID Name Code Size
0x60E0 : ['PIC18F66K80' , 0x10000 ],
0x6100 : ['PIC18F46K80' , 0x10000 ],
0x6120 : ['PIC18F26K80' , 0x10000 ],
0x6140 : ['PIC18F65K80' , 0x8000 ],
0x6160 : ['PIC18F45K80' , 0x8000 ],
0x6180 : ['PIC18F25K80' , 0x8000 ],
0x61C0 : ['PIC18LF66K80' , 0x10000 ],
0x61E0 : ['PIC18LF46K80' , 0x10000 ],
0x6200 : ['PIC18LF26K80' , 0x10000 ],
0x6220 : ['PIC18LF65K80' , 0x8000 ],
0x6240 : ['PIC18LF45K80' , 0x8000 ],
0x6260 : ['PIC18LF25K80' , 0x8000 ]
}
PicFamily = 'PIC18FXXK80'
def SendMagic(self):
magic = 0x4d434850
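# 0x4d434850 is the ASCII string "MCHP" -- the 32-bit key sequence that
# PIC18F devices expect, shifted out MSB first, to unlock low-voltage
# programming (LVP) mode.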
#MSB FIRST
IO.GPIO.setup(IO.PIC_DATA, IO.GPIO.OUT)
for loop in range(32):
f = (magic & 0x80000000) == 0x80000000
# if f:
# print 1,
# else:
# print 0,
# IO.GPIO.output(IO.PIC_DATA, (magic & 0x80000000) == 0x80000000)
IO.GPIO.output(IO.PIC_DATA, f)
IO.GPIO.output(IO.PIC_CLK, True)
IO.GPIO.output(IO.PIC_CLK, False)
magic = magic << 1
def Set_LVP(self):
#put MCLR HIGH
IO.GPIO.output(IO.PIC_CLK, False)
IO.GPIO.setup(IO.PIC_DATA, IO.GPIO.OUT)
IO.GPIO.output(IO.PIC_DATA, False)
IO.GPIO.output(IO.PIC_MCLR, True)
sleep(0.1)
#put MCLR LOW
IO.GPIO.output(IO.PIC_MCLR, False)
sleep(0.001)
self.SendMagic()
sleep(0.001)
#put MCLR HIGH
IO.GPIO.output(IO.PIC_MCLR, True)
sleep(0.1)
_byte1 = self.ReadMemory(0x3ffffe)
_byte2 = self.ReadMemoryNext()
if (_byte1 != 255) or ( _byte2 != 255):
print("IdTag ", hex(_byte1), ",", hex(_byte2))
def Release_LVP(self):
#just keep it on reset
IO.GPIO.output(IO.PIC_MCLR, True)
sleep(0.001)
IO.GPIO.output(IO.PIC_MCLR, False)
def BulkErase(self):
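# Bulk erase is driven by writing erase-option codes into the configuration
# space at 3C0004h-3C0006h, issuing a NOP, and waiting ~6 ms for each erase
# to finish (values and timing per the device programming specification).
# The four loop passes below erase the code blocks; the sequences that
# follow erase the boot block, the configuration fuses and the data EEPROM.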
print("Bulk Erase ",end='')
#erase BLOCK
for l in range(4):
self.LoadMemoryAddress(0x3C0004)
self.LoadCommandWord(self.C_PIC18_WRITE,0x404)
self.LoadMemoryAddress(0x3C0005)
Value = 1 << l
Value = Value | (Value << 8)
self.LoadCommandWord(self.C_PIC18_WRITE,Value)
self.LoadMemoryAddress(0x3C0006)
self.LoadCommandWord(self.C_PIC18_WRITE,0x8080)
self.LoadCode(0)
self.LoadCommand(self.C_PIC18_NOP)
#wait 6 ms
sleep(0.006)
self.LoadWord(0)
#erase boot block
self.LoadMemoryAddress(0x3C0004)
self.LoadCommandWord(self.C_PIC18_WRITE,0x505)
self.LoadMemoryAddress(0x3C0005)
self.LoadCommandWord(self.C_PIC18_WRITE,0x0)
self.LoadMemoryAddress(0x3C0006)
self.LoadCommandWord(self.C_PIC18_WRITE,0x8080)
self.LoadCode(0)
self.LoadCommand(self.C_PIC18_NOP)
#wait 6 ms
sleep(0.006)
self.LoadWord(0)
#erase config.Fuses
self.LoadMemoryAddress(0x3C0004)
self.LoadCommandWord(self.C_PIC18_WRITE,0x0202)
self.LoadMemoryAddress(0x3C0005)
self.LoadCommandWord(self.C_PIC18_WRITE,0x0000)
self.LoadMemoryAddress(0x3C0006)
self.LoadCommandWord(self.C_PIC18_WRITE,0x8080)
self.LoadCode(0)
self.LoadCommand(self.C_PIC18_NOP)
#wait 6 ms
sleep(0.006)
self.LoadWord(0)
#erase eerom data
self.LoadMemoryAddress(0x3C0004)
self.LoadCommandWord(self.C_PIC18_WRITE,0x0404)
self.LoadMemoryAddress(0x3C0005)
self.LoadCommandWord(self.C_PIC18_WRITE,0x0000)
self.LoadMemoryAddress(0x3C0006)
self.LoadCommandWord(self.C_PIC18_WRITE,0x8080)
self.LoadCode(0)
self.LoadCommand(self.C_PIC18_NOP)
sleep(0.006)
self.LoadWord(0)
print("..... Done!")
def ProgramBurn(self, pic_data):
print("Writing Program BufferSize=",end='')
print(self.WriteBufferSize,end='')
#Direct access to code memory
self.LoadCode(0x8E7F)
self.LoadCode(0x9C7F)
self.LoadCode(0x847F)
#create a buffer to hold program code
WordCount = self.WriteBufferSize // 2  # integer division: used as a list length below
wbuffer = [0xffff] * WordCount
#ok load until all code is written
for l in range(0,self.ProgramSize, self.WriteBufferSize):
BlankFlag= True
#get all buffer and check if they are all blank
for i in range(WordCount):
wbuffer[i] = self.SearchWordValue(pic_data, l+(i * 2)+self.ProgramBase)
if wbuffer[i] != 0xffff:
BlankFlag= False
#if they are all blank just skip it
if BlankFlag:
continue
#ok let's write the buffer
self.LoadMemoryAddress(l)
for i in range(WordCount-1):
self.LoadCommandWord(self.C_PIC18_WRITE_INC_BY2,wbuffer[i])
self.LoadCommandWord(self.C_PIC18_START_PGM,wbuffer[WordCount-1])
#and wait
self.WriteAndWait()
if (l % 1024) == 0:
sys.stdout.write('.')
sys.stdout.flush()
#disable write
self.LoadCode(0x947F)
print("Done!")
def DataBurn(self,pic_data):
print("Writing EEPROM data[",self.DataSize,"]",end="")
#direct access to data EEPROM
self.LoadCode(0x9E7F)
self.LoadCode(0x9C7F)
for l in range(self.DataSize):
if (l % 32)==0 :
sys.stdout.write('.')
sys.stdout.flush()
Value= self.SearchByteValue(pic_data, l + self.DataBase)
if Value == 0xff:
continue
#Set data EEPROM Address Pointer
self.LoadEepromAddress(l)
#Load the data to be written
self.LoadCode(0x0e00 | Value)
self.LoadCode(0x6E73)
#enable memory writes
self.LoadCode(0x847F)
#Initiate write
self.LoadCode(0x827F)
#Poll WR bit until bit is clear
while True:
#Poll WR bit,
self.LoadCode(0x507F)
self.LoadCode(0x6EF5)
self.LoadCommand(self.C_PIC18_TABLAT)
EECON1 = self.ReadData()
if (EECON1 & 2) == 0:
break
# sleep maybe needed if using python compiler
# sleep(0.0001)
#disable write
self.LoadCode(0x947F)
print("Done!")
def IDBurn(self,pic_data):
print("Writing ID",end='')
#direct access config memory
self.LoadCode(0x8E7F)
self.LoadCode(0x9C7F)
#Load Write buffer
self.LoadMemoryAddress(0x200000)
self.LoadCommandWord(self.C_PIC18_WRITE_INC_BY2, self.SearchWordValue(pic_data,self.IDBase))
self.LoadCommandWord(self.C_PIC18_WRITE_INC_BY2, self.SearchWordValue(pic_data,self.IDBase+2))
self.LoadCommandWord(self.C_PIC18_WRITE_INC_BY2, self.SearchWordValue(pic_data,self.IDBase+4))
self.LoadCommandWord(self.C_PIC18_START_PGM, self.SearchWordValue(pic_data,self.IDBase+6))
self.WriteConfig()
print(" ... Done!")
return
ConfigMask = [0x5d, 0xdf, 0x7F, 0x7f, 0, 0x8f, 0x91,0, 0x0f, 0xC0, 0x0F, 0xE0, 0x0f, 0x40]
def WriteConfig(self):
IO.GPIO.setup(IO.PIC_DATA, IO.GPIO.OUT)
IO.GPIO.output(IO.PIC_DATA, False)
mydelay()
for loop in range(3):
IO.GPIO.output(IO.PIC_CLK, True)
mydelay()
IO.GPIO.output(IO.PIC_CLK, False)
mydelay()
IO.GPIO.output(IO.PIC_CLK, True)
sleep(0.01)
IO.GPIO.output(IO.PIC_CLK, False)
mydelay()
self.LoadWord(0)
def ConfigBurn(self,pic_data):
print("CONFIG Burn",end='')
#direct access config memory
self.LoadCode(0x8E7F)
self.LoadCode(0x8C7F)
#burn all config but CONFIG6 last because of WRTC
for l in list(range(11)) + [12, 13, 11]:  # range() cannot be concatenated directly in Python 3
#if config is 30004h or 30007h skip it
if (l == 4) or (l==7) :
continue
#get Config Target Value
TargetValue = pic_data.get(self.ConfigBase +l)
if TargetValue == None:
continue
#use mask to disable unwanted bit
TargetValue = TargetValue & self.ConfigMask[l]
#put MSB and LSB the same
TargetValue = TargetValue | (TargetValue << 8)
self.LoadMemoryAddress(self.ConfigBase+ l)
self.LoadCommandWord(self.C_PIC18_START_PGM,TargetValue)
self.WriteConfig()
self.LoadCode(0x947F)
print(" ... Done!")
def ConfigCheck(self,pic_data):
print("Config Check ",end='')
self.LoadMemoryAddress(self.ConfigBase)
for l in range (14):
Value = self.ReadMemoryNext()
#if config is 30004h or 30007h skip it
if (l == 4) or (l==7) :
continue
TargetValue = pic_data.get(self.ConfigBase +l)
if TargetValue == None:
continue
#use mask to disable unwanted bit
TargetValue = TargetValue & self.ConfigMask[l]
Value = Value & self.ConfigMask[l]
if(Value != TargetValue):
print(" **** Address ", hex(l), " write ", hex(TargetValue), " read" , hex(Value))
return False
if (l % 1024)==0 :
sys.stdout.write('.')
sys.stdout.flush()
print(" ... mydelay()ed!")
return True
def LoadEepromAddress(self,EepromAddress):
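# Each 16-bit word sent via LoadCode() is a PIC18 core instruction executed
# over ICSP: 0x0E00|k is MOVLW k and 0x6Exx is MOVWF to an access-bank SFR.
# Here they load the EEPROM address registers (high byte, then low byte);
# the 0x74/0x75 register mapping is inferred from the surrounding code.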
self.LoadCode(0x0E00 | ((EepromAddress >> 8) & 0xff))
self.LoadCode(0x6E74)
self.LoadCode(0x0E00 | (EepromAddress & 0xff))
self.LoadCode(0x6E75)
def DataBlankCheck(self):
print("EEPROM DATA[",self.DataSize,"] Blank Check ",end='')
#Direct access to data EEPROM
self.LoadCode(0x9E7F)
self.LoadCode(0x9C7F)
for l in range(self.DataSize):
if (l % 32)==0 :
sys.stdout.write('.')
sys.stdout.flush()
#Set data EEPROM Address Pointer
self.LoadEepromAddress(l)
#Initiate A memory Read
self.LoadCode(0x807F)
#Load data into the serial data
self.LoadCode(0x5073)
self.LoadCode(0x6EF5)
self.LoadCode(0)
self.LoadCommand(self.C_PIC18_TABLAT)
RValue= self.ReadData()
if RValue != 0xff :
print(" *** EEPROM DATA address ", hex(l), " not blank! read" , hex(RValue))
return False
print("Done!")
return True
def DataCheck(self,pic_data):
print("EEPROM DATA[",self.DataSize,"] Check ",end='')
#Direct access to data EEPROM
self.LoadCode(0x9E7F)
self.LoadCode(0x9C7F)
for l in range(self.DataSize):
if (l % 32)==0 :
sys.stdout.write('.')
sys.stdout.flush()
Value = self.SearchByteValue(pic_data, l + self.DataBase)
#Set data EEPROM Address Pointer
self.LoadEepromAddress(l)
#Initiate A memory Read
self.LoadCode(0x807F)
#Load data into the serial data
self.LoadCode(0x5073)
self.LoadCode(0x6EF5)
self.LoadCode(0)
self.LoadCommand(self.C_PIC18_TABLAT)
RValue= self.ReadData()
if Value != RValue :
print(" *** EEROM address ", hex(l), " write ", hex(Value), " read" , hex(RValue))
return False
print("Done!")
return True
def FindCpu(self, Id):
_cpuInfo =self.CpuList.get(Id & 0xFFE0)
if _cpuInfo != None:
self.ProgramSize = _cpuInfo[self.ListProgramSize]
self.CpuId = Id & 0xFFE0
self.CpuRevision = Id &0x1F
return _cpuInfo
return None
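# Minimal usage sketch (an assumption, not part of the original script; it
# presumes the PIC18F base class needs no constructor arguments): read the
# device ID bytes at 3FFFFEh/3FFFFFh and resolve them with FindCpu(); the
# low 5 bits carry the silicon revision.
#
# cpu = PIC18FXXK80()
# cpu.Set_LVP()
# dev_id = cpu.ReadMemory(0x3ffffe) | (cpu.ReadMemoryNext() << 8)
# info = cpu.FindCpu(dev_id)
# if info is not None:
#     print(info[cpu.ListName], "rev", cpu.CpuRevision,
#           "program size", hex(cpu.ProgramSize))
# cpu.Release_LVP()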
|
{
"content_hash": "c7023388412726b636db298566090d41",
"timestamp": "",
"source": "github",
"line_count": 379,
"max_line_length": 109,
"avg_line_length": 29.353562005277045,
"alnum_prop": 0.6332584269662921,
"repo_name": "danjperron/burnLVP",
"id": "b11801bc164e4181440053fcbafb9af2587aca59",
"size": "12741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CpuPIC18FXXK80.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "126903"
}
],
"symlink_target": ""
}
|
from etraining_course_finder.model import CourseOperation
from flask import Flask, request, render_template
from flask import redirect, url_for
app = Flask(__name__)
app.config.from_object('etraining_course_finder.publishconf')
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST' and request.form['query'] != '':
return redirect(url_for('finder', query=request.form['query']))
return render_template('base.html')
@app.route('/search/<query>')
def finder(query):
co = CourseOperation()
course_list = co.get_course_by_query(query)
return render_template('finder.html', query=query, course_list=course_list)
@app.route('/about')
def about():
return render_template('about.html')
|
{
"content_hash": "1842ef5dcb06ece39a4d4fa3ca2c05bc",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 77,
"avg_line_length": 31.608695652173914,
"alnum_prop": 0.7152682255845942,
"repo_name": "hansliu/etraining-course-finder-heroku",
"id": "95cf647a404d7220cbe9c745d4845375374abe5e",
"size": "774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "etraining_course_finder/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1941"
},
{
"name": "HTML",
"bytes": "8669"
},
{
"name": "Python",
"bytes": "10732"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import time
from warnings import warn
import platform
import os
import sys
from . import basex
from . import dasch
from . import daun
from . import direct
from . import hansenlaw
from . import linbasex
from . import onion_bordas
from . import rbasex
from . import tools
from abel import _deprecated, _deprecate
class Transform(object):
r"""
Abel transform image class.
This class provides whole-image forward and inverse Abel
transforms, together with preprocessing (centering, symmetrizing)
and postprocessing (integration) functions.
Parameters
----------
IM : a N×M numpy array
This is the image to be transformed
direction : str
The type of Abel transform to be performed.
``forward``
A forward Abel transform takes a (2D) slice of a 3D image and
returns the 2D projection.
``inverse`` (default)
An inverse Abel transform takes a 2D projection and reconstructs
a 2D slice of the 3D image.
method : str
specifies which numerical approximation to the Abel transform should be
employed (see below). The options are
``basex``
the Gaussian "basis set expansion" method of Dribinski et al.
(2002).
``daun``
the deconvolution method with Tikhonov regularization of Daun et
al. and its extensions.
``direct``
a naive implementation of the analytical formula by Roman Yurchuk.
``hansenlaw``
the recursive algorithm described by Hansen and Law (1985).
``linbasex``
the 1D projections of velocity-mapping images in terms of 1D
spherical functions by Gerber et al. (2013).
``onion_bordas``
the algorithm of Bordas and co-workers (1996),
re-implemented by Rallis, Wells and co-workers (2014).
``onion_peeling``
the onion peeling deconvolution as described by Dasch (1992).
``rbasex``
a method similar to pBasex by Garcia et al. (2004) for
velocity-mapping images, but with analytical basis functions
developed by Ryazanov (2012).
``three_point``
the three-point transform of Dasch (1992).
``two_point``
the two-point transform of Dasch (1992).
origin : tuple or str
Before applying Abel transform, the image is centered around this
point.
If a tuple (float, float) is provided, this specifies the image origin
in the (row, column) format. If a string is provided, an automatic
centering algorithm is used:
``image_center``
The origin is assumed to be the center of the image.
``convolution``
The origin is found from autoconvolution of image projections
along each axis.
``slice``
The origin is found by comparing slices in the horizontal and
vertical directions.
``com``
The origin is calculated as the center of mass.
``gaussian``
The origin is found using a fit to a Gaussian function. This only
makes sense if your data looks like a Gaussian.
``none`` (default)
No centering is performed. An image with an odd number of columns
must be provided.
symmetry_axis : None, int or tuple
Symmetrize the image about the numpy axis
0 (vertical), 1 (horizontal), (0, 1) (both axes). Note that the
Abel transform is always performed around the vertical axis.
This parameter only affects how the image is modified before
(and after) applying the Abel transform. For more information,
see the "Quadrant combining" note below.
use_quadrants : tuple of 4 booleans
select quadrants to be used in the analysis: (Q0, Q1, Q2, Q3).
Quadrants are numbered counter-clockwise from the upper right.
See note below for description of quadrants.
Default is ``(True, True, True, True)``, which uses all quadrants.
symmetrize_method : str
Method used for symmetrizing the image.
``average``
Average the quadrants, in accordance with the **symmetry_axis**.
``fourier``
Axial symmetry implies that the Fourier components of the 2D
projection should be real. Removing the imaginary components in
reciprocal space leaves a symmetric projection.
K. R. Overstreet, P. Zabawa, J. Tallant, A. Schwettmann,
J. P. Shaffer,
"Multiple scattering and the density distribution of a Cs MOT",
`Optics Express 13, 9672–9682 (2005)
<https://doi.org/10.1364/OPEX.13.009672>`__.
angular_integration : bool
Integrate the image over angle to give the radial (speed) intensity
distribution.
*Note: in PyAbel ≤0.8.4 the intensity distribution was off by a factor
of π, please keep this in mind when comparing absolute intensities.*
transform_options : dict
Additional arguments passed to the individual transform functions.
See the documentation for the individual transform method for options.
center_options : dict
Additional arguments to be passed to the centering function,
see :func:`abel.tools.center.center_image()`.
angular_integration_options : dict
Additional arguments passed to the angular integration functions,
see :func:`abel.tools.vmi.angular_integration_3D()`.
recast_as_float64 : bool
determines whether the input image should be recast to
``float64``. Many images are imported in other formats (such as
``uint8`` or ``uint16``), and this does not always play well with the
transform algorithms. This should probably always be set to ``True``
(default).
verbose : bool
determines whether non-critical output should be printed.
.. note::
Quadrant combining:
The quadrants can be combined (averaged) using the ``use_quadrants``
keyword in order to provide better data quality.
The quadrants are numbered starting from Q0 in the upper right and
proceeding counter-clockwise::
+--------+--------+
| Q1   * | *   Q0 |
|    *   |   *    |
|     *  |  *     |                                 AQ1 | AQ0
+--------o--------+ --([inverse] Abel transform)--> ----o----
|     *  |  *     |                                 AQ2 | AQ3
|    *   |   *    |
| Q2   * | *   Q3 |        AQi == [inverse] Abel transform
+--------+--------+               of quadrant Qi
Three cases are possible:
1) symmetry_axis = 0 (vertical)::
Combine: Q01 = Q0 + Q1, Q23 = Q2 + Q3
inverse image   AQ01 | AQ01
                -----o-----    (left and right sides equivalent)
                AQ23 | AQ23
2) symmetry_axis = 1 (horizontal)::
Combine: Q12 = Q1 + Q2, Q03 = Q0 + Q3
inverse image   AQ12 | AQ03
                -----o-----    (top and bottom equivalent)
                AQ12 | AQ03
3) symmetry_axis = (0, 1) (both)::
Combine: Q = Q0 + Q1 + Q2 + Q3
inverse image   AQ | AQ
                ---o---    (all quadrants equivalent)
                AQ | AQ
Notes
-----
As mentioned above, PyAbel offers several different approximations to the
exact Abel transform. All the methods should produce similar
results, but depending on the level and type of noise found in the image,
certain methods may perform better than others. Please see the
:ref:`TransformMethods` section of the documentation for complete
information.
The methods marked with a * indicate methods that generate basis sets. The
first time they are run for a new image size, it takes seconds to minutes
to generate the basis set. However, this basis set is saved to disk and can
be reloaded, meaning that future transforms are performed much more
quickly.
``basex`` *
The "basis set exapansion" algorithm describes the data in terms of
gaussian-like functions, which themselves can be Abel-transformed
analytically. With the default functions, centered at each pixel,
this method also does not make any assumption about the
shape of the data. This method is one of the de-facto standards in
photoelectron/photoion imaging.
V. Dribinski, A. Ossadtchi, V. A. Mandelshtam, H. Reisler,
"Reconstruction of Abel-transformable images: The Gaussian basis-set
expansion Abel transform method",
`Rev. Sci. Instrum. 73, 2634–2642 (2002)
<https://doi.org/10.1063/1.1482156>`__.
``daun`` *
Methods based on onion-peeling deconvolution using Tikhonov
regularization described in
K. J. Daun, K. A. Thomson, F. Liu, G. J. Smallwood,
"Deconvolution of axisymmetric flame properties using Tikhonov
regularization",
`Appl. Opt. 45, 4638–4646 (2006)
<https://doi.org/10.1364/AO.45.004638>`__.
In addition to the original implicit step-functions basis (“onion
peeling”) and the derivative regularization, linear and quadratic basis
functions are implemented, as well as the :math:`L_2`-norm Tikhonov
regularization (like in ``basex``) and non-negative least-squares
solution.
``direct``
This method attempts a direct integration of the Abel-transform
integral. It makes no assumptions about the data (apart from
cylindrical symmetry), but it typically requires fine sampling to
converge. Such methods are typically inefficient, but thanks to this
Cython implementation (by Roman Yurchuk), this "direct" method is
competitive with the other methods.
``hansenlaw``
This "recursive algorithm" produces reliable results and is quite fast
(~0.1 s for a 1001×1001 image). It makes no assumptions about the data
(apart from cylindrical symmetry). It tends to require that the data is
finely sampled for good convergence.
E. W. Hansen, P.-L. Law,
"Recursive methods for computing the Abel transform and its inverse",
`J. Opt. Soc. Am. A 2, 510–520 (1985)
<https://doi.org/10.1364/JOSAA.2.000510>`__.
``linbasex`` *
Velocity-mapping images are composed of projected Newton spheres with
a common centre. The 2D images are usually evaluated by a
decomposition into base vectors, each representing the 2D projection
of a set of particles starting from a centre with a specific velocity
distribution. Lin-BASEX evaluates 1D projections of VM images in terms
of 1D projections of spherical functions, instead.
Th. Gerber, Yu. Liu, G. Knopp, P. Hemberger, A. Bodi, P. Radi,
Ya. Sych,
"Charged particle velocity map image reconstruction with
one-dimensional projections of spherical functions",
`Rev. Sci. Instrum. 84, 033101 (2013)
<https://doi.org/10.1063/1.4793404>`__.
``onion_bordas``
The onion peeling method, also known as "back projection", originates
from
C. Bordas, F. Paulig,
"Photoelectron imaging spectrometry: Principle and inversion method",
`Rev. Sci. Instrum. 67, 2257–2268 (1996)
<https://doi.org/10.1063/1.1147044>`__.
The algorithm was subsequently coded in MatLab by
C. E. Rallis, T. G. Burwitz, P. R. Andrews, M. Zohrabi, R. Averin,
S. De, B. Bergues, B. Jochim, A. V. Voznyuk, N. Gregerson, B. Gaire,
I. Znakovskaya, J. McKenna, K. D. Carnes, M. F. Kling, I. Ben-Itzhak,
E. Wells,
"Incorporating real time velocity map image reconstruction into
closed-loop coherent control",
`Rev. Sci. Instrum. 85, 113105 (2014)
<https://doi.org/10.1063/1.4899267>`__,
which was used as the basis of this Python port. See `issue #56
<https://github.com/PyAbel/PyAbel/issues/56>`__.
``onion_peeling`` *
This is one of the most compact and fast algorithms, with the inverse
Abel transform achieved in one Python code-line, `PR #155
<https://github.com/PyAbel/PyAbel/pull/155>`__. See also
``three_point``. This is the onion peeling algorithm as described by Dasch
(1992), reference below.
``rbasex`` *
The pBasex method by
G. A. Garcia, L. Nahon, I. Powis,
“Two-dimensional charged particle image inversion using a polar basis
function expansion”,
`Rev. Sci. Instrum. 75, 4989–4996 (2004)
<http://doi.org/10.1063/1.1807578>`__
adapts the BASEX ("basis set expansion") method to the specific case of
velocity-mapping images by using a basis of 2D functions in polar
coordinates, such that the reconstructed radial distributions are
obtained directly from the expansion coefficients.
This method employs the same approach, but uses more convenient basis
functions, which have analytical Abel transforms separable into radial
and angular parts, developed in
M. Ryazanov,
“Development and implementation of methods for sliced velocity map
imaging. Studies of overtone-induced dissociation and isomerization
dynamics of hydroxymethyl radical (CH\ :sub:`2`\ OH and
CD\ :sub:`2`\ OH)”,
Ph.D. dissertation, University of Southern California, 2012
(`ProQuest <https://search.proquest.com/docview/1289069738>`__,
`USC <https://digitallibrary.usc.edu/asset-management/2A3BF169XWB4>`__).
``three_point`` *
The "Three Point" Abel transform method exploits the observation that
the value of the Abel inverted data at any radial position r is
primarily determined from changes in the projection data in the
neighborhood of r. This method is also very efficient once it has
generated the basis sets.
C. J. Dasch,
"One-dimensional tomography: a comparison of Abel, onion-peeling, and
filtered backprojection methods",
`Appl. Opt. 31, 1146–1152 (1992)
<https://doi.org/10.1364/AO.31.001146>`__.
``two_point`` *
Another Dasch method. Simple and fast, but not as accurate as the
other methods.
The following class attributes are available, depending on the calculation.
Returns
-------
transform : numpy 2D array
the 2D forward/inverse Abel-transformed image.
angular_integration : tuple
(radial-grid, radial-intensity)
radial coordinates and the radial intensity (speed) distribution,
evaluated using :func:`abel.tools.vmi.angular_integration_3D()`.
residual : numpy 2D array
residual image (not currently implemented).
IM : numpy 2D array
the input image, re-centered (optional) with an odd-size width.
method : str
transform method, as specified by the input option.
direction : str
transform direction, as specified by the input option.
Beta : numpy 2D array
with ``method=linbasex, transform_options=dict(return_Beta=True)``:
Beta array coefficients of Newton-sphere spherical harmonics
Beta[0] - the radial intensity variation
Beta[1] - the anisotropy parameter variation
...Beta[n] - higher-order terms up to ``legendre_orders=[0, ..., n]``
radial : numpy 1D array
with ``method=linbasex, transform_options=dict(return_Beta=True)``:
radial grid for Beta array
projection : numpy 2D array
with ``method=linbasex, transform_options=dict(return_Beta=True)``:
radial projection profiles at angles **proj_angles**
distr : Distributions.Results object
with ``method=rbasex``: the object from which various radial
distributions can be retrieved
"""
_verbose = False
def __init__(self, IM,
direction='inverse', method='three_point', origin='none',
symmetry_axis=None, use_quadrants=(True, True, True, True),
symmetrize_method='average', angular_integration=False,
transform_options=dict(), center_options=dict(),
angular_integration_options=dict(),
recast_as_float64=True, verbose=False, center=_deprecated):
"""
The one-stop transform function.
"""
if center is not _deprecated:
_deprecate('abel.transform.Transform() '
'argument "center" is deprecated, use "origin" instead.')
origin = center
# public class variables
self.IM = IM # (optionally) centered, odd-width image
self.method = method
self.direction = direction
# private internal variables
self._origin = origin
self._symmetry_axis = symmetry_axis
self._symmetrize_method = symmetrize_method
self._use_quadrants = use_quadrants
self._transform_options = transform_options
self._recast_as_float64 = recast_as_float64
self._verbose = verbose
# image processing
self._verify_some_inputs()
self._center_image(origin, **center_options)
self._abel_transform_image(**transform_options)
self._integration(angular_integration, transform_options,
**angular_integration_options)
# end of class instance
def _verboseprint(self, *args, **kwargs):
    """Print only when verbose output was requested."""
    if self._verbose:
        print(*args, **kwargs)
def _verify_some_inputs(self):
if self.IM.ndim == 1 or np.shape(self.IM)[0] <= 2:
raise ValueError('Data must be 2-dimensional. '
'To transform a single row, '
'use the individual transform function.')
if not np.any(self._use_quadrants):
raise ValueError('No image quadrants selected to use')
if not isinstance(self._symmetry_axis, (list, tuple)):
# if the user supplies an int, make it into a 1-element list:
self._symmetry_axis = [self._symmetry_axis]
elif len(self._symmetry_axis) == 0:
# treat symmetry_axis=[] as symmetry_axis=None
self._symmetry_axis = [None]
if self.method == 'rbasex' and self._origin != 'none':
if self._transform_options.get('origin') is not None:
raise ValueError('Either use the "origin" argument to center '
'the image, or pass "origin" to rbasex in '
'"transform_options" to use the image as '
'is, but don\'t do both.')
if self._transform_options.get('weights') is not None:
raise ValueError('Using the "origin" argument will center '
'the image but not the "weights" array '
'passed to rbasex. If you want to specify '
'the image origin, pass it in '
'"transform_options".')
if self._recast_as_float64:
self.IM = self.IM.astype('float64')
def _center_image(self, method, **center_options):
if method != "none":
self.IM = tools.center.center_image(self.IM, method,
**center_options)
def _abel_transform_image(self, **transform_options):
self._verboseprint('Calculating {0} Abel transform using {1} method -'
.format(self.direction, self.method),
'\n image size: {:d}x{:d}'.format(*self.IM.shape))
t0 = time.time()
if self.method == "linbasex" and self._symmetry_axis is not None:
self._abel_transform_image_full_linbasex(**transform_options)
elif self.method == "rbasex":
self._abel_transform_image_full_rbasex(**transform_options)
else:
self._abel_transform_image_by_quadrant(**transform_options)
self._verboseprint("{:.2f} seconds".format(time.time() - t0))
def _abel_transform_image_full_linbasex(self, **transform_options):
self.transform, self.radial, self.Beta, self.projection = \
linbasex.linbasex_transform_full(self.IM, **transform_options)
def _abel_transform_image_full_rbasex(self, **transform_options):
self.transform, self.distr = \
rbasex.rbasex_transform(self.IM, direction=self.direction,
**transform_options)
def _abel_transform_image_by_quadrant(self, **transform_options):
abel_transform = {
"basex": basex.basex_transform,
"daun": daun.daun_transform,
"direct": direct.direct_transform,
"hansenlaw": hansenlaw.hansenlaw_transform,
"linbasex": linbasex.linbasex_transform,
"onion_bordas": onion_bordas.onion_bordas_transform,
"onion_peeling": dasch.onion_peeling_transform,
"two_point": dasch.two_point_transform,
"three_point": dasch.three_point_transform,
}
self._verboseprint('Calculating {0} Abel transform using {1} method -'
.format(self.direction, self.method),
'\n image size: {:d}x{:d}'.format(*self.IM.shape))
t0 = time.time()
# split image into quadrants
Q0, Q1, Q2, Q3 = tools.symmetry.get_image_quadrants(
self.IM, reorient=True,
use_quadrants=self._use_quadrants,
symmetry_axis=self._symmetry_axis,
symmetrize_method=self._symmetrize_method)
def selected_transform(Z):
return abel_transform[self.method](Z, direction=self.direction,
**transform_options)
AQ0 = AQ1 = AQ2 = AQ3 = None
# Inverse Abel transform for quadrant 1 (all include Q1)
AQ1 = selected_transform(Q1)
if 1 not in self._symmetry_axis:
AQ2 = selected_transform(Q2)
if 0 not in self._symmetry_axis:
AQ0 = selected_transform(Q0)
if None in self._symmetry_axis:
AQ3 = selected_transform(Q3)
if self.method == "linbasex" and\
"return_Beta" in transform_options.keys():
# linbasex evaluates speed and anisotropy parameters
# AQi == AIM, R, Beta, QLz
Beta0 = AQ0[2]
Beta1 = AQ1[2]
Beta2 = AQ2[2]
Beta3 = AQ3[2]
# reconstructed images of each quadrant
AQ0 = AQ0[0]
AQ1 = AQ1[0]
AQ2 = AQ2[0]
AQ3 = AQ3[0]
# speed
self.linbasex_angular_integration = \
    (Beta0[0] + Beta1[0] + Beta2[0] + Beta3[0])/4
# anisotropy
self.linbasex_anisotropy_parameter = \
    (Beta0[1] + Beta1[1] + Beta2[1] + Beta3[1])/4
# reassemble image
self.transform = tools.symmetry.put_image_quadrants(
(AQ0, AQ1, AQ2, AQ3),
original_image_shape=self.IM.shape,
symmetry_axis=self._symmetry_axis)
self._verboseprint("{:.2f} seconds".format(time.time()-t0))
def _integration(self, angular_integration, transform_options,
**angular_integration_options):
if angular_integration:
if 'dr' in transform_options and\
'dr' not in angular_integration_options:
# assume user forgot to pass grid size
angular_integration_options['dr'] = transform_options['dr']
self.angular_integration = tools.vmi.angular_integration_3D(
self.transform,
**angular_integration_options)
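# Minimal usage sketch, assuming `image` is a 2D NumPy array holding a
# cylindrically symmetric projection (the variable names are illustrative,
# not from the original module):
#
#   res = Transform(image, method='hansenlaw', direction='inverse',
#                   angular_integration=True)
#   slice_2d = res.transform              # reconstructed 2D slice
#   r, speed = res.angular_integration    # radial intensity distribution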
# Default directory for cached basis sets;
# used by set_basis_dir() and get_basis_dir().
# DON'T access this variable directly!
_basis_dir = ''
def set_basis_dir(basis_dir='', make=True):
"""
Changes the path to the directory for saving/loading cached basis sets that
transform methods use by default.
Parameters
----------
basis_dir : str or None
absolute or relative path. Passing ``''`` (default) resets to the
system-dependent default path, see :func:`default_basis_dir`. For the
current working directory (as in PyAbel up to v0.8.4), use ``'.'``. To
disable basis-set caching on disk, use ``None``.
make : bool
create the directory if it does not exist (default: yes)
Returns
-------
None
"""
global _basis_dir
if basis_dir == '':
_basis_dir = default_basis_dir()
else:
_basis_dir = basis_dir
if make:
_make_basis_dir() # (safe for None)
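# Usage sketch (illustrative): relocate or disable the on-disk basis cache.
#
#   set_basis_dir('./abel_basis')   # cache basis sets in a local folder
#   set_basis_dir(None)             # disable disk caching altogether
#   set_basis_dir()                 # back to the system default location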
def get_basis_dir(make=False):
"""
Gets the path to the directory for saving/loading cached basis sets that
transform methods use by default. If not changed by :func:`set_basis_dir`,
it depends on the operating system, see :func:`default_basis_dir`.
Parameters
----------
make : bool
create the directory if it does not exist (default: no)
Returns
-------
path : str or None
absolute or relative path if disk caching is enabled,
otherwise ``None``
"""
global _basis_dir
if _basis_dir == '':
set_basis_dir('', make)
return _basis_dir
def _make_basis_dir():
"""
Internal utility function.
Makes sure that the default basis-set directory (if enabled) exists.
"""
if _basis_dir is None or os.path.exists(_basis_dir):
return
try:
os.makedirs(_basis_dir)
except Exception as e:
print('Cannot create the directory\n"{}"\n'
'for saving/loading basis sets:'.format(_basis_dir))
raise
def default_basis_dir():
r"""
Gets full path to the system-dependent default directory for saving/loading
cached basis sets:
Linux (and other Unix-like):
``~/.cache/PyAbel`` (or ``$XDG_CACHE_HOME/PyAbel`` if set)
macOS:
``/Users/<user>/Library/Caches/PyAbel``
Windows:
``<user profile>\AppData\Local\PyAbel\cache`` (or
``%LOCALAPPDATA%\PyAbel\cache`` if set). See important notes below.
Parameters
----------
None
Returns
-------
path : str
full path to the system-dependent default basis-sets directory
.. admonition:: Notes for MS Windows users
* Python installed from Microsoft Store redirects subdirectory creation
in ``AppData\Local`` to a `"private per-user, per-app location"
<https://docs.microsoft.com/en-us/windows/msix/desktop/desktop-to-uwp-behind-the-scenes>`__
``AppData\Local\Packages\Python...\LocalCache\Local`` (see `Using
Python on Windows / Known Issues
<https://docs.python.org/3/using/windows.html#known-issues>`__).
However, if ``AppData\Local\PyAbel\`` already exists (for example, was
manually created *not* from Python), apparently it should be usable.
* Old Windows versions (2000, XP, Server 2003) by default don't set the
``LOCALAPPDATA`` environment variable, so PyAbel will create and use
the ``AppData\Local`` subtree in the user profile folder. This is
probably fine, but not how it should be. To use the standard location,
please do ::
set LOCALAPPDATA=%USERPROFILE%\Local Settings\Application Data
before starting Python. Or permanently set it in “Environment
Variables” from Windows “System Properties”.
"""
# Based on the appdirs module, https://github.com/ActiveState/appdirs
# (see https://github.com/ActiveState/appdirs/blob/master/appdirs.py)
system = platform.system()
if system == 'Darwin': # macOS
return os.path.expanduser('~/Library/Caches/PyAbel')
if system == 'Windows':
return os.path.join(os.getenv('LOCALAPPDATA',
os.path.expanduser(r'~\AppData\Local')),
r'PyAbel\cache')
# Linux and other
return os.path.join(os.getenv('XDG_CACHE_HOME',
os.path.expanduser('~/.cache')),
'PyAbel')
# system == 'Java' is ignored as useless -- Jython does not support NumPy
def basis_dir_cleanup(basis_dir='', method=None):
"""
Deletes saved basis sets.
Parameters
----------
basis_dir : str or None
path to the directory with saved basis sets. Use ``''`` for the default
directory, see :func:`get_basis_dir`. (For convenience, ``None`` can be
passed to do nothing.)
method : str or list of str or None
transform methods for which basis sets should be deleted. Can be a
single string (see the :attr:`method` parameter in :class:`Transform`)
or a list of strings. Use ``'all'`` to delete basis sets for all
methods. ``None`` does nothing.
Returns
-------
None
"""
if basis_dir is None or method is None:
return
# make the list of methods
if method == 'all':
methods = ['basex', 'daun', 'linbasex', 'onion_peeling', 'rbasex',
'three_point', 'two_point']
elif np.ndim(method) == 0: # single string
methods = [method]
else: # already a list
methods = method
for method in methods:
if method in ['onion_peeling', 'three_point', 'two_point']:
dasch.basis_dir_cleanup(method, basis_dir)
else:
module = sys.modules.get('abel.' + method)
if not module:
warn('Unknown method "{}"!'.format(method),
SyntaxWarning, stacklevel=2)
continue
func = getattr(module, 'basis_dir_cleanup', None)
if func:
func(basis_dir)
else:
warn('Method "{}" does not save basis sets.'.format(method),
SyntaxWarning, stacklevel=2)
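# Usage sketch (illustrative): delete cached basis sets from the default
# directory for selected methods, or for all of them.
#
#   basis_dir_cleanup('', ['basex', 'three_point'])
#   basis_dir_cleanup('', 'all')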
|
{
"content_hash": "73095c0da4725573b5232e1255fdda45",
"timestamp": "",
"source": "github",
"line_count": 785,
"max_line_length": 99,
"avg_line_length": 39.163057324840764,
"alnum_prop": 0.6014051979312364,
"repo_name": "stggh/PyAbel",
"id": "aae623e9c665b470ed36e476d7e5ae1b90ebcfa2",
"size": "30844",
"binary": false,
"copies": "2",
"ref": "refs/heads/HansenLaw-fix",
"path": "abel/transform.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "3187"
},
{
"name": "Python",
"bytes": "486460"
}
],
"symlink_target": ""
}
|
"""
Implementation of the ProphetStor DPL storage adapter class for Federator.
# v2.0.1 Consistency group support
# v2.0.2 Pool aware scheduler
# v2.0.3 Consistency group modification support
# v2.0.4 Port ProphetStor driver to use new driver model
"""
import base64
import errno
import json
import random
import time
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import units
import six
from six.moves import http_client
from cinder import exception
from cinder.i18n import _, _LI, _LW, _LE
from cinder import objects
from cinder.objects import fields
from cinder.volume import driver
from cinder.volume.drivers.prophetstor import options
from cinder.volume.drivers.san import san
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
CONNECTION_RETRY = 10
MAXSNAPSHOTS = 1024
DISCOVER_SERVER_TYPE = 'dpl'
DPL_BLOCKSTOR = '/dpl_blockstor'
DPL_SYSTEM = '/dpl_system'
DPL_VER_V1 = 'v1'
DPL_OBJ_POOL = 'dpl_pool'
DPL_OBJ_DISK = 'dpl_disk'
DPL_OBJ_VOLUME = 'dpl_volume'
DPL_OBJ_VOLUMEGROUP = 'dpl_volgroup'
DPL_OBJ_SNAPSHOT = 'cdmi_snapshots'
DPL_OBJ_EXPORT = 'dpl_export'
DPL_OBJ_REPLICATION = 'cdmi_replication'
DPL_OBJ_TARGET = 'dpl_target'
DPL_OBJ_SYSTEM = 'dpl_system'
DPL_OBJ_SNS = 'sns_table'
class DPLCommand(object):
"""DPL command interface."""
def __init__(self, ip, port, username, password):
self.ip = ip
self.port = port
self.username = username
self.password = password
def send_cmd(self, method, url, params, expected_status):
"""Send command to DPL."""
connection = None
retcode = 0
response = {}
data = {}
header = {'Content-Type': 'application/cdmi-container',
'Accept': 'application/cdmi-container',
'x-cdmi-specification-version': '1.0.2'}
# base64 encode the username and password
auth = base64.encodestring('%s:%s'
% (self.username,
self.password)).replace('\n', '')
header['Authorization'] = 'Basic %s' % auth
if not params:
payload = None
else:
try:
payload = json.dumps(params, ensure_ascii=False)
payload.encode('utf-8')
except Exception as e:
LOG.error(_LE('JSON encode params %(param)s error:'
' %(status)s.'), {'param': params, 'status': e})
retcode = errno.EINVAL
for i in range(CONNECTION_RETRY):
try:
connection = http_client.HTTPSConnection(self.ip,
self.port,
timeout=60)
if connection:
retcode = 0
break
except IOError as ioerr:
LOG.error(_LE('Connect to Flexvisor error: %s.'),
ioerr)
retcode = errno.ENOTCONN
except Exception as e:
LOG.error(_LE('Connect to Flexvisor failed: %s.'),
e)
retcode = errno.EFAULT
retry = CONNECTION_RETRY
while (connection and retry):
try:
connection.request(method, url, payload, header)
except http_client.CannotSendRequest as e:
connection.close()
time.sleep(1)
connection = http_client.HTTPSConnection(self.ip,
self.port,
timeout=60)
retry -= 1
if connection:
if retry == 0:
retcode = errno.ENOTCONN
else:
retcode = 0
else:
retcode = errno.ENOTCONN
continue
except Exception as e:
LOG.error(_LE('Failed to send request: %s.'),
e)
retcode = errno.EFAULT
break
if retcode == 0:
try:
response = connection.getresponse()
if response.status == http_client.SERVICE_UNAVAILABLE:
LOG.error(_LE('The Flexvisor service is unavailable.'))
time.sleep(1)
retry -= 1
retcode = errno.ENOPROTOOPT
continue
else:
retcode = 0
break
except http_client.ResponseNotReady as e:
time.sleep(1)
retry -= 1
retcode = errno.EFAULT
continue
except Exception as e:
LOG.error(_LE('Failed to get response: %s.'),
e)
retcode = errno.EFAULT
break
if (retcode == 0 and response.status in expected_status
and response.status == http_client.NOT_FOUND):
retcode = errno.ENODATA
elif retcode == 0 and response.status not in expected_status:
LOG.error(_LE('%(method)s %(url)s unexpected response status: '
'%(response)s (expects: %(expects)s).'),
{'method': method,
'url': url,
'response': http_client.responses[response.status],
'expects': expected_status})
if response.status == http_client.UNAUTHORIZED:
raise exception.NotAuthorized
else:
retcode = errno.EIO
elif retcode == 0 and response.status == http_client.NOT_FOUND:
retcode = errno.ENODATA
elif retcode == 0 and response.status == http_client.ACCEPTED:
retcode = errno.EAGAIN
try:
data = response.read()
data = json.loads(data)
except (TypeError, ValueError) as e:
LOG.error(_LE('Call to json.loads() raised an exception: %s.'),
e)
retcode = errno.ENOEXEC
except Exception as e:
LOG.error(_LE('Read response raised an exception: %s.'),
e)
retcode = errno.ENOEXEC
elif (retcode == 0 and
response.status in [http_client.OK, http_client.CREATED] and
http_client.NO_CONTENT not in expected_status):
try:
data = response.read()
data = json.loads(data)
except (TypeError, ValueError) as e:
LOG.error(_LE('Call to json.loads() raised an exception: %s.'),
e)
retcode = errno.ENOEXEC
except Exception as e:
LOG.error(_LE('Read response raised an exception: %s.'),
e)
retcode = errno.ENOEXEC
if connection:
connection.close()
return retcode, data
class DPLVolume(object):
def __init__(self, dplServer, dplPort, dplUser, dplPassword):
self.objCmd = DPLCommand(dplServer, dplPort, dplUser, dplPassword)
def _execute(self, method, url, params, expected_status):
if self.objCmd:
return self.objCmd.send_cmd(method, url, params, expected_status)
else:
return -1, None
def _gen_snapshot_url(self, vdevid, snapshotid):
snapshot_url = '/%s/%s/%s' % (vdevid, DPL_OBJ_SNAPSHOT, snapshotid)
return snapshot_url
def get_server_info(self):
method = 'GET'
url = ('/%s/%s/' % (DPL_VER_V1, DPL_OBJ_SYSTEM))
return self._execute(method, url, None,
[http_client.OK, http_client.ACCEPTED])
def create_vdev(self, volumeID, volumeName, volumeDesc, poolID, volumeSize,
fthinprovision=True, maximum_snapshot=MAXSNAPSHOTS,
snapshot_quota=None):
method = 'PUT'
metadata = {}
params = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, volumeID)
if volumeName is None or volumeName == '':
metadata['display_name'] = volumeID
else:
metadata['display_name'] = volumeName
metadata['display_description'] = volumeDesc
metadata['pool_uuid'] = poolID
metadata['total_capacity'] = volumeSize
metadata['maximum_snapshot'] = maximum_snapshot
if snapshot_quota is not None:
metadata['snapshot_quota'] = int(snapshot_quota)
metadata['properties'] = dict(thin_provision=fthinprovision)
params['metadata'] = metadata
return self._execute(method,
url, params,
[http_client.OK, http_client.ACCEPTED,
http_client.CREATED])
def extend_vdev(self, volumeID, volumeName, volumeDesc, volumeSize,
maximum_snapshot=MAXSNAPSHOTS, snapshot_quota=None):
method = 'PUT'
metadata = {}
params = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, volumeID)
if volumeName is None or volumeName == '':
metadata['display_name'] = volumeID
else:
metadata['display_name'] = volumeName
metadata['display_description'] = volumeDesc
metadata['total_capacity'] = int(volumeSize)
metadata['maximum_snapshot'] = maximum_snapshot
if snapshot_quota is not None:
metadata['snapshot_quota'] = snapshot_quota
params['metadata'] = metadata
return self._execute(method,
url, params,
[http_client.OK, http_client.ACCEPTED,
http_client.CREATED])
def delete_vdev(self, volumeID, force=True):
method = 'DELETE'
metadata = {}
params = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, volumeID)
metadata['force'] = force
params['metadata'] = metadata
return self._execute(method,
url, params,
[http_client.OK, http_client.ACCEPTED,
http_client.NOT_FOUND, http_client.NO_CONTENT])
def create_vdev_from_snapshot(self, vdevID, vdevDisplayName, vdevDesc,
snapshotID, poolID, fthinprovision=True,
maximum_snapshot=MAXSNAPSHOTS,
snapshot_quota=None):
method = 'PUT'
metadata = {}
params = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevID)
metadata['snapshot_operation'] = 'copy'
if vdevDisplayName is None or vdevDisplayName == "":
metadata['display_name'] = vdevID
else:
metadata['display_name'] = vdevDisplayName
metadata['display_description'] = vdevDesc
metadata['pool_uuid'] = poolID
metadata['properties'] = {}
metadata['maximum_snapshot'] = maximum_snapshot
if snapshot_quota:
metadata['snapshot_quota'] = snapshot_quota
metadata['properties'] = dict(thin_provision=fthinprovision)
params['metadata'] = metadata
params['copy'] = self._gen_snapshot_url(vdevID, snapshotID)
return self._execute(method,
url, params,
[http_client.OK, http_client.ACCEPTED,
http_client.CREATED])
def spawn_vdev_from_snapshot(self, new_vol_id, src_vol_id,
vol_display_name, description, snap_id):
method = 'PUT'
params = {}
metadata = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, new_vol_id)
metadata['snapshot_operation'] = 'spawn'
if vol_display_name is None or vol_display_name == '':
metadata['display_name'] = new_vol_id
else:
metadata['display_name'] = vol_display_name
metadata['display_description'] = description
params['metadata'] = metadata
params['copy'] = self._gen_snapshot_url(src_vol_id, snap_id)
return self._execute(method, url, params,
[http_client.OK, http_client.ACCEPTED,
http_client.CREATED])
def get_pools(self):
method = 'GET'
url = '/%s/%s/' % (DPL_VER_V1, DPL_OBJ_POOL)
return self._execute(method, url, None, [http_client.OK])
def get_pool(self, poolid):
method = 'GET'
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_POOL, poolid)
return self._execute(method, url, None,
[http_client.OK, http_client.ACCEPTED])
def clone_vdev(self, SourceVolumeID, NewVolumeID, poolID, volumeName,
volumeDesc, volumeSize, fthinprovision=True,
maximum_snapshot=MAXSNAPSHOTS, snapshot_quota=None):
method = 'PUT'
params = {}
metadata = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, NewVolumeID)
metadata["snapshot_operation"] = "clone"
if volumeName is None or volumeName == '':
metadata["display_name"] = NewVolumeID
else:
metadata["display_name"] = volumeName
metadata["display_description"] = volumeDesc
metadata["pool_uuid"] = poolID
metadata["total_capacity"] = volumeSize
metadata["maximum_snapshot"] = maximum_snapshot
if snapshot_quota:
metadata["snapshot_quota"] = snapshot_quota
metadata["properties"] = dict(thin_provision=fthinprovision)
params["metadata"] = metadata
params["copy"] = SourceVolumeID
return self._execute(method,
url, params,
[http_client.OK, http_client.CREATED,
http_client.ACCEPTED])
def create_vdev_snapshot(self, vdevid, snapshotid, snapshotname='',
snapshotdes='', isgroup=False):
method = 'PUT'
metadata = {}
params = {}
if isgroup:
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUMEGROUP, vdevid)
else:
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid)
if not snapshotname:
metadata['display_name'] = snapshotid
else:
metadata['display_name'] = snapshotname
metadata['display_description'] = snapshotdes
params['metadata'] = metadata
params['snapshot'] = snapshotid
return self._execute(method,
url, params,
[http_client.OK, http_client.CREATED,
http_client.ACCEPTED])
def get_vdev(self, vdevid):
method = 'GET'
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid)
return self._execute(method,
url, None,
[http_client.OK, http_client.ACCEPTED,
http_client.NOT_FOUND])
def get_vdev_status(self, vdevid, eventid):
method = 'GET'
url = ('/%s/%s/%s/?event_uuid=%s' % (DPL_VER_V1, DPL_OBJ_VOLUME,
vdevid, eventid))
return self._execute(method,
url, None,
[http_client.OK, http_client.NOT_FOUND])
def get_pool_status(self, poolid, eventid):
method = 'GET'
url = ('/%s/%s/%s/?event_uuid=%s' % (DPL_VER_V1, DPL_OBJ_POOL,
poolid, eventid))
return self._execute(method,
url, None,
[http_client.OK, http_client.NOT_FOUND])
def assign_vdev(self, vdevid, iqn, lunname, portal, lunid=0):
method = 'PUT'
metadata = {}
exports = {}
params = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid)
metadata['export_operation'] = 'assign'
exports['Network/iSCSI'] = {}
target_info = {}
target_info['logical_unit_number'] = 0
target_info['logical_unit_name'] = lunname
permissions = []
portals = []
portals.append(portal)
permissions.append(iqn)
target_info['permissions'] = permissions
target_info['portals'] = portals
exports['Network/iSCSI'] = target_info
params['metadata'] = metadata
params['exports'] = exports
return self._execute(method,
url, params,
[http_client.OK, http_client.ACCEPTED,
http_client.CREATED])
def assign_vdev_fc(self, vdevid, targetwwpn, initiatorwwpn, lunname,
lunid=-1):
method = 'PUT'
metadata = {}
exports = {}
params = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid)
metadata['export_operation'] = 'assign'
exports['Network/FC'] = {}
target_info = {}
target_info['target_identifier'] = targetwwpn
target_info['logical_unit_number'] = lunid
target_info['logical_unit_name'] = lunname
target_info['permissions'] = initiatorwwpn
exports['Network/FC'] = target_info
params['metadata'] = metadata
params['exports'] = exports
return self._execute(method,
url, params,
[http_client.OK, http_client.ACCEPTED,
http_client.CREATED])
def unassign_vdev(self, vdevid, initiatorIqn, targetIqn=''):
method = 'PUT'
metadata = {}
exports = {}
params = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid)
metadata['export_operation'] = 'unassign'
params['metadata'] = metadata
exports['Network/iSCSI'] = {}
exports['Network/iSCSI']['target_identifier'] = targetIqn
permissions = []
permissions.append(initiatorIqn)
exports['Network/iSCSI']['permissions'] = permissions
params['exports'] = exports
return self._execute(method,
url, params,
[http_client.OK, http_client.ACCEPTED,
http_client.NO_CONTENT, http_client.NOT_FOUND])
def unassign_vdev_fc(self, vdevid, targetwwpn, initiatorwwpns):
method = 'PUT'
metadata = {}
exports = {}
params = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid)
metadata['export_operation'] = 'unassign'
params['metadata'] = metadata
exports['Network/FC'] = {}
exports['Network/FC']['target_identifier'] = targetwwpn
permissions = initiatorwwpns
exports['Network/FC']['permissions'] = permissions
params['exports'] = exports
return self._execute(method,
url, params,
[http_client.OK, http_client.ACCEPTED,
http_client.NO_CONTENT, http_client.NOT_FOUND])
def delete_vdev_snapshot(self, objID, snapshotID, isGroup=False):
method = 'DELETE'
if isGroup:
url = ('/%s/%s/%s/%s/%s/' % (DPL_VER_V1,
DPL_OBJ_VOLUMEGROUP,
objID,
DPL_OBJ_SNAPSHOT, snapshotID))
else:
url = ('/%s/%s/%s/%s/%s/' % (DPL_VER_V1,
DPL_OBJ_VOLUME, objID,
DPL_OBJ_SNAPSHOT, snapshotID))
return self._execute(method,
url, None,
[http_client.OK, http_client.ACCEPTED,
http_client.NO_CONTENT, http_client.NOT_FOUND])
def rollback_vdev(self, vdevid, snapshotid):
method = 'PUT'
params = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid)
params['copy'] = self._gen_snapshot_url(vdevid, snapshotid)
return self._execute(method,
url, params,
[http_client.OK, http_client.ACCEPTED])
def list_vdev_snapshots(self, vdevid, isGroup=False):
method = 'GET'
if isGroup:
url = ('/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUMEGROUP, vdevid,
DPL_OBJ_SNAPSHOT))
else:
url = ('/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME,
vdevid, DPL_OBJ_SNAPSHOT))
return self._execute(method,
url, None,
[http_client.OK])
def query_vdev_snapshot(self, vdevid, snapshotID, isGroup=False):
method = 'GET'
if isGroup:
url = ('/%s/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUMEGROUP,
vdevid, DPL_OBJ_SNAPSHOT, snapshotID))
else:
url = ('/%s/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid,
DPL_OBJ_SNAPSHOT, snapshotID))
return self._execute(method,
url, None,
[http_client.OK])
def create_target(self, targetID, protocol, displayName, targetAddress,
description=''):
method = 'PUT'
params = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, targetID)
params['metadata'] = {}
metadata = params['metadata']
metadata['type'] = 'target'
metadata['protocol'] = protocol
if displayName is None or displayName == '':
metadata['display_name'] = targetID
else:
metadata['display_name'] = displayName
metadata['display_description'] = description
metadata['address'] = targetAddress
return self._execute(method, url, params, [http_client.OK])
def get_target(self, targetID):
method = 'GET'
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, targetID)
return self._execute(method, url, None, [http_client.OK])
def delete_target(self, targetID):
method = 'DELETE'
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, targetID)
return self._execute(method,
url, None,
[http_client.OK, http_client.ACCEPTED,
http_client.NOT_FOUND])
def get_target_list(self, type='target'):
# type = target/initiator
method = 'GET'
if type is None:
url = '/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT)
else:
url = '/%s/%s/?type=%s' % (DPL_VER_V1, DPL_OBJ_EXPORT, type)
return self._execute(method, url, None, [http_client.OK])
def get_sns_table(self, wwpn):
method = 'PUT'
params = {}
url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, DPL_OBJ_SNS)
params['metadata'] = {}
params['metadata']['protocol'] = 'fc'
params['metadata']['address'] = str(wwpn)
return self._execute(method, url, params, [http_client.OK])
def create_vg(self, groupID, groupName, groupDesc='', listVolume=None,
maxSnapshots=MAXSNAPSHOTS, rotationSnapshot=True):
method = 'PUT'
metadata = {}
params = {}
properties = {}
url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID)
if listVolume:
metadata['volume'] = listVolume
else:
metadata['volume'] = []
metadata['display_name'] = groupName
metadata['display_description'] = groupDesc
metadata['maximum_snapshot'] = maxSnapshots
properties['snapshot_rotation'] = rotationSnapshot
metadata['properties'] = properties
params['metadata'] = metadata
return self._execute(method, url, params,
[http_client.OK, http_client.ACCEPTED,
http_client.CREATED])
def get_vg_list(self, vgtype=None):
method = 'GET'
if vgtype:
url = '/%s/?volume_group_type=%s' % (DPL_OBJ_VOLUMEGROUP, vgtype)
else:
url = '/%s/' % (DPL_OBJ_VOLUMEGROUP)
return self._execute(method, url, None, [http_client.OK])
def get_vg(self, groupID):
method = 'GET'
url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID)
return self._execute(method, url, None, [http_client.OK])
def delete_vg(self, groupID, force=True):
method = 'DELETE'
metadata = {}
params = {}
url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID)
metadata['force'] = force
params['metadata'] = metadata
return self._execute(method, url, params,
[http_client.NO_CONTENT, http_client.NOT_FOUND])
def join_vg(self, volumeID, groupID):
method = 'PUT'
metadata = {}
params = {}
url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID)
metadata['volume_group_operation'] = 'join'
metadata['volume'] = []
metadata['volume'].append(volumeID)
params['metadata'] = metadata
return self._execute(method, url, params,
[http_client.OK, http_client.ACCEPTED])
def leave_vg(self, volumeID, groupID):
method = 'PUT'
metadata = {}
params = {}
url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID)
metadata['volume_group_operation'] = 'leave'
metadata['volume'] = []
metadata['volume'].append(volumeID)
params['metadata'] = metadata
return self._execute(method, url, params,
[http_client.OK, http_client.ACCEPTED])
class DPLCOMMONDriver(driver.ConsistencyGroupVD, driver.ExtendVD,
driver.CloneableImageVD,
driver.SnapshotVD, driver.LocalVD, driver.BaseVD):
"""Class of dpl storage adapter."""
VERSION = '2.0.4'
def __init__(self, *args, **kwargs):
super(DPLCOMMONDriver, self).__init__(*args, **kwargs)
if self.configuration:
self.configuration.append_config_values(options.DPL_OPTS)
self.configuration.append_config_values(san.san_opts)
self.dpl = DPLVolume(self.configuration.san_ip,
self.configuration.dpl_port,
self.configuration.san_login,
self.configuration.san_password)
self._stats = {}
def _convert_size_GB(self, size):
s = round(float(size) / units.Gi, 2)
if s > 0:
return s
else:
return 0
def _conver_uuid2hex(self, strID):
if strID:
return strID.replace('-', '')
else:
return None
def _get_event_uuid(self, output):
ret = 0
event_uuid = ""
if (type(output) is dict and
output.get("metadata") and output["metadata"]):
if (output["metadata"].get("event_uuid") and
output["metadata"]["event_uuid"]):
event_uuid = output["metadata"]["event_uuid"]
else:
ret = errno.EINVAL
else:
ret = errno.EINVAL
return ret, event_uuid
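    # Illustrative sketch (not part of the original driver code): the helper
    # above only inspects the 'metadata' -> 'event_uuid' path of an
    # asynchronous response, so a usable reply is assumed to look roughly
    # like:
    #
    #     output = {'metadata': {'event_uuid': 'abc123'}}
    #     ret, event_uuid = self._get_event_uuid(output)  # -> (0, 'abc123')
    #
    # Any other shape yields (errno.EINVAL, "").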
def _wait_event(self, callFun, objuuid, eventid=None):
nRetry = 30
fExit = False
status = {}
status['state'] = 'error'
status['output'] = {}
while nRetry:
try:
if eventid:
ret, output = callFun(
self._conver_uuid2hex(objuuid),
self._conver_uuid2hex(eventid))
else:
ret, output = callFun(self._conver_uuid2hex(objuuid))
if ret == 0:
if output['completionStatus'] == 'Complete':
fExit = True
status['state'] = 'available'
status['output'] = output
elif output['completionStatus'] == 'Error':
fExit = True
status['state'] = 'error'
raise loopingcall.LoopingCallDone(retvalue=False)
else:
nsleep = random.randint(0, 10)
value = round(float(nsleep) / 10, 2)
time.sleep(value)
elif ret == errno.ENODATA:
status['state'] = 'deleted'
fExit = True
else:
nRetry -= 1
time.sleep(3)
continue
except Exception as e:
LOG.error(_LE('Flexvisor failed to get event %(volume)s '
'(%(status)s).'),
{'volume': eventid, 'status': e})
raise loopingcall.LoopingCallDone(retvalue=False)
if fExit is True:
break
return status
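    # Illustrative sketch (not part of the original driver code): _wait_event
    # polls a status callback until the backend reports 'Complete', 'Error',
    # or the object disappears (errno.ENODATA). Callers in this class use it
    # along the lines of:
    #
    #     status = self._wait_event(self.dpl.get_vdev_status,
    #                               volume['id'], event_uuid)
    #     if status['state'] != 'available':
    #         raise exception.VolumeBackendAPIException(data=msg)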
def _join_volume_group(self, volume, cgId):
# Join volume group if consistency group id not empty
msg = ''
try:
ret, output = self.dpl.join_vg(
self._conver_uuid2hex(volume['id']),
self._conver_uuid2hex(cgId))
except Exception as e:
ret = errno.EFAULT
            msg = _('Flexvisor failed to add volume %(id)s '
'due to %(reason)s.') % {"id": volume['id'],
"reason": six.text_type(e)}
if ret:
if not msg:
msg = _('Flexvisor failed to add volume %(id)s '
'to group %(cgid)s.') % {'id': volume['id'],
'cgid': cgId}
raise exception.VolumeBackendAPIException(data=msg)
else:
LOG.info(_LI('Flexvisor succeeded to add volume %(id)s to '
'group %(cgid)s.'),
{'id': volume['id'], 'cgid': cgId})
def _leave_volume_group(self, volume, cgId):
# Leave volume group if consistency group id not empty
msg = ''
try:
ret, output = self.dpl.leave_vg(
self._conver_uuid2hex(volume['id']),
self._conver_uuid2hex(cgId))
except Exception as e:
ret = errno.EFAULT
            msg = _('Flexvisor failed to remove volume %(id)s '
'due to %(reason)s.') % {"id": volume['id'],
"reason": six.text_type(e)}
if ret:
if not msg:
msg = _('Flexvisor failed to remove volume %(id)s '
'from group %(cgid)s.') % {'id': volume['id'],
'cgid': cgId}
raise exception.VolumeBackendAPIException(data=msg)
else:
LOG.info(_LI('Flexvisor succeeded to remove volume %(id)s from '
'group %(cgid)s.'),
{'id': volume['id'], 'cgid': cgId})
def _get_snapshotid_of_vgsnapshot(self, vgID, vgsnapshotID, volumeID):
snapshotID = None
ret, out = self.dpl.query_vdev_snapshot(vgID, vgsnapshotID, True)
if ret == 0:
volumes = out.get('metadata', {}).get('member', {})
if volumes:
snapshotID = volumes.get(volumeID, None)
else:
msg = _('Flexvisor failed to get snapshot id of volume '
'%(id)s from group %(vgid)s.') % {'id': volumeID,
'vgid': vgID}
raise exception.VolumeBackendAPIException(data=msg)
if not snapshotID:
msg = _('Flexvisor could not find volume %(id)s snapshot in'
' the group %(vgid)s snapshot '
'%(vgsid)s.') % {'id': volumeID, 'vgid': vgID,
'vgsid': vgsnapshotID}
raise exception.VolumeBackendAPIException(data=msg)
return snapshotID
def create_export(self, context, volume, connector):
pass
def ensure_export(self, context, volume):
pass
def remove_export(self, context, volume):
pass
def create_consistencygroup(self, context, group):
"""Creates a consistencygroup."""
LOG.info(_LI('Start to create consistency group: %(group_name)s '
'id: %(id)s'),
{'group_name': group['name'], 'id': group['id']})
model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
try:
ret, output = self.dpl.create_vg(
self._conver_uuid2hex(group['id']),
group['name'],
group['description'])
if ret:
msg = _('Failed to create consistency group '
'%(id)s:%(ret)s.') % {'id': group['id'],
'ret': ret}
raise exception.VolumeBackendAPIException(data=msg)
else:
return model_update
except Exception as e:
msg = _('Failed to create consistency group '
'%(id)s due to %(reason)s.') % {'id': group['id'],
'reason': six.text_type(e)}
raise exception.VolumeBackendAPIException(data=msg)
def delete_consistencygroup(self, context, group, volumes):
"""Delete a consistency group."""
ret = 0
volumes = self.db.volume_get_all_by_group(
context, group['id'])
model_update = {}
model_update['status'] = group['status']
LOG.info(_LI('Start to delete consistency group: %(cg_name)s'),
{'cg_name': group['id']})
try:
self.dpl.delete_vg(self._conver_uuid2hex(group['id']))
except Exception as e:
msg = _('Failed to delete consistency group %(id)s '
'due to %(reason)s.') % {'id': group['id'],
'reason': six.text_type(e)}
raise exception.VolumeBackendAPIException(data=msg)
for volume_ref in volumes:
try:
self.dpl.delete_vdev(self._conver_uuid2hex(volume_ref['id']))
volume_ref['status'] = 'deleted'
except Exception:
ret = errno.EFAULT
volume_ref['status'] = 'error_deleting'
model_update['status'] = (
fields.ConsistencyGroupStatus.ERROR_DELETING)
if ret == 0:
model_update['status'] = fields.ConsistencyGroupStatus.DELETED
return model_update, volumes
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Creates a cgsnapshot."""
snapshots = objects.SnapshotList().get_all_for_cgsnapshot(
context, cgsnapshot['id'])
model_update = {}
LOG.info(_LI('Start to create cgsnapshot for consistency group'
': %(group_name)s'),
{'group_name': cgsnapshot['consistencygroup_id']})
try:
self.dpl.create_vdev_snapshot(
self._conver_uuid2hex(cgsnapshot['consistencygroup_id']),
self._conver_uuid2hex(cgsnapshot['id']),
cgsnapshot['name'],
cgsnapshot.get('description', ''),
True)
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.AVAILABLE
except Exception as e:
msg = _('Failed to create cg snapshot %(id)s '
'due to %(reason)s.') % {'id': cgsnapshot['id'],
'reason': six.text_type(e)}
raise exception.VolumeBackendAPIException(data=msg)
model_update['status'] = 'available'
return model_update, snapshots
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Deletes a cgsnapshot."""
snapshots = objects.SnapshotList().get_all_for_cgsnapshot(
context, cgsnapshot['id'])
model_update = {}
model_update['status'] = cgsnapshot['status']
LOG.info(_LI('Delete cgsnapshot %(snap_name)s for consistency group: '
'%(group_name)s'),
{'snap_name': cgsnapshot['id'],
'group_name': cgsnapshot['consistencygroup_id']})
try:
self.dpl.delete_vdev_snapshot(
self._conver_uuid2hex(cgsnapshot['consistencygroup_id']),
self._conver_uuid2hex(cgsnapshot['id']), True)
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.DELETED
except Exception as e:
msg = _('Failed to delete cgsnapshot %(id)s due to '
'%(reason)s.') % {'id': cgsnapshot['id'],
'reason': six.text_type(e)}
raise exception.VolumeBackendAPIException(data=msg)
model_update['status'] = 'deleted'
return model_update, snapshots
def update_consistencygroup(self, context, group, add_volumes=None,
remove_volumes=None):
addvollist = []
removevollist = []
cgid = group['id']
vid = ''
model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
# Get current group info in backend storage.
ret, output = self.dpl.get_vg(self._conver_uuid2hex(cgid))
if ret == 0:
group_members = output.get('children', [])
if add_volumes:
addvollist = add_volumes
if remove_volumes:
removevollist = remove_volumes
# Process join volumes.
try:
for volume in addvollist:
vid = volume['id']
# Verify the volume exists in the group or not.
if self._conver_uuid2hex(vid) in group_members:
continue
self._join_volume_group(volume, cgid)
except Exception as e:
msg = _("Fexvisor failed to join the volume %(vol)s in the "
"group %(group)s due to "
"%(ret)s.") % {"vol": vid, "group": cgid,
"ret": six.text_type(e)}
raise exception.VolumeBackendAPIException(data=msg)
# Process leave volumes.
try:
for volume in removevollist:
vid = volume['id']
if self._conver_uuid2hex(vid) in group_members:
self._leave_volume_group(volume, cgid)
except Exception as e:
msg = _("Fexvisor failed to remove the volume %(vol)s in the "
"group %(group)s due to "
"%(ret)s.") % {"vol": vid, "group": cgid,
"ret": six.text_type(e)}
raise exception.VolumeBackendAPIException(data=msg)
return model_update, None, None
def create_volume(self, volume):
"""Create a volume."""
pool = volume_utils.extract_host(volume['host'],
level='pool')
if not pool:
if not self.configuration.dpl_pool:
msg = _("Pool is not available in the volume host fields.")
raise exception.InvalidHost(reason=msg)
else:
pool = self.configuration.dpl_pool
ret, output = self.dpl.create_vdev(
self._conver_uuid2hex(volume['id']),
volume.get('display_name', ''),
volume.get('display_description', ''),
pool,
int(volume['size']) * units.Gi,
self.configuration.san_thin_provision)
if ret == errno.EAGAIN:
ret, event_uuid = self._get_event_uuid(output)
if ret == 0:
status = self._wait_event(self.dpl.get_vdev_status,
volume['id'],
event_uuid)
if status['state'] != 'available':
msg = _('Flexvisor failed to create volume %(volume)s: '
'%(status)s.') % {'volume': volume['id'],
'status': ret}
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _('Flexvisor failed to create volume (get event) '
'%s.') % (volume['id'])
raise exception.VolumeBackendAPIException(
data=msg)
elif ret != 0:
            msg = _('Flexvisor failed to create volume %(volumeid)s: '
                    '%(status)s.') % {'volumeid': volume['id'],
                                      'status': ret}
raise exception.VolumeBackendAPIException(
data=msg)
else:
LOG.info(_LI('Flexvisor succeeded to create volume %(id)s.'),
{'id': volume['id']})
if volume.get('consistencygroup_id', None):
try:
self._join_volume_group(volume, volume['consistencygroup_id'])
except Exception:
# Delete volume if volume failed to join group.
self.dpl.delete_vdev(self._conver_uuid2hex(volume['id']))
msg = _('Flexvisor failed to create volume %(id)s in the '
'group %(vgid)s.') % {
'id': volume['id'],
'vgid': volume['consistencygroup_id']}
raise exception.VolumeBackendAPIException(data=msg)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
src_volume = None
vgID = None
# Detect whether a member of the group.
snapshotID = snapshot['id']
# Try to get cgid if volume belong in the group.
src_volumeID = snapshot['volume_id']
cgsnapshotID = snapshot.get('cgsnapshot_id', None)
if cgsnapshotID:
try:
src_volume = self.db.volume_get(src_volumeID)
except Exception:
msg = _("Flexvisor unable to find the source volume "
"%(id)s info.") % {'id': src_volumeID}
raise exception.VolumeBackendAPIException(data=msg)
if src_volume:
vgID = src_volume.get('consistencygroup_id', None)
# Get the volume origin snapshot id if the source snapshot is group
# snapshot.
if vgID:
snapshotID = self._get_snapshotid_of_vgsnapshot(
self._conver_uuid2hex(vgID),
self._conver_uuid2hex(cgsnapshotID),
self._conver_uuid2hex(src_volumeID))
pool = volume_utils.extract_host(volume['host'],
level='pool')
if not pool:
if not self.configuration.dpl_pool:
msg = _("Pool is not available in the volume host fields.")
raise exception.InvalidHost(reason=msg)
else:
pool = self.configuration.dpl_pool
ret, output = self.dpl.create_vdev_from_snapshot(
self._conver_uuid2hex(volume['id']),
volume.get('display_name', ''),
volume.get('display_description', ''),
self._conver_uuid2hex(snapshotID),
pool,
self.configuration.san_thin_provision)
if ret == errno.EAGAIN:
ret, event_uuid = self._get_event_uuid(output)
if ret == 0:
status = self._wait_event(self.dpl.get_vdev_status,
volume['id'],
event_uuid)
if status['state'] != 'available':
msg = _('Flexvisor failed to create volume from '
'snapshot %(id)s:'
'%(status)s.') % {'id': snapshot['id'],
'status': ret}
raise exception.VolumeBackendAPIException(
data=msg)
else:
msg = _('Flexvisor failed to create volume from snapshot '
'(failed to get event) '
'%(id)s.') % {'id': snapshot['id']}
raise exception.VolumeBackendAPIException(data=msg)
elif ret != 0:
msg = _('Flexvisor failed to create volume from snapshot '
'%(id)s: %(status)s.') % {'id': snapshot['id'],
'status': ret}
raise exception.VolumeBackendAPIException(
data=msg)
else:
LOG.info(_LI('Flexvisor succeeded to create volume %(id)s '
'from snapshot.'), {'id': volume['id']})
if volume.get('consistencygroup_id', None):
try:
self._join_volume_group(volume, volume['consistencygroup_id'])
except Exception:
# Delete volume if volume failed to join group.
self.dpl.delete_vdev(self._conver_uuid2hex(volume['id']))
raise
def spawn_volume_from_snapshot(self, volume, snapshot):
"""Spawn a REFERENCED volume from a snapshot."""
ret, output = self.dpl.spawn_vdev_from_snapshot(
self._conver_uuid2hex(volume['id']),
self._conver_uuid2hex(snapshot['volume_id']),
volume.get('display_name', ''),
volume.get('display_description', ''),
self._conver_uuid2hex(snapshot['id']))
if ret == errno.EAGAIN:
            # it's an async process
ret, event_uuid = self._get_event_uuid(output)
if ret == 0:
status = self._wait_event(self.dpl.get_vdev_status,
volume['id'], event_uuid)
if status['state'] != 'available':
msg = _('Flexvisor failed to spawn volume from snapshot '
'%(id)s:%(status)s.') % {'id': snapshot['id'],
'status': ret}
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _('Flexvisor failed to spawn volume from snapshot '
'(failed to get event) '
'%(id)s.') % {'id': snapshot['id']}
raise exception.VolumeBackendAPIException(data=msg)
elif ret != 0:
msg = _('Flexvisor failed to create volume from snapshot '
'%(id)s: %(status)s.') % {'id': snapshot['id'],
'status': ret}
raise exception.VolumeBackendAPIException(
data=msg)
else:
LOG.info(_LI('Flexvisor succeeded to create volume %(id)s '
'from snapshot.'), {'id': volume['id']})
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
pool = volume_utils.extract_host(volume['host'],
level='pool')
if not pool:
if not self.configuration.dpl_pool:
msg = _("Pool is not available in the volume host fields.")
raise exception.InvalidHost(reason=msg)
else:
pool = self.configuration.dpl_pool
ret, output = self.dpl.clone_vdev(
self._conver_uuid2hex(src_vref['id']),
self._conver_uuid2hex(volume['id']),
pool,
volume.get('display_name', ''),
volume.get('display_description', ''),
int(volume['size']) * units.Gi,
self.configuration.san_thin_provision)
if ret == errno.EAGAIN:
ret, event_uuid = self._get_event_uuid(output)
if ret == 0:
status = self._wait_event(self.dpl.get_vdev_status,
volume['id'],
event_uuid)
if status['state'] != 'available':
msg = _('Flexvisor failed to clone volume %(id)s: '
'%(status)s.') % {'id': src_vref['id'],
'status': ret}
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _('Flexvisor failed to clone volume (failed to'
' get event) %(id)s.') % {'id': src_vref['id']}
raise exception.VolumeBackendAPIException(
data=msg)
elif ret != 0:
msg = _('Flexvisor failed to clone volume %(id)s: '
'%(status)s.') % {'id': src_vref['id'], 'status': ret}
raise exception.VolumeBackendAPIException(
data=msg)
else:
LOG.info(_LI('Flexvisor succeeded to clone volume %(id)s.'),
{'id': volume['id']})
if volume.get('consistencygroup_id', None):
try:
self._join_volume_group(volume, volume['consistencygroup_id'])
except Exception:
# Delete volume if volume failed to join group.
self.dpl.delete_vdev(self._conver_uuid2hex(volume['id']))
msg = _('Flexvisor volume %(id)s failed to join group '
'%(vgid)s.') % {'id': volume['id'],
'vgid': volume['consistencygroup_id']}
raise exception.VolumeBackendAPIException(data=msg)
def delete_volume(self, volume):
"""Deletes a volume."""
ret = 0
if volume.get('consistencygroup_id', None):
msg = ''
try:
ret, out = self.dpl.leave_vg(
self._conver_uuid2hex(volume['id']),
self._conver_uuid2hex(volume['consistencygroup_id']))
if ret:
LOG.warning(_LW('Flexvisor failed to delete volume '
'%(id)s from the group %(vgid)s.'),
{'id': volume['id'],
'vgid': volume['consistencygroup_id']})
except Exception as e:
LOG.warning(_LW('Flexvisor failed to delete volume %(id)s '
'from group %(vgid)s due to %(status)s.'),
{'id': volume['id'],
'vgid': volume['consistencygroup_id'],
'status': e})
if ret:
ret = 0
ret, output = self.dpl.delete_vdev(self._conver_uuid2hex(volume['id']))
if ret == errno.EAGAIN:
status = self._wait_event(self.dpl.get_vdev, volume['id'])
if status['state'] == 'error':
msg = _('Flexvisor failed deleting volume %(id)s: '
'%(status)s.') % {'id': volume['id'], 'status': ret}
raise exception.VolumeBackendAPIException(data=msg)
elif ret == errno.ENODATA:
ret = 0
LOG.info(_LI('Flexvisor volume %(id)s does not '
'exist.'), {'id': volume['id']})
elif ret != 0:
msg = _('Flexvisor failed to delete volume %(id)s: '
'%(status)s.') % {'id': volume['id'], 'status': ret}
raise exception.VolumeBackendAPIException(
data=msg)
def extend_volume(self, volume, new_size):
ret, output = self.dpl.extend_vdev(self._conver_uuid2hex(volume['id']),
volume.get('display_name', ''),
volume.get('display_description',
''),
new_size * units.Gi)
if ret == errno.EAGAIN:
ret, event_uuid = self._get_event_uuid(output)
if ret == 0:
status = self._wait_event(self.dpl.get_vdev_status,
volume['id'],
event_uuid)
if status['state'] != 'available':
msg = _('Flexvisor failed to extend volume '
                            '%(id)s:%(status)s.') % {'id': volume['id'],
'status': ret}
raise exception.VolumeBackendAPIException(
data=msg)
else:
msg = _('Flexvisor failed to extend volume '
'(failed to get event) '
'%(id)s.') % {'id': volume['id']}
raise exception.VolumeBackendAPIException(data=msg)
elif ret != 0:
msg = _('Flexvisor failed to extend volume '
'%(id)s: %(status)s.') % {'id': volume['id'],
'status': ret}
raise exception.VolumeBackendAPIException(
data=msg)
else:
LOG.info(_LI('Flexvisor succeeded to extend volume'
' %(id)s.'), {'id': volume['id']})
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
ret, output = self.dpl.create_vdev_snapshot(
self._conver_uuid2hex(snapshot['volume_id']),
self._conver_uuid2hex(snapshot['id']),
snapshot.get('display_name', ''),
snapshot.get('display_description', ''))
if ret == errno.EAGAIN:
ret, event_uuid = self._get_event_uuid(output)
if ret == 0:
status = self._wait_event(self.dpl.get_vdev_status,
snapshot['volume_id'],
event_uuid)
if status['state'] != 'available':
msg = (_('Flexvisor failed to create snapshot for volume '
'%(id)s: %(status)s.') %
{'id': snapshot['volume_id'], 'status': ret})
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = (_('Flexvisor failed to create snapshot for volume '
'(failed to get event) %(id)s.') %
{'id': snapshot['volume_id']})
raise exception.VolumeBackendAPIException(data=msg)
elif ret != 0:
msg = _('Flexvisor failed to create snapshot for volume %(id)s: '
'%(status)s.') % {'id': snapshot['volume_id'],
'status': ret}
raise exception.VolumeBackendAPIException(data=msg)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
ret, output = self.dpl.delete_vdev_snapshot(
self._conver_uuid2hex(snapshot['volume_id']),
self._conver_uuid2hex(snapshot['id']))
if ret == errno.EAGAIN:
ret, event_uuid = self._get_event_uuid(output)
if ret == 0:
status = self._wait_event(self.dpl.get_vdev_status,
snapshot['volume_id'],
event_uuid)
if status['state'] != 'available':
msg = _('Flexvisor failed to delete snapshot %(id)s: '
'%(status)s.') % {'id': snapshot['id'],
'status': ret}
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _('Flexvisor failed to delete snapshot (failed to '
'get event) %(id)s.') % {'id': snapshot['id']}
raise exception.VolumeBackendAPIException(data=msg)
elif ret == errno.ENODATA:
            LOG.info(_LI('Flexvisor snapshot %(id)s does not exist.'),
{'id': snapshot['id']})
elif ret != 0:
msg = _('Flexvisor failed to delete snapshot %(id)s: '
'%(status)s.') % {'id': snapshot['id'], 'status': ret}
raise exception.VolumeBackendAPIException(data=msg)
else:
LOG.info(_LI('Flexvisor succeeded to delete snapshot %(id)s.'),
{'id': snapshot['id']})
def get_volume_stats(self, refresh=False):
"""Get volume stats.
        If 'refresh' is True, update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def _get_pools(self):
pools = []
qpools = []
# Defined access pool by cinder configuration.
defined_pool = self.configuration.dpl_pool
if defined_pool:
qpools.append(defined_pool)
else:
try:
ret, output = self.dpl.get_pools()
if ret == 0:
for poolUuid, poolName in output.get('children', []):
qpools.append(poolUuid)
else:
LOG.error(_LE("Flexvisor failed to get pool list."
"(Error: %d)"), ret)
except Exception as e:
LOG.error(_LE("Flexvisor failed to get pool list due to "
"%s."), e)
# Query pool detail information
for poolid in qpools:
ret, output = self._get_pool_info(poolid)
if ret == 0:
pool = {}
pool['pool_name'] = output['metadata']['pool_uuid']
pool['total_capacity_gb'] = (
self._convert_size_GB(
int(output['metadata']['total_capacity'])))
pool['free_capacity_gb'] = (
self._convert_size_GB(
int(output['metadata']['available_capacity'])))
pool['allocated_capacity_gb'] = (
self._convert_size_GB(
int(output['metadata']['used_capacity'])))
pool['QoS_support'] = False
pool['reserved_percentage'] = 0
pools.append(pool)
else:
LOG.warning(_LW("Failed to query pool %(id)s status "
"%(ret)d."), {'id': poolid, 'ret': ret})
continue
return pools
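    # Illustrative sketch (not part of the original driver code): each entry
    # appended to ``pools`` above follows the usual Cinder pool-stats layout;
    # with hypothetical capacity values it looks like:
    #
    #     {'pool_name': '<pool uuid>',
    #      'total_capacity_gb': 100.0,
    #      'free_capacity_gb': 80.0,
    #      'allocated_capacity_gb': 20.0,
    #      'QoS_support': False,
    #      'reserved_percentage': 0}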
def _update_volume_stats(self, refresh=False):
"""Return the current state of the volume service.
If 'refresh' is True, run the update first.
"""
data = {}
pools = self._get_pools()
data['volume_backend_name'] = (
self.configuration.safe_get('volume_backend_name'))
location_info = '%(driver)s:%(host)s:%(volume)s' % {
'driver': self.__class__.__name__,
'host': self.configuration.san_ip,
'volume': self.configuration.dpl_pool
}
try:
ret, output = self.dpl.get_server_info()
if ret == 0:
data['vendor_name'] = output['metadata']['vendor']
data['driver_version'] = output['metadata']['version']
data['storage_protocol'] = 'iSCSI'
data['location_info'] = location_info
data['consistencygroup_support'] = True
data['pools'] = pools
self._stats = data
except Exception as e:
LOG.error(_LE('Failed to get server info due to '
'%(state)s.'), {'state': e})
return self._stats
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
self.context = context
LOG.info(_LI('Activate Flexvisor cinder volume driver.'))
def check_for_setup_error(self):
"""Check DPL can connect properly."""
pass
def _get_pool_info(self, poolid):
"""Query pool information."""
ret, output = self.dpl.get_pool(poolid)
if ret == errno.EAGAIN:
ret, event_uuid = self._get_event_uuid(output)
if ret == 0:
status = self._wait_event(self.dpl.get_pool_status, poolid,
event_uuid)
if status['state'] != 'available':
msg = _('Flexvisor failed to get pool info %(id)s: '
'%(status)s.') % {'id': poolid, 'status': ret}
raise exception.VolumeBackendAPIException(data=msg)
else:
ret = 0
output = status.get('output', {})
else:
LOG.error(_LE('Flexvisor failed to get pool %(id)s info.'),
{'id': poolid})
raise exception.VolumeBackendAPIException(
data="failed to get event")
elif ret != 0:
msg = _('Flexvisor failed to get pool info %(id)s: '
'%(status)s.') % {'id': poolid, 'status': ret}
raise exception.VolumeBackendAPIException(data=msg)
else:
LOG.debug('Flexvisor succeeded to get pool info.')
return ret, output
|
{
"content_hash": "6a7bbd3527521877fcdee7b1434d66e4",
"timestamp": "",
"source": "github",
"line_count": 1483,
"max_line_length": 79,
"avg_line_length": 41.80512474713419,
"alnum_prop": 0.4914270045324774,
"repo_name": "bswartz/cinder",
"id": "4c5c9ca5c633f2aba0f4eca0dd9d3ce510839757",
"size": "62633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/prophetstor/dplcommon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16345375"
},
{
"name": "Shell",
"bytes": "8187"
}
],
"symlink_target": ""
}
|
from pyparsing import oneOf, OneOrMore, printables, StringEnd
test = "The quick brown fox named 'Aloysius' lives at 123 Main Street (and jumps over lazy dogs in his spare time)."
nonAlphas = [ c for c in printables if not c.isalpha() ]
print("Extract vowels, consonants, and special characters from this test string:")
print("'" + test + "'")
print()
print("Define grammar using normal results names")
print("(only last matching symbol is saved)")
vowels = oneOf(list("aeiouy"), caseless=True).setResultsName("vowels")
cons = oneOf(list("bcdfghjklmnpqrstvwxz"), caseless=True).setResultsName("cons")
other = oneOf(list(nonAlphas)).setResultsName("others")
letters = OneOrMore(cons | vowels | other) + StringEnd()
results = letters.parseString(test)
print(results)
print(results.vowels)
print(results.cons)
print(results.others)
print()
print("Define grammar using results names, with listAllMatches=True")
print("(all matching symbols are saved)")
vowels = oneOf(list("aeiouy"), caseless=True).setResultsName("vowels",listAllMatches=True)
cons = oneOf(list("bcdfghjklmnpqrstvwxz"), caseless=True).setResultsName("cons",listAllMatches=True)
other = oneOf(list(nonAlphas)).setResultsName("others",listAllMatches=True)
letters = OneOrMore(cons | vowels | other) + StringEnd()
results = letters.parseString(test)
print(results)
print(sorted(list(set(results))))
print()
print(results.vowels)
print(sorted(list(set(results.vowels))))
print()
print(results.cons)
print(sorted(list(set(results.cons))))
print()
print(results.others)
print(sorted(list(set(results.others))))
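# Illustrative note (not part of the original example): with plain results
# names, ``results.vowels`` only holds the last vowel matched, whereas the
# ``listAllMatches=True`` variant collects every match, e.g.
#
#     oneOf(list("aeiouy"), caseless=True).setResultsName("vowels",
#                                                         listAllMatches=True)
#
# which is why the second round of prints shows the full lists of vowels,
# consonants and other characters found in the test sentence.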
|
{
"content_hash": "fd726c3af658a406f498dc3ee29f3003",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 116,
"avg_line_length": 35.022222222222226,
"alnum_prop": 0.758248730964467,
"repo_name": "nzavagli/UnrealPy",
"id": "241876c5faf4c6d13e3685794c9dcf9e9a33b0b6",
"size": "1736",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/pyparsing-2.0.3/examples/listAllMatches.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "APL",
"bytes": "587"
},
{
"name": "ASP",
"bytes": "2753"
},
{
"name": "ActionScript",
"bytes": "5686"
},
{
"name": "Ada",
"bytes": "94225"
},
{
"name": "Agda",
"bytes": "3154"
},
{
"name": "Alloy",
"bytes": "6579"
},
{
"name": "ApacheConf",
"bytes": "12482"
},
{
"name": "AppleScript",
"bytes": "421"
},
{
"name": "Assembly",
"bytes": "1093261"
},
{
"name": "AutoHotkey",
"bytes": "3733"
},
{
"name": "AutoIt",
"bytes": "667"
},
{
"name": "Awk",
"bytes": "63276"
},
{
"name": "Batchfile",
"bytes": "147828"
},
{
"name": "BlitzBasic",
"bytes": "185102"
},
{
"name": "BlitzMax",
"bytes": "2387"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "Bro",
"bytes": "7337"
},
{
"name": "C",
"bytes": "108397183"
},
{
"name": "C#",
"bytes": "156749"
},
{
"name": "C++",
"bytes": "13535833"
},
{
"name": "CLIPS",
"bytes": "6933"
},
{
"name": "CMake",
"bytes": "12441"
},
{
"name": "COBOL",
"bytes": "114812"
},
{
"name": "CSS",
"bytes": "430375"
},
{
"name": "Ceylon",
"bytes": "1387"
},
{
"name": "Chapel",
"bytes": "4366"
},
{
"name": "Cirru",
"bytes": "2574"
},
{
"name": "Clean",
"bytes": "9679"
},
{
"name": "Clojure",
"bytes": "23871"
},
{
"name": "CoffeeScript",
"bytes": "20149"
},
{
"name": "ColdFusion",
"bytes": "9006"
},
{
"name": "Common Lisp",
"bytes": "49017"
},
{
"name": "Coq",
"bytes": "66"
},
{
"name": "Cucumber",
"bytes": "390"
},
{
"name": "Cuda",
"bytes": "776"
},
{
"name": "D",
"bytes": "7556"
},
{
"name": "DIGITAL Command Language",
"bytes": "425938"
},
{
"name": "DTrace",
"bytes": "6706"
},
{
"name": "Dart",
"bytes": "591"
},
{
"name": "Dylan",
"bytes": "6343"
},
{
"name": "Ecl",
"bytes": "2599"
},
{
"name": "Eiffel",
"bytes": "2145"
},
{
"name": "Elixir",
"bytes": "4340"
},
{
"name": "Emacs Lisp",
"bytes": "18303"
},
{
"name": "Erlang",
"bytes": "5746"
},
{
"name": "F#",
"bytes": "19156"
},
{
"name": "FORTRAN",
"bytes": "38458"
},
{
"name": "Factor",
"bytes": "10194"
},
{
"name": "Fancy",
"bytes": "2581"
},
{
"name": "Fantom",
"bytes": "25331"
},
{
"name": "GAP",
"bytes": "29880"
},
{
"name": "GLSL",
"bytes": "450"
},
{
"name": "Gnuplot",
"bytes": "11501"
},
{
"name": "Go",
"bytes": "5444"
},
{
"name": "Golo",
"bytes": "1649"
},
{
"name": "Gosu",
"bytes": "2853"
},
{
"name": "Groff",
"bytes": "3458639"
},
{
"name": "Groovy",
"bytes": "2586"
},
{
"name": "HTML",
"bytes": "92126540"
},
{
"name": "Haskell",
"bytes": "49593"
},
{
"name": "Haxe",
"bytes": "16812"
},
{
"name": "Hy",
"bytes": "7237"
},
{
"name": "IDL",
"bytes": "2098"
},
{
"name": "Idris",
"bytes": "2771"
},
{
"name": "Inform 7",
"bytes": "1944"
},
{
"name": "Inno Setup",
"bytes": "18796"
},
{
"name": "Ioke",
"bytes": "469"
},
{
"name": "Isabelle",
"bytes": "21392"
},
{
"name": "Jasmin",
"bytes": "9428"
},
{
"name": "Java",
"bytes": "4040623"
},
{
"name": "JavaScript",
"bytes": "223927"
},
{
"name": "Julia",
"bytes": "27687"
},
{
"name": "KiCad",
"bytes": "475"
},
{
"name": "Kotlin",
"bytes": "971"
},
{
"name": "LSL",
"bytes": "160"
},
{
"name": "Lasso",
"bytes": "18650"
},
{
"name": "Lean",
"bytes": "6921"
},
{
"name": "Limbo",
"bytes": "9891"
},
{
"name": "Liquid",
"bytes": "862"
},
{
"name": "LiveScript",
"bytes": "972"
},
{
"name": "Logos",
"bytes": "19509"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Makefile",
"bytes": "2053844"
},
{
"name": "Mask",
"bytes": "815"
},
{
"name": "Mathematica",
"bytes": "191"
},
{
"name": "Max",
"bytes": "296"
},
{
"name": "Modelica",
"bytes": "6213"
},
{
"name": "Modula-2",
"bytes": "23838"
},
{
"name": "Module Management System",
"bytes": "14798"
},
{
"name": "Monkey",
"bytes": "2587"
},
{
"name": "Moocode",
"bytes": "3343"
},
{
"name": "MoonScript",
"bytes": "14862"
},
{
"name": "Myghty",
"bytes": "3939"
},
{
"name": "NSIS",
"bytes": "7663"
},
{
"name": "Nemerle",
"bytes": "1517"
},
{
"name": "NewLisp",
"bytes": "42726"
},
{
"name": "Nimrod",
"bytes": "37191"
},
{
"name": "Nit",
"bytes": "55581"
},
{
"name": "Nix",
"bytes": "2448"
},
{
"name": "OCaml",
"bytes": "42416"
},
{
"name": "Objective-C",
"bytes": "104883"
},
{
"name": "Objective-J",
"bytes": "15340"
},
{
"name": "Opa",
"bytes": "172"
},
{
"name": "OpenEdge ABL",
"bytes": "49943"
},
{
"name": "PAWN",
"bytes": "6555"
},
{
"name": "PHP",
"bytes": "68611"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Pan",
"bytes": "1241"
},
{
"name": "Pascal",
"bytes": "349743"
},
{
"name": "Perl",
"bytes": "5931502"
},
{
"name": "Perl6",
"bytes": "113623"
},
{
"name": "PigLatin",
"bytes": "6657"
},
{
"name": "Pike",
"bytes": "8479"
},
{
"name": "PostScript",
"bytes": "18216"
},
{
"name": "PowerShell",
"bytes": "14236"
},
{
"name": "Prolog",
"bytes": "43750"
},
{
"name": "Protocol Buffer",
"bytes": "3401"
},
{
"name": "Puppet",
"bytes": "130"
},
{
"name": "Python",
"bytes": "122886156"
},
{
"name": "QML",
"bytes": "3912"
},
{
"name": "R",
"bytes": "49247"
},
{
"name": "Racket",
"bytes": "11341"
},
{
"name": "Rebol",
"bytes": "17708"
},
{
"name": "Red",
"bytes": "10536"
},
{
"name": "Redcode",
"bytes": "830"
},
{
"name": "Ruby",
"bytes": "91403"
},
{
"name": "Rust",
"bytes": "6788"
},
{
"name": "SAS",
"bytes": "15603"
},
{
"name": "SaltStack",
"bytes": "1040"
},
{
"name": "Scala",
"bytes": "730"
},
{
"name": "Scheme",
"bytes": "50346"
},
{
"name": "Scilab",
"bytes": "943"
},
{
"name": "Shell",
"bytes": "2925097"
},
{
"name": "ShellSession",
"bytes": "320"
},
{
"name": "Smali",
"bytes": "832"
},
{
"name": "Smalltalk",
"bytes": "158636"
},
{
"name": "Smarty",
"bytes": "523"
},
{
"name": "SourcePawn",
"bytes": "130"
},
{
"name": "Standard ML",
"bytes": "36869"
},
{
"name": "Swift",
"bytes": "2035"
},
{
"name": "SystemVerilog",
"bytes": "265"
},
{
"name": "Tcl",
"bytes": "6077233"
},
{
"name": "TeX",
"bytes": "487999"
},
{
"name": "Tea",
"bytes": "391"
},
{
"name": "TypeScript",
"bytes": "535"
},
{
"name": "VHDL",
"bytes": "4446"
},
{
"name": "VimL",
"bytes": "32053"
},
{
"name": "Visual Basic",
"bytes": "19441"
},
{
"name": "XQuery",
"bytes": "4289"
},
{
"name": "XS",
"bytes": "178055"
},
{
"name": "XSLT",
"bytes": "1995174"
},
{
"name": "Xtend",
"bytes": "727"
},
{
"name": "Yacc",
"bytes": "25665"
},
{
"name": "Zephir",
"bytes": "485"
},
{
"name": "eC",
"bytes": "31545"
},
{
"name": "mupad",
"bytes": "2442"
},
{
"name": "nesC",
"bytes": "23697"
},
{
"name": "xBase",
"bytes": "3349"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
from cms.api import create_page, add_plugin
from cms.models import Page
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from djangocms_text_ckeditor.models import Text
from cms.tests.plugins import PluginsTestBaseCase
from cms.test_utils.util.context_managers import SettingsOverride
from cms.utils.copy_plugins import copy_plugins_to
from cms.utils.compat.tests import UnittestCompatMixin
URL_CMS_MOVE_PLUGIN = u'/en/admin/cms/page/%d/move-plugin/'
class NestedPluginsTestCase(PluginsTestBaseCase, UnittestCompatMixin):
def copy_placeholders_and_check_results(self, placeholders):
"""
This function is not itself a test; rather, it can be used by any test
        that has created placeholders. It checks that, whatever the plugin
        structure in a placeholder, that structure is copied accurately when
        the placeholder itself is copied.
placeholders is a list of placeholders
"""
for original_placeholder in placeholders:
# get the plugins
original_plugins = original_placeholder.get_plugins()
# copy them to a new placeholder
copied_placeholder = Placeholder.objects.create(slot=original_placeholder.slot)
copy_plugins_to(
original_placeholder.get_plugins(),
copied_placeholder
)
copied_plugins = copied_placeholder.get_plugins()
# we should find the same number of plugins in both placeholders
self.assertEquals(
original_plugins.count(),
copied_plugins.count()
)
# quick check: make sure the two querysets match:
for original, copy in zip(original_plugins, copied_plugins):
self.assertEquals(
Text.objects.get(id=original.id).body,
Text.objects.get(id=copy.id).body
)
# Now build a *tree* of the plugins, and match those - it's not
# enough just to compare querysets as above; we should *also* check
# that when we build a tree, the various nodes are assembled as we
# would expect. We will pump the trees into a pair of lists:
original_plugins_list = []
copied_plugins_list = []
# This function builds the tree of plugins, starting from its roots.
# In that respect it's like many of the plugin tree-building
# routines elsewhere in the system.
def plugin_list_from_tree(roots, plugin_list):
for plugin in roots:
plugin_list.append(plugin)
# recurse over the set of nodes
plugin_list_from_tree(plugin.get_children(), plugin_list)
# build the tree for each set of plugins
plugin_list_from_tree(original_plugins.filter(level=0), original_plugins_list)
plugin_list_from_tree(copied_plugins.filter(level=0), copied_plugins_list)
# Check that each pair of items in the two lists match, in lots of
# different ways
for original, copy in zip(original_plugins_list, copied_plugins_list):
original_text_plugin = Text.objects.get(id=original.id)
copied_text_plugin = Text.objects.get(id=copy.id)
# This first one is a sanity test, just to prove that we aren't
# simply comparing *exactly the same items* in all these tests.
# It could happen...
self.assertNotEquals(original.id, copy.id)
self.assertEquals(
original_text_plugin.body,
copied_text_plugin.body
)
self.assertEquals(
original_text_plugin.level,
copied_text_plugin.level
)
self.assertEquals(
original_text_plugin.position,
copied_text_plugin.position
)
self.assertEquals(
original_text_plugin.rght,
copied_text_plugin.rght
)
self.assertEquals(
original_text_plugin.lft,
copied_text_plugin.lft
)
self.assertEquals(
original_text_plugin.get_descendant_count(),
copied_text_plugin.get_descendant_count()
)
self.assertEquals(
original_text_plugin.get_ancestors().count(),
copied_text_plugin.get_ancestors().count()
)
# just in case the test method that called us wants it:
return copied_placeholder
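    # Illustrative sketch (not part of the original test suite): any test that
    # has built a plugin tree in one or more placeholders can verify the copy
    # logic with a single call, e.g.
    #
    #     placeholder = Placeholder.objects.create(slot=u"some_slot")
    #     add_plugin(placeholder, u"TextPlugin", u"en", body=u"01")
    #     self.copy_placeholders_and_check_results([placeholder])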
def test_plugin_deep_nesting_and_copying(self):
"""
Create a deeply-nested plugin structure, tests its properties, and tests
that it is copied accurately when the placeholder containing them is
copied.
The structure below isn't arbitrary, but has been designed to test
various conditions, including:
* nodes four levels deep
* multiple successive level increases
* multiple successive level decreases
* successive nodes on the same level followed by level changes
* multiple level decreases between successive nodes
* siblings with and without children
* nodes and branches added to the tree out of sequence
First we create the structure:
11
1
2
12
4
10
8
3
9
5
6
7
13
14
and then we move it all around.
"""
placeholder = Placeholder(slot=u"some_slot")
placeholder.save() # a good idea, if not strictly necessary
# plugin in placeholder
plugin_1 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"01",
)
plugin_1.save()
        # IMPORTANT: plugins must be reloaded before they can be assigned
# as a parent. Otherwise, the MPTT structure doesn't seem to rebuild
# properly.
# child of plugin_1
plugin_2 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"02",
)
plugin_1 = self.reload(plugin_1)
plugin_2.parent = plugin_1
plugin_2.save()
# plugin_2 should be plugin_1's only child
# for a single item we use assertSequenceEqual
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_1.pk).get_children(),
[CMSPlugin.objects.get(id=plugin_2.pk)])
# create a second child of plugin_1
plugin_3 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"03",
)
plugin_1 = self.reload(plugin_1)
plugin_3.parent = plugin_1
plugin_3.save()
# plugin_2 & plugin_3 should be plugin_1's children
        # for multiple items we use assertSequenceEqual, which also verifies
        # that the children are returned in the expected order
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_1.pk).get_children(),
[
CMSPlugin.objects.get(id=plugin_2.pk),
CMSPlugin.objects.get(id=plugin_3.pk),
])
# child of plugin_2
plugin_4 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"04",
)
plugin_2 = self.reload(plugin_2)
plugin_4.parent = plugin_2
plugin_4.save()
# plugin_4 should be plugin_2's child
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_2.pk).get_children(),
[CMSPlugin.objects.get(id=plugin_4.pk)])
# 2,3 & 4 should be descendants of 1
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_1.pk).get_descendants(),
[
# note tree_id ordering of MPTT reflected here:
CMSPlugin.objects.get(id=plugin_2.pk),
CMSPlugin.objects.get(id=plugin_4.pk),
CMSPlugin.objects.get(id=plugin_3.pk),
],
)
# create a second root plugin
plugin_5 = add_plugin(placeholder, u"TextPlugin", u"en",
# force this to first-child, to make the tree more challenging
position='first-child',
body=u"05",
)
plugin_5.save()
# child of plugin_5
plugin_6 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"06",
)
plugin_5 = self.reload(plugin_5)
plugin_6.parent = plugin_5
plugin_6.save()
# plugin_6 should be plugin_5's child
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_5.pk).get_children(),
[CMSPlugin.objects.get(id=plugin_6.pk)])
        # second child of plugin_5
plugin_7 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"07",
)
plugin_5 = self.reload(plugin_5)
plugin_7.parent = plugin_5
plugin_7.save()
# plugin_7 should be plugin_5's child
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_5.pk).get_children(),
[
CMSPlugin.objects.get(id=plugin_6.pk),
CMSPlugin.objects.get(id=plugin_7.pk)
])
# 6 & 7 should be descendants of 5
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_5.pk).get_descendants(),
[
CMSPlugin.objects.get(id=plugin_6.pk),
CMSPlugin.objects.get(id=plugin_7.pk),
])
# another child of plugin_2
plugin_8 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"08",
)
plugin_2 = self.reload(plugin_2)
plugin_8.parent = plugin_2
plugin_8.save()
        # plugin_4 and plugin_8 should be plugin_2's children
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_2.pk).get_children(),
[
CMSPlugin.objects.get(id=plugin_4.pk),
CMSPlugin.objects.get(id=plugin_8.pk),
])
# child of plugin_3
plugin_9 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"09",
)
plugin_3 = self.reload(plugin_3)
plugin_9.parent = plugin_3
plugin_9.save()
# plugin_9 should be plugin_3's child
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_3.pk).get_children(),
[CMSPlugin.objects.get(id=plugin_9.pk)])
# child of plugin_4
plugin_10 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"10",
)
plugin_4 = self.reload(plugin_4)
plugin_10.parent = plugin_4
plugin_10.save()
# plugin_10 should be plugin_4's child
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_4.pk).get_children(),
[CMSPlugin.objects.get(id=plugin_10.pk)])
original_plugins = placeholder.get_plugins()
self.assertEquals(original_plugins.count(), 10)
# elder sibling of plugin_1
plugin_1 = self.reload(plugin_1)
plugin_11 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"11",
target=plugin_1,
position="left"
)
plugin_11.save()
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_1.pk).get_children(),
[
CMSPlugin.objects.get(id=plugin_2.pk),
CMSPlugin.objects.get(id=plugin_3.pk)
])
# elder sibling of plugin_4
plugin_4 = self.reload(plugin_4)
plugin_12 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"12",
target=plugin_4,
position="left"
)
plugin_12.save()
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_2.pk).get_children(),
[
CMSPlugin.objects.get(id=plugin_12.pk),
CMSPlugin.objects.get(id=plugin_4.pk),
CMSPlugin.objects.get(id=plugin_8.pk)
])
# younger sibling of plugin_7
plugin_7 = self.reload(plugin_7)
plugin_13 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"13",
target=plugin_7,
position="right"
)
plugin_13.save()
self.assertSequenceEqual(
CMSPlugin.objects.get(id=plugin_5.pk).get_children(),
[
CMSPlugin.objects.get(id=plugin_6.pk),
CMSPlugin.objects.get(id=plugin_7.pk),
CMSPlugin.objects.get(id=plugin_13.pk)
])
# new sibling of plugin_5
plugin_5 = self.reload(plugin_5)
plugin_14 = add_plugin(placeholder, u"TextPlugin", u"en",
body=u"14"
)
plugin_14.save()
self.assertSequenceEqual(
CMSPlugin.objects.filter(level=0),
[
CMSPlugin.objects.get(id=plugin_11.pk),
CMSPlugin.objects.get(id=plugin_1.pk),
CMSPlugin.objects.get(id=plugin_5.pk),
CMSPlugin.objects.get(id=plugin_14.pk)
])
self.assertEquals(CMSPlugin.objects.get(id=plugin_11.pk).tree_id, 1)
self.copy_placeholders_and_check_results([placeholder])
# now let's move plugins around in the tree
# move plugin_2 before plugin_11
plugin_2 = self.reload(plugin_2)
plugin_2.move_to(target=plugin_1, position="left")
plugin_2.save()
self.assertEquals(CMSPlugin.objects.get(id=plugin_2.pk).tree_id, 1)
self.copy_placeholders_and_check_results([placeholder])
# move plugin_6 after plugin_7
plugin_6 = self.reload(plugin_6)
plugin_7 = self.reload(plugin_7)
plugin_6.move_to(target=plugin_7, position="right")
plugin_6.save()
self.copy_placeholders_and_check_results([placeholder])
# move plugin_3 before plugin_2
plugin_2 = self.reload(plugin_2)
plugin_3 = self.reload(plugin_3)
plugin_3.move_to(target=plugin_2, position="left")
plugin_3.save()
self.copy_placeholders_and_check_results([placeholder])
# make plugin_3 plugin_2's first-child
plugin_2 = self.reload(plugin_2)
plugin_3 = self.reload(plugin_3)
plugin_3.move_to(target=plugin_2, position="first-child")
plugin_3.save()
self.copy_placeholders_and_check_results([placeholder])
        # move plugin_7 to the right of plugin_3 (making it plugin_2's child)
plugin_2 = self.reload(plugin_2)
plugin_7 = self.reload(plugin_7)
plugin_7.move_to(target=plugin_3, position="right")
plugin_7.save()
self.copy_placeholders_and_check_results([placeholder, ])
def test_nested_plugin_on_page(self):
"""
        Validate that a text plugin with a nested link plugin has MPTT values
        that correctly reflect the parent-child relationship of the nested
        plugin
"""
with SettingsOverride(CMS_PERMISSION=False):
# setup page 1
page_one = create_page(u"Three Placeholder", u"col_three.html", u"en",
position=u"last-child", published=True, in_navigation=True)
page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
# add a plugin
pre_nesting_body = u"<p>the nested text plugin with a link inside</p>"
text_plugin = add_plugin(page_one_ph_two, u"TextPlugin", u"en", body=pre_nesting_body)
            # prepare nesting plugin
page_one_ph_two = self.reload(page_one_ph_two)
text_plugin = self.reload(text_plugin)
link_plugin = add_plugin(page_one_ph_two, u"LinkPlugin", u"en", target=text_plugin)
link_plugin.name = u"django-cms Link"
link_plugin.url = u"https://www.django-cms.org"
            # for some reason mptt does not update the parent-child
            # relationship in the add_plugin method when a target is present,
            # but that is not the topic of this test
link_plugin.parent = text_plugin
link_plugin.save()
# reloading needs to be done after every save
link_plugin = self.reload(link_plugin)
text_plugin = self.reload(text_plugin)
# mptt related insertion correct?
msg = u"parent plugin right is not updated, child not inserted correctly"
self.assertTrue(text_plugin.rght > link_plugin.rght, msg=msg)
msg = u"link has no parent"
self.assertFalse(link_plugin.parent == None, msg=msg)
msg = u"parent plugin left is not updated, child not inserted correctly"
self.assertTrue(text_plugin.lft < link_plugin.lft, msg=msg)
msg = u"child level is not bigger than parent level"
self.assertTrue(text_plugin.level < link_plugin.level, msg=msg)
# add the link plugin to the body
# emulate the editor in admin that adds some txt for the nested plugin
in_txt = u"""<img id="plugin_obj_%s" title="Link" alt="Link" src="/static/cms/images/plugins/link.png">"""
nesting_body = u"%s<p>%s</p>" % (text_plugin.body, (in_txt % (link_plugin.id)))
text_plugin.body = nesting_body
text_plugin.save()
text_plugin = self.reload(text_plugin)
            # none of the descendants should have a placeholder other than its own
self.assertEquals(text_plugin.get_descendants().exclude(placeholder=text_plugin.placeholder).count(), 0)
post_add_plugin_count = CMSPlugin.objects.count()
self.assertEqual(post_add_plugin_count, 2)
def test_copy_page_nested_plugin(self):
"""
Test to verify that page copy with a nested plugin works
        page one - 3 placeholders
col_sidebar: 1 text plugin
col_left: 1 text plugin with nested link plugin
col_right: no plugin
page two (copy target)
Verify copied page, placeholders, plugins and body text
"""
with SettingsOverride(CMS_PERMISSION=False):
templates = []
# setup page 1
page_one = create_page(u"Three Placeholder", u"col_three.html", u"en",
position=u"last-child", published=True, in_navigation=True)
page_one_ph_one = page_one.placeholders.get(slot=u"col_sidebar")
page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
page_one_ph_three = page_one.placeholders.get(slot=u"col_right")
# add the text plugin to placeholder one
text_plugin_en = add_plugin(page_one_ph_one, u"TextPlugin", u"en", body="Hello World")
self.assertEquals(text_plugin_en.id, CMSPlugin.objects.all()[0].id)
self.assertEquals(text_plugin_en.get_children().count(), 0)
pre_add_plugin_count = CMSPlugin.objects.count()
self.assertEqual(pre_add_plugin_count, 1)
###
# add a plugin to placeholder two
###
pre_nesting_body = u"<p>the nested text plugin with a link inside</p>"
text_plugin_two = add_plugin(page_one_ph_two, u"TextPlugin", u"en", body=pre_nesting_body)
text_plugin_two = self.reload(text_plugin_two)
# prepare nesting plugin
page_one_ph_two = self.reload(page_one_ph_two)
text_plugin_two = self.reload(text_plugin_two)
link_plugin = add_plugin(page_one_ph_two, u"LinkPlugin", u"en", target=text_plugin_two)
link_plugin.name = u"django-cms Link"
link_plugin.url = u"https://www.django-cms.org"
link_plugin.parent = text_plugin_two
link_plugin.save()
link_plugin = self.reload(link_plugin)
text_plugin_two = self.reload(text_plugin_two)
in_txt = """<img id="plugin_obj_%s" title="Link" alt="Link" src="/static/cms/images/plugins/link.png">"""
nesting_body = "%s<p>%s</p>" % (text_plugin_two.body, (in_txt % (link_plugin.id)))
# emulate the editor in admin that adds some txt for the nested plugin
text_plugin_two.body = nesting_body
text_plugin_two.save()
text_plugin_two = self.reload(text_plugin_two)
# the link is attached as a child?
self.assertEquals(text_plugin_two.get_children().count(), 1)
post_add_plugin_count = CMSPlugin.objects.filter(placeholder__page__publisher_is_draft=True).count()
self.assertEqual(post_add_plugin_count, 3)
page_one.save()
# get the plugins from the original page
page_one = self.reload(page_one)
page_one_ph_one = page_one.placeholders.get(slot=u"col_sidebar")
page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
page_one_ph_three = page_one.placeholders.get(slot=u"col_right")
# verify that the plugins got created
org_placeholder_one_plugins = page_one_ph_one.get_plugins()
self.assertEquals(len(org_placeholder_one_plugins), 1)
org_placeholder_two_plugins = page_one_ph_two.get_plugins()
self.assertEquals(len(org_placeholder_two_plugins), 2)
org_placeholder_three_plugins = page_one_ph_three.get_plugins()
self.assertEquals(len(org_placeholder_three_plugins), 0)
self.assertEquals(page_one.placeholders.count(), 3)
placeholder_count = Placeholder.objects.filter(page__publisher_is_draft=True).count()
self.assertEquals(placeholder_count, 3)
self.assertEquals(CMSPlugin.objects.filter(placeholder__page__publisher_is_draft=True).count(), 3)
page_one_plugins = CMSPlugin.objects.all()
##
# setup page_copy_target page
##
page_copy_target = create_page("Three Placeholder - page copy target", "col_three.html", "en",
position="last-child", published=True, in_navigation=True)
all_page_count = Page.objects.drafts().count()
pre_copy_placeholder_count = Placeholder.objects.filter(page__publisher_is_draft=True).count()
self.assertEquals(pre_copy_placeholder_count, 6)
# copy the page
superuser = self.get_superuser()
with self.login_user_context(superuser):
page_two = self.copy_page(page_one, page_copy_target)
            # validate the expected pages, placeholders, plugins and plugin bodies
after_copy_page_plugin_count = CMSPlugin.objects.filter(placeholder__page__publisher_is_draft=True).count()
self.assertEquals(after_copy_page_plugin_count, 6)
# check the amount of copied stuff
after_copy_page_count = Page.objects.drafts().count()
after_copy_placeholder_count = Placeholder.objects.filter(page__publisher_is_draft=True).count()
self.assertGreater(after_copy_page_count, all_page_count, u"no new page after copy")
self.assertGreater(after_copy_page_plugin_count, post_add_plugin_count, u"plugin count is not grown")
self.assertGreater(after_copy_placeholder_count, pre_copy_placeholder_count,
u"placeholder count is not grown")
self.assertEqual(after_copy_page_count, 3, u"no new page after copy")
# original placeholder
page_one = self.reload(page_one)
page_one_ph_one = page_one.placeholders.get(slot=u"col_sidebar")
page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
page_one_ph_three = page_one.placeholders.get(slot=u"col_right")
            # check that these placeholders are assigned to the expected page
found_page = page_one_ph_one.page if page_one_ph_one else None
self.assertEqual(found_page, page_one)
found_page = page_one_ph_two.page if page_one_ph_two else None
self.assertEqual(found_page, page_one)
found_page = page_one_ph_three.page if page_one_ph_three else None
self.assertEqual(found_page, page_one)
page_two = self.reload(page_two)
page_two_ph_one = page_two.placeholders.get(slot=u"col_sidebar")
page_two_ph_two = page_two.placeholders.get(slot=u"col_left")
page_two_ph_three = page_two.placeholders.get(slot=u"col_right")
            # check that these placeholders are assigned to the expected page
found_page = page_two_ph_one.page if page_two_ph_one else None
self.assertEqual(found_page, page_two)
found_page = page_two_ph_two.page if page_two_ph_two else None
self.assertEqual(found_page, page_two)
found_page = page_two_ph_three.page if page_two_ph_three else None
self.assertEqual(found_page, page_two)
# check the stored placeholders org vs copy
            msg = 'placeholder ids copy:%s org:%s copied page %s are identical - tree broken' % (
page_two_ph_one.pk, page_one_ph_one.pk, page_two.pk)
self.assertNotEquals(page_two_ph_one.pk, page_one_ph_one.pk, msg)
            msg = 'placeholder ids copy:%s org:%s copied page %s are identical - tree broken' % (
page_two_ph_two.pk, page_one_ph_two.pk, page_two.pk)
self.assertNotEquals(page_two_ph_two.pk, page_one_ph_two.pk, msg)
            msg = 'placeholder ids copy:%s org:%s copied page %s are identical - tree broken' % (
page_two_ph_three.pk, page_one_ph_three.pk, page_two.pk)
self.assertNotEquals(page_two_ph_three.pk, page_one_ph_three.pk, msg)
# get the plugins from the original page
org_placeholder_one_plugins = page_one_ph_one.get_plugins()
self.assertEquals(len(org_placeholder_one_plugins), 1)
org_placeholder_two_plugins = page_one_ph_two.get_plugins()
self.assertEquals(len(org_placeholder_two_plugins), 2)
org_placeholder_three_plugins = page_one_ph_three.get_plugins()
self.assertEquals(len(org_placeholder_three_plugins), 0)
# get the plugins from the copied page
copied_placeholder_one_plugins = page_two_ph_one.get_plugins()
self.assertEquals(len(copied_placeholder_one_plugins), 1)
copied_placeholder_two_plugins = page_two_ph_two.get_plugins()
self.assertEquals(len(copied_placeholder_two_plugins), 2)
copied_placeholder_three_plugins = page_two_ph_three.get_plugins()
self.assertEquals(len(copied_placeholder_three_plugins), 0)
# verify the plugins got copied
# placeholder 1
count_plugins_copied = len(copied_placeholder_one_plugins)
count_plugins_org = len(org_placeholder_one_plugins)
msg = u"plugin count %s %s for placeholder one not equal" % (count_plugins_copied, count_plugins_org)
self.assertEquals(count_plugins_copied, count_plugins_org, msg)
# placeholder 2
count_plugins_copied = len(copied_placeholder_two_plugins)
count_plugins_org = len(org_placeholder_two_plugins)
msg = u"plugin count %s %s for placeholder two not equal" % (count_plugins_copied, count_plugins_org)
self.assertEquals(count_plugins_copied, count_plugins_org, msg)
# placeholder 3
count_plugins_copied = len(copied_placeholder_three_plugins)
count_plugins_org = len(org_placeholder_three_plugins)
msg = u"plugin count %s %s for placeholder three not equal" % (count_plugins_copied, count_plugins_org)
self.assertEquals(count_plugins_copied, count_plugins_org, msg)
# verify the body of text plugin with nested link plugin
# org to copied
org_nested_text_plugin = None
# iterate to find the actual text plugin instance that carries the nested link;
# the inheritance mechanism for CMS plugins works through get_plugin_instance(),
# which returns an (instance, plugin_class) tuple
for x in org_placeholder_two_plugins:
if x.plugin_type == u"TextPlugin":
instance = x.get_plugin_instance()[0]
if instance.body.startswith(pre_nesting_body):
org_nested_text_plugin = instance
break
copied_nested_text_plugin = None
for x in copied_placeholder_two_plugins:
if x.plugin_type == u"TextPlugin":
instance = x.get_plugin_instance()[0]
if instance.body.startswith(pre_nesting_body):
copied_nested_text_plugin = instance
break
msg = u"orginal nested text plugin not found"
self.assertNotEquals(org_nested_text_plugin, None, msg=msg)
msg = u"copied nested text plugin not found"
self.assertNotEquals(copied_nested_text_plugin, None, msg=msg)
# get the child ids of the text plugin with a nested link
# to check if the body of the text is generated correctly
org_link_child_plugin = org_nested_text_plugin.get_children()[0]
copied_link_child_plugin = copied_nested_text_plugin.get_children()[0]
# validate the textplugin body texts
msg = u"org plugin and copied plugin are the same"
self.assertNotEqual(org_link_child_plugin.id, copied_link_child_plugin.id, msg)
needle = u"plugin_obj_%s"
msg = u"child plugin id differs to parent in body plugin_obj_id"
# linked child is in body
self.assertTrue(org_nested_text_plugin.body.find(needle % (org_link_child_plugin.id)) != -1, msg)
msg = u"copy: child plugin id differs to parent in body plugin_obj_id"
self.assertTrue(copied_nested_text_plugin.body.find(needle % (copied_link_child_plugin.id)) != -1, msg)
# really nothing else
msg = u"child link plugin id differs to parent body plugin_obj_id"
self.assertTrue(org_nested_text_plugin.body.find(needle % (copied_link_child_plugin.id)) == -1, msg)
msg = u"copy: child link plugin id differs to parent body plugin_obj_id"
self.assertTrue(copied_nested_text_plugin.body.find(needle % (org_link_child_plugin.id)) == -1, msg)
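# illustrative note (added): the text plugin body stores its child link as a
# marker like <img id="plugin_obj_42" ...> (the id 42 is hypothetical), so the
# four asserts above verify that each body references its own child's id and
# never the id of the other page's child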
# now reverse lookup the placeholders from the plugins
org_placeholder = org_link_child_plugin.placeholder
copied_placeholder = copied_link_child_plugin.placeholder
msg = u"placeholder of the orginal plugin and copied plugin are the same"
ok = ((org_placeholder.id != copied_placeholder.id))
self.assertTrue(ok, msg)
def test_copy_page_nested_plugin_moved_parent_plugin(self):
"""
Test to verify that page copy with a nested plugin works
when a plugin with child got moved to another placeholder
page one - 3 placeholder
col_sidebar:
1 text plugin
col_left: 1 text plugin with nested link plugin
col_right: no plugin
page two (copy target)
step2: move the col_left text plugin to col_right
col_sidebar:
1 text plugin
col_left: no plugin
col_right: 1 text plugin with nested link plugin
verify the copied page structure
"""
with SettingsOverride(CMS_PERMISSION=False):
templates = []
# setup page 1
page_one = create_page(u"Three Placeholder", u"col_three.html", u"en",
position=u"last-child", published=True, in_navigation=True)
page_one_ph_one = page_one.placeholders.get(slot=u"col_sidebar")
page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
page_one_ph_three = page_one.placeholders.get(slot=u"col_right")
# add the text plugin to placeholder one
text_plugin_en = add_plugin(page_one_ph_one, u"TextPlugin", u"en", body=u"Hello World")
self.assertEquals(text_plugin_en.id, CMSPlugin.objects.all()[0].id)
self.assertEquals(text_plugin_en.get_children().count(), 0)
pre_add_plugin_count = CMSPlugin.objects.count()
self.assertEqual(pre_add_plugin_count, 1)
# add a plugin to placeholder two
pre_nesting_body = u"<p>the nested text plugin with a link inside</p>"
text_plugin_two = add_plugin(page_one_ph_two, u"TextPlugin", u"en", body=pre_nesting_body)
text_plugin_two = self.reload(text_plugin_two)
# prepare nesting plugin
page_one_ph_two = self.reload(page_one_ph_two)
text_plugin_two = self.reload(text_plugin_two)
link_plugin = add_plugin(page_one_ph_two, u"LinkPlugin", u"en", target=text_plugin_two)
link_plugin.name = u"django-cms Link"
link_plugin.url = u"https://www.django-cms.org"
link_plugin.parent = text_plugin_two
link_plugin.save()
# reload after every save
link_plugin = self.reload(link_plugin)
text_plugin_two = self.reload(text_plugin_two)
in_txt = u"""<img id="plugin_obj_%s" title="Link" alt="Link" src="/static/cms/images/plugins/link.png">"""
nesting_body = "%s<p>%s</p>" % (text_plugin_two.body, (in_txt % (link_plugin.id)))
# emulate the editor in admin that adds some txt for the nested plugin
text_plugin_two.body = nesting_body
text_plugin_two.save()
text_plugin_two = self.reload(text_plugin_two)
# the link is attached as a child?
self.assertEquals(text_plugin_two.get_children().count(), 1)
post_add_plugin_count = CMSPlugin.objects.count()
self.assertEqual(post_add_plugin_count, 3)
page_one.save()
# get the plugins from the original page
page_one = self.reload(page_one)
page_one_ph_one = page_one.placeholders.get(slot=u"col_sidebar")
page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
page_one_ph_three = page_one.placeholders.get(slot=u"col_right")
# verify the plugins got created
org_placeholder_one_plugins = page_one_ph_one.get_plugins()
self.assertEquals(len(org_placeholder_one_plugins), 1)
org_placeholder_two_plugins = page_one_ph_two.get_plugins()
self.assertEquals(len(org_placeholder_two_plugins), 2)
org_placeholder_three_plugins = page_one_ph_three.get_plugins()
self.assertEquals(len(org_placeholder_three_plugins), 0)
self.assertEquals(page_one.placeholders.count(), 3)
placeholder_count = Placeholder.objects.filter(page__publisher_is_draft=True).count()
self.assertEquals(placeholder_count, 3)
self.assertEquals(CMSPlugin.objects.count(), 3)
page_one_plugins = CMSPlugin.objects.all()
# setup page_copy_target
page_copy_target = create_page("Three Placeholder - page copy target", "col_three.html", "en",
position="last-child", published=True, in_navigation=True)
all_page_count = Page.objects.drafts().count()
pre_copy_placeholder_count = Placeholder.objects.filter(page__publisher_is_draft=True).count()
self.assertEquals(pre_copy_placeholder_count, 6)
superuser = self.get_superuser()
with self.login_user_context(superuser):
# now move the parent text plugin to another placeholder
post_data = {
'placeholder_id': page_one_ph_three.id,
'plugin_id': text_plugin_two.id,
'plugin_language':'en',
'plugin_parent':'',
}
edit_url = URL_CMS_MOVE_PLUGIN % page_one.id
response = self.client.post(edit_url, post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'ok')
# check if the plugin got moved
page_one = self.reload(page_one)
text_plugin_two = self.reload(text_plugin_two)
page_one_ph_one = page_one.placeholders.get(slot=u"col_sidebar")
page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
page_one_ph_three = page_one.placeholders.get(slot=u"col_right")
org_placeholder_one_plugins = page_one_ph_one.get_plugins()
self.assertEquals(len(org_placeholder_one_plugins), 1)
org_placeholder_two_plugins = page_one_ph_two.get_plugins()
# the plugin and its child got moved
self.assertEquals(len(org_placeholder_two_plugins), 0)
org_placeholder_three_plugins = page_one_ph_three.get_plugins()
self.assertEquals(len(org_placeholder_three_plugins), 2)
# copy the page
page_two = self.copy_page(page_one, page_copy_target)
# validate the expected pages, placeholders, plugins and plugin bodies
after_copy_page_plugin_count = CMSPlugin.objects.count()
self.assertEquals(after_copy_page_plugin_count, 6)
after_copy_page_count = Page.objects.drafts().count()
after_copy_placeholder_count = Placeholder.objects.filter(page__publisher_is_draft=True).count()
self.assertGreater(after_copy_page_count, all_page_count, u"no new page after copy")
self.assertGreater(after_copy_page_plugin_count, post_add_plugin_count, u"plugin count did not grow")
self.assertGreater(after_copy_placeholder_count, pre_copy_placeholder_count,
u"placeholder count is not grown")
self.assertEquals(after_copy_page_count, 3, u"no new page after copy")
# validate the structure
# original placeholder
page_one = self.reload(page_one)
page_one_ph_one = page_one.placeholders.get(slot=u"col_sidebar")
page_one_ph_two = page_one.placeholders.get(slot=u"col_left")
page_one_ph_three = page_one.placeholders.get(slot=u"col_right")
# check if there are multiple pages assigned to these placeholders
found_page = page_one_ph_one.page if page_one_ph_one else None
self.assertEqual(found_page, page_one)
found_page = page_one_ph_two.page if page_one_ph_two else None
self.assertEqual(found_page, page_one)
found_page = page_one_ph_three.page if page_one_ph_three else None
self.assertEqual(found_page, page_one)
page_two = self.reload(page_two)
page_two_ph_one = page_two.placeholders.get(slot=u"col_sidebar")
page_two_ph_two = page_two.placeholders.get(slot=u"col_left")
page_two_ph_three = page_two.placeholders.get(slot=u"col_right")
# check if there are multiple pages assigned to these placeholders
found_page = page_two_ph_one.page if page_two_ph_one else None
self.assertEqual(found_page, page_two)
found_page = page_two_ph_two.page if page_two_ph_two else None
self.assertEqual(found_page, page_two)
found_page = page_two_ph_three.page if page_two_ph_three else None
self.assertEqual(found_page, page_two)
# check the stored placeholders org vs copy
msg = u'placeholder ids copy:%s org:%s copied page %s are identical - tree broken' % (
page_two_ph_one.pk, page_one_ph_one.pk, page_two.pk)
self.assertNotEquals(page_two_ph_one.pk, page_one_ph_one.pk, msg)
msg = u'placeholder ids copy:%s org:%s copied page %s are identical - tree broken' % (
page_two_ph_two.pk, page_one_ph_two.pk, page_two.pk)
self.assertNotEquals(page_two_ph_two.pk, page_one_ph_two.pk, msg)
msg = u'placeholder ids copy:%s org:%s copied page %s are identical - tree broken' % (
page_two_ph_three.pk, page_one_ph_three.pk, page_two.pk)
self.assertNotEquals(page_two_ph_three.pk, page_one_ph_three.pk, msg)
# get the plugins from the original page
org_placeholder_one_plugins = page_one_ph_one.get_plugins()
self.assertEquals(len(org_placeholder_one_plugins), 1)
org_placeholder_two_plugins = page_one_ph_two.get_plugins()
self.assertEquals(len(org_placeholder_two_plugins), 0)
org_placeholder_three_plugins = page_one_ph_three.get_plugins()
self.assertEquals(len(org_placeholder_three_plugins), 2)
# get the plugins from the copied page
copied_placeholder_one_plugins = page_two_ph_one.get_plugins()
self.assertEquals(len(copied_placeholder_one_plugins), 1)
copied_placeholder_two_plugins = page_two_ph_two.get_plugins()
self.assertEquals(len(copied_placeholder_two_plugins), 0)
copied_placeholder_three_plugins = page_two_ph_three.get_plugins()
self.assertEquals(len(copied_placeholder_three_plugins), 2)
# verify the plugins got copied
# placeholder 1
count_plugins_copied = len(copied_placeholder_one_plugins)
count_plugins_org = len(org_placeholder_one_plugins)
msg = u"plugin count %s %s for placeholder one not equal" % (count_plugins_copied, count_plugins_org)
self.assertEquals(count_plugins_copied, count_plugins_org, msg)
# placeholder 2
count_plugins_copied = len(copied_placeholder_two_plugins)
count_plugins_org = len(org_placeholder_two_plugins)
msg = u"plugin count %s %s for placeholder two not equal" % (count_plugins_copied, count_plugins_org)
self.assertEquals(count_plugins_copied, count_plugins_org, msg)
# placeholder 3
count_plugins_copied = len(copied_placeholder_three_plugins)
count_plugins_org = len(org_placeholder_three_plugins)
msg = u"plugin count %s %s for placeholder three not equal" % (count_plugins_copied, count_plugins_org)
self.assertEquals(count_plugins_copied, count_plugins_org, msg)
# verify the body of text plugin with nested link plugin
# org to copied
org_nested_text_plugin = None
# iterate to find the actual text plugin instance that carries the nested link;
# the inheritance mechanism for CMS plugins works through get_plugin_instance(),
# which returns an (instance, plugin_class) tuple
for x in org_placeholder_three_plugins:
if x.plugin_type == u"TextPlugin":
instance = x.get_plugin_instance()[0]
if instance.body.startswith(pre_nesting_body):
org_nested_text_plugin = instance
break
copied_nested_text_plugin = None
for x in copied_placeholder_three_plugins:
if x.plugin_type == u"TextPlugin":
instance = x.get_plugin_instance()[0]
if instance.body.startswith(pre_nesting_body):
copied_nested_text_plugin = instance
break
msg = u"orginal nested text plugin not found"
self.assertNotEquals(org_nested_text_plugin, None, msg=msg)
msg = u"copied nested text plugin not found"
self.assertNotEquals(copied_nested_text_plugin, None, msg=msg)
# get the child ids of the text plugin with a nested link
# to check if the body of the text is generated correctly
org_link_child_plugin = org_nested_text_plugin.get_children()[0]
copied_link_child_plugin = copied_nested_text_plugin.get_children()[0]
# validate the textplugin body texts
msg = u"org plugin and copied plugin are the same"
self.assertNotEqual(org_link_child_plugin.id, copied_link_child_plugin.id, msg)
needle = u"plugin_obj_%s"
msg = u"child plugin id differs to parent in body plugin_obj_id"
# linked child is in body
self.assertTrue(org_nested_text_plugin.body.find(needle % (org_link_child_plugin.id)) != -1, msg)
msg = u"copy: child plugin id differs to parent in body plugin_obj_id"
self.assertTrue(copied_nested_text_plugin.body.find(needle % (copied_link_child_plugin.id)) != -1, msg)
# really nothing else
msg = u"child link plugin id differs to parent body plugin_obj_id"
self.assertTrue(org_nested_text_plugin.body.find(needle % (copied_link_child_plugin.id)) == -1, msg)
msg = u"copy: child link plugin id differs to parent body plugin_obj_id"
self.assertTrue(copied_nested_text_plugin.body.find(needle % (org_link_child_plugin.id)) == -1, msg)
# now reverse lookup the placeholders from the plugins
org_placeholder = org_link_child_plugin.placeholder
copied_placeholder = copied_link_child_plugin.placeholder
msg = u"placeholder of the orginal plugin and copied plugin are the same"
self.assertNotEqual(org_placeholder.id, copied_placeholder.id, msg)
|
{
"content_hash": "7a5516ed4db69ab3956c1e1b06643faf",
"timestamp": "",
"source": "github",
"line_count": 911,
"max_line_length": 119,
"avg_line_length": 51.23051591657519,
"alnum_prop": 0.589102440487669,
"repo_name": "11craft/django-cms",
"id": "431063e4d88b8066611f136094c6e21a408792b9",
"size": "46695",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cms/tests/nested_plugins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "660853"
},
{
"name": "PHP",
"bytes": "2156"
},
{
"name": "Python",
"bytes": "2450732"
},
{
"name": "Ruby",
"bytes": "990"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
}
|
"""Utility functions to parse specific values, usually from JSON responses."""
import datetime
import re
def parse_timestamp(string):
"""Parse the timestamp that should be written in the standard format."""
timeformat = '%Y-%m-%dT%H:%M:%S.%f'
return datetime.datetime.strptime(string, timeformat)
def parse_float_value_from_text_stream(text, key):
"""Parse float value from the text.
Go through all lines of the text file, find the line with given key
and parse float value specified here.
"""
regexp = key + r"\s*=\s*(\d+\.\d*)"
for line in text.split("\n"):
if line.startswith(key):
# the key was found, now try to find and parse the float value
match = re.fullmatch(regexp, line)
assert match is not None
assert match.lastindex == 1
return float(match.group(1))
def parse_token_clause(token_clause):
"""Parse the clause that could be either 'with', 'using', or 'without'."""
use_token = {"with": True,
"using": True,
"without": False}.get(token_clause)
if use_token is None:
raise Exception("Wrong clause specified: {t}".format(t=token_clause))
return use_token
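# Illustrative usage sketch (added; the sample text and key below are made up,
# not taken from the original module):
#
#     parse_float_value_from_text_stream("status\ncpu_load = 0.75\n", "cpu_load")
#     -> 0.75
#     parse_token_clause("with") -> True
#     parse_token_clause("without") -> False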
|
{
"content_hash": "a39122ef0b5a5d00d869891aa20e823a",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 78,
"avg_line_length": 35.73529411764706,
"alnum_prop": 0.6255144032921811,
"repo_name": "jpopelka/fabric8-analytics-common",
"id": "6be6351efca69fb71909ce669fe43df0148220ff",
"size": "1215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integration-tests/features/src/parsing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "998"
},
{
"name": "Gherkin",
"bytes": "140658"
},
{
"name": "HTML",
"bytes": "25307"
},
{
"name": "Python",
"bytes": "354439"
},
{
"name": "Shell",
"bytes": "9619"
}
],
"symlink_target": ""
}
|
from uw_sws.section import (get_sections_by_instructor_and_term,
get_sections_by_delegate_and_term,
get_sections_by_curriculum_and_term,
get_sections_by_building_and_term,
get_changed_sections_by_term, get_section_by_url,
get_section_by_label, get_linked_sections,
get_joint_sections, get_prefetch_for_section_data,
is_a_term, is_b_term, is_full_summer_term,
is_valid_section_label)
|
{
"content_hash": "c25db94f4d889615ac0ff7861ce3d634",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 78,
"avg_line_length": 67.44444444444444,
"alnum_prop": 0.4958813838550247,
"repo_name": "uw-it-aca/uw-restclients",
"id": "fda0e9f7dadb32052fd7ed752dee3857fd9abb0c",
"size": "607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "restclients/sws/section.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "38842"
},
{
"name": "Python",
"bytes": "664277"
},
{
"name": "Roff",
"bytes": "9566"
}
],
"symlink_target": ""
}
|
"""The VLC media player Telnet integration."""
from aiovlc.client import Client
from aiovlc.exceptions import AuthError, ConnectError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PORT, Platform
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryAuthFailed
from .const import DATA_AVAILABLE, DATA_VLC, DOMAIN, LOGGER
PLATFORMS = [Platform.MEDIA_PLAYER]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up VLC media player Telnet from a config entry."""
config = entry.data
host = config[CONF_HOST]
port = config[CONF_PORT]
password = config[CONF_PASSWORD]
vlc = Client(password=password, host=host, port=port)
available = True
try:
await vlc.connect()
except ConnectError as err:
LOGGER.warning("Failed to connect to VLC: %s. Trying again", err)
available = False
if available:
try:
await vlc.login()
except AuthError as err:
await disconnect_vlc(vlc)
raise ConfigEntryAuthFailed() from err
domain_data = hass.data.setdefault(DOMAIN, {})
domain_data[entry.entry_id] = {DATA_VLC: vlc, DATA_AVAILABLE: available}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
entry_data = hass.data[DOMAIN].pop(entry.entry_id)
vlc = entry_data[DATA_VLC]
await disconnect_vlc(vlc)
return unload_ok
async def disconnect_vlc(vlc: Client) -> None:
"""Disconnect from VLC."""
LOGGER.debug("Disconnecting from VLC")
try:
await vlc.disconnect()
except ConnectError as err:
LOGGER.warning("Connection error: %s", err)
|
{
"content_hash": "44af2d3cd4bdab71cd65bf1ebcb4176b",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 82,
"avg_line_length": 29.70149253731343,
"alnum_prop": 0.6914572864321608,
"repo_name": "toddeye/home-assistant",
"id": "9fe3b97ab3167dd5d2ac68a63d5e0d9d3e38a574",
"size": "1990",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/vlc_telnet/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
import sys, os, subprocess, errno, re
from os.path import exists
TOOL_PATH = "/usr/bin/ipmitool"
try:
from subprocess import check_call
from subprocess import CalledProcessError
except ImportError:
def check_call(*popenargs, **kwargs):
import subprocess
retcode = subprocess.call(*popenargs, **kwargs)
cmd = kwargs.get("args")
if cmd is None: cmd = popenargs[0]
if retcode: raise CalledProcessError(retcode, cmd)
return retcode
class CalledProcessError(Exception):
def __init__(self, returncode, cmd):
self.returncode = returncode ; self.cmd = cmd
def __str__(self): return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
class Command:
def __init__(self,name,parent=None):
self.__name = name
self.__parent = parent
def __getattr__(self,name):
if name == "_print": name = "print"
return Command(name,self)
def __call__(self,*args):
class CommandOutput:
def __init__(self,ret,stdout,stderr):
self.stdout = stdout
self.stderr = stderr
self.ret = ret
cmd = self.__get_recursive_name() + list(args)
#print " ",cmd
popen = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
m = popen.communicate()
ret = popen.wait()
return CommandOutput(ret,*m)
def __get_recursive_name(self,sep=None):
m = self
l = []
while m is not None:
l.append(m.__name)
m = m.__parent
l.reverse()
if sep: return sep.join(l)
else: return l
def __str__(self):
return '<Command %r>'%self.__get_recursive_name(sep=" ")
def __repr__(self): return self.__str__()
ipmitool = Command("ipmitool")
def check_tool():
if not exists(TOOL_PATH):
print "Can not find ipmitool"
return False
def ping(args):
hostname = args.get("hostname")
usrname = args.get("usrname")
password = args.get("password")
if hostname == None:
print "No hostname"
return 1
o = ipmitool("-H", hostname, "-U", usrname, "-P", password, "chassis", "power", "status")
if o.ret:
print o.stderr
return 1
else:
print o.stdout
return 0
def boot_dev(args):
hostname = args.get("hostname")
usrname = args.get("usrname")
password = args.get("password")
dev = args.get("dev")
if hostname == None:
print "No hostname"
return 1
if dev == None:
print "No boot device specified"
return 1
o = ipmitool("-H", hostname, "-U", usrname, "-P", password, "chassis", "bootdev", dev)
if o.ret:
print o.stderr
return 1
else:
return 0
def reboot(args):
hostname = args.get("hostname")
usrname = args.get("usrname")
password = args.get("password")
if hostname == None:
print "No hostname"
return 1
o = ipmitool("-H", hostname, "-U", usrname, "-P", password, "chassis", "power", "status")
if o.ret:
print o.stderr
return 1
if "is on" in o.stdout:
o = ipmitool("-H", hostname, "-U", usrname, "-P", password, "chassis", "power", "cycle")
else:
o = ipmitool("-H", hostname, "-U", usrname, "-P", password, "chassis", "power", "reset")
if o.ret:
print o.stderr
return 1
else:
return 0
def power(args):
hostname = args.get("hostname")
usrname = args.get("usrname")
password = args.get("password")
action = args.get("action")
if hostname == None:
print "No hostname"
return 1
o = ipmitool("-H", hostname, "-U", usrname, "-P", password, "chassis", "power", action)
if o.ret:
print o.stderr
return 1
else:
return 0
def boot_or_reboot(args):
hostname = args.get("hostname")
usrname = args.get("usrname")
password = args.get("password")
o = ipmitool("-H", hostname, "-U", usrname, "-P", password, "chassis", "power", "status")
if o.ret:
print o.stderr
return 1
if "is on" in o.stdout:
return reboot(args)
elif "is off" in o.stdout:
args["action"] = "on"
return power(args)
else:
print "unknown power status:" + o.stdout
return 1
call_table = {"ping":ping, "boot_dev":boot_dev, "reboot":reboot, "power":power, "boot_or_reboot":boot_or_reboot}
def dispatch(args):
cmd = args[1]
params = args[2:]
func_params = {}
if cmd not in call_table:
print "No function %s" % cmd
return 1
for p in params:
pairs = p.split("=")
if len(pairs) != 2:
print "Invalid parameter %s" % p
return 1
func_params[pairs[0]] = pairs[1]
func = call_table[cmd]
return func(func_params)
if __name__ == "__main__":
if check_tool() == False:
sys.exit(1)
if len(sys.argv) < 2:
print "Not enough arguments, at least one"
sys.exit(1)
sys.exit(dispatch(sys.argv))
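# Example invocation (illustrative; the host and credentials below are made up):
# ./ipmi.py ping hostname=10.1.1.10 usrname=admin password=secret
# ./ipmi.py boot_dev hostname=10.1.1.10 usrname=admin password=secret dev=pxe
# dispatch() looks the first argument up in call_table and splits every
# remaining "key=value" argument into the dict the handlers read via args.get().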
|
{
"content_hash": "ec00e3642b993f874a347f8d4d257131",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 112,
"avg_line_length": 26.695876288659793,
"alnum_prop": 0.5558988221664414,
"repo_name": "jcshen007/cloudstack",
"id": "c9e9a66d4f801bbe1047b1a7629b7ba345aff465",
"size": "5985",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "scripts/util/ipmi.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1451"
},
{
"name": "Batchfile",
"bytes": "11926"
},
{
"name": "C#",
"bytes": "2356211"
},
{
"name": "CSS",
"bytes": "336634"
},
{
"name": "FreeMarker",
"bytes": "4917"
},
{
"name": "Groovy",
"bytes": "153137"
},
{
"name": "HTML",
"bytes": "151248"
},
{
"name": "Java",
"bytes": "34084304"
},
{
"name": "JavaScript",
"bytes": "7687141"
},
{
"name": "Python",
"bytes": "11154323"
},
{
"name": "Ruby",
"bytes": "896"
},
{
"name": "Shell",
"bytes": "770550"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
import cobra.models.fields.bounded
class Migration(migrations.Migration):
dependencies = [
('project', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='project',
name='svn_password',
field=models.CharField(default='admin', max_length=128, verbose_name='password'),
preserve_default=False,
),
migrations.AddField(
model_name='project',
name='svn_url',
field=models.URLField(default='http://url.com/', verbose_name='url'),
preserve_default=False,
),
migrations.AddField(
model_name='project',
name='svn_username',
field=models.CharField(default='admin', max_length=30, verbose_name='username'),
preserve_default=False,
),
migrations.AddField(
model_name='projectkey',
name='status',
field=cobra.models.fields.bounded.BoundedPositiveIntegerField(default=0, db_index=True, choices=[(0, 'Active'), (1, 'Inactive')]),
preserve_default=True,
),
]
|
{
"content_hash": "fdf34741325ad2045d8dbced2e285098",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 142,
"avg_line_length": 32.21052631578947,
"alnum_prop": 0.579248366013072,
"repo_name": "lyoniionly/django-cobra",
"id": "7b091ea6e67138ef3e1d656b5e83737bb0986326",
"size": "1248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/cobra/apps/project/migrations/0002_auto_20150210_1315.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "745958"
},
{
"name": "HTML",
"bytes": "254436"
},
{
"name": "JavaScript",
"bytes": "2679541"
},
{
"name": "Python",
"bytes": "1440198"
},
{
"name": "Shell",
"bytes": "893"
},
{
"name": "XSLT",
"bytes": "24882"
}
],
"symlink_target": ""
}
|
import sys
import subprocess
import time
import unittest
if sys.version_info[0] == 2:
_unicode_prefix = 'u'
else:
_unicode_prefix = ''
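# note (added): _unicode_prefix keeps the expected reprs valid on both Python
# versions, e.g. "{u'sequence': 1}" under Python 2 and "{'sequence': 1}" under
# Python 3.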
class ExamplesTest(unittest.TestCase):
def test_helloworld(self, example="helloworld.py"):
p = subprocess.Popen([example], stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
universal_newlines=True)
p.wait()
output = [l.strip() for l in p.stdout]
self.assertEqual(output, ['Hello World!'])
def test_helloworld_direct(self):
self.test_helloworld('helloworld_direct.py')
def test_helloworld_blocking(self):
self.test_helloworld('helloworld_blocking.py')
def test_helloworld_tornado(self):
self.test_helloworld('helloworld_tornado.py')
def test_helloworld_direct_tornado(self):
self.test_helloworld('helloworld_direct_tornado.py')
def test_simple_send_recv(self, recv='simple_recv.py', send='simple_send.py'):
r = subprocess.Popen([recv], stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
universal_newlines=True)
s = subprocess.Popen([send], stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
universal_newlines=True)
s.wait()
r.wait()
actual = [l.strip() for l in r.stdout]
expected = ["{%s'sequence': %i}" % (_unicode_prefix, (i+1)) for i in range(100)]
self.assertEqual(actual, expected)
def test_client_server(self, client=['client.py'], server=['server.py'], sleep=0):
s = subprocess.Popen(server, stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
universal_newlines=True)
if sleep:
time.sleep(sleep)
c = subprocess.Popen(client, stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
universal_newlines=True)
c.wait()
s.terminate()
actual = [l.strip() for l in c.stdout]
inputs = ["Twas brillig, and the slithy toves",
"Did gire and gymble in the wabe.",
"All mimsy were the borogroves,",
"And the mome raths outgrabe."]
expected = ["%s => %s" % (l, l.upper()) for l in inputs]
self.assertEqual(actual, expected)
def test_sync_client_server(self):
self.test_client_server(client=['sync_client.py'])
def test_client_server_tx(self):
self.test_client_server(server=['server_tx.py'])
def test_sync_client_server_tx(self):
self.test_client_server(client=['sync_client.py'], server=['server_tx.py'])
def test_client_server_direct(self):
self.test_client_server(client=['client.py', '-a', 'localhost:8888/examples'], server=['server_direct.py'], sleep=0.5)
def test_sync_client_server_direct(self):
self.test_client_server(client=['sync_client.py', '-a', 'localhost:8888/examples'], server=['server_direct.py'], sleep=0.5)
def test_db_send_recv(self):
self.maxDiff = None
# setup databases
subprocess.check_call(['db_ctrl.py', 'init', './src_db'])
subprocess.check_call(['db_ctrl.py', 'init', './dst_db'])
fill = subprocess.Popen(['db_ctrl.py', 'insert', './src_db'],
stdin=subprocess.PIPE, stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
universal_newlines=True)
for i in range(100):
fill.stdin.write("Message-%i\n" % (i+1))
fill.stdin.close()
fill.wait()
# run send and recv
r = subprocess.Popen(['db_recv.py', '-m', '100'], stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
universal_newlines=True)
s = subprocess.Popen(['db_send.py', '-m', '100'], stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
universal_newlines=True)
s.wait()
r.wait()
# verify output of receive
actual = [l.strip() for l in r.stdout]
expected = ["inserted message %i" % (i+1) for i in range(100)]
self.assertEqual(actual, expected)
# verify state of databases
v = subprocess.Popen(['db_ctrl.py', 'list', './dst_db'], stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
universal_newlines=True)
v.wait()
expected = ["(%i, %s'Message-%i')" % ((i+1), _unicode_prefix, (i+1)) for i in range(100)]
actual = [l.strip() for l in v.stdout]
self.assertEqual(actual, expected)
def test_tx_send_tx_recv(self):
self.test_simple_send_recv(recv='tx_recv.py', send='tx_send.py')
def test_simple_send_direct_recv(self):
self.maxDiff = None
r = subprocess.Popen(['direct_recv.py', '-a', 'localhost:8888'], stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
universal_newlines=True)
time.sleep(0.5)
s = subprocess.Popen(['simple_send.py', '-a', 'localhost:8888'], stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
universal_newlines=True)
s.wait()
r.wait()
actual = [l.strip() for l in r.stdout]
expected = ["{%s'sequence': %i}" % (_unicode_prefix, (i+1)) for i in range(100)]
self.assertEqual(actual, expected)
def test_direct_send_simple_recv(self):
s = subprocess.Popen(['direct_send.py', '-a', 'localhost:8888'], stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
universal_newlines=True)
time.sleep(0.5)
r = subprocess.Popen(['simple_recv.py', '-a', 'localhost:8888'], stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
universal_newlines=True)
r.wait()
s.wait()
actual = [l.strip() for l in r.stdout]
expected = ["{%s'sequence': %i}" % (_unicode_prefix, (i+1)) for i in range(100)]
self.assertEqual(actual, expected)
|
{
"content_hash": "bde4e83d1480bdffcdccec44b940f0ef",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 131,
"avg_line_length": 45.083333333333336,
"alnum_prop": 0.5844395899848764,
"repo_name": "Karm/qpid-proton",
"id": "3d97b97aff906d5e67e578ad092906600eecbd29",
"size": "6741",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "examples/python/test_examples.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1234930"
},
{
"name": "C++",
"bytes": "312016"
},
{
"name": "CMake",
"bytes": "89386"
},
{
"name": "Go",
"bytes": "192743"
},
{
"name": "Groff",
"bytes": "420"
},
{
"name": "HTML",
"bytes": "8169"
},
{
"name": "Java",
"bytes": "1790509"
},
{
"name": "JavaScript",
"bytes": "244212"
},
{
"name": "PHP",
"bytes": "31076"
},
{
"name": "Perl",
"bytes": "100876"
},
{
"name": "Perl6",
"bytes": "878"
},
{
"name": "Python",
"bytes": "628676"
},
{
"name": "Ruby",
"bytes": "335237"
},
{
"name": "Shell",
"bytes": "11599"
}
],
"symlink_target": ""
}
|
from haas.test_common import config_testsuite, config_merge, \
fresh_database
from haas.config import load_extensions
from haas.flaskapp import app
from haas.model import db
from haas.migrations import create_db
from haas.ext.network_allocators.vlan_pool import Vlan
import pytest
@pytest.fixture
def configure():
config_testsuite()
config_merge({
'extensions': {
'haas.ext.network_allocators.null': None,
'haas.ext.network_allocators.vlan_pool': ''
},
'haas.ext.network_allocators.vlan_pool': {
'vlans': '100-104, 300, 702', # Arbitrary list
},
})
load_extensions()
fresh_database = pytest.fixture(fresh_database)
pytestmark = pytest.mark.usefixtures('configure',
'fresh_database',
)
def test_populate_dirty_db():
"""running the allocator's populate() on an existing db should be ok.
This includes the case where modifications have been made to the vlans
in the database.
Note that we only check that this doesn't raise an exception.
"""
# The fresh_database fixture will have created a clean database for us. We
# just tweak it and then re-run create_db
with app.app_context():
# flag vlan 100 as in-use, just so the db isn't quite pristine.
vlan100 = Vlan.query.filter_by(vlan_no=100).first()
vlan100.available = False
db.session.commit()
# Okay, now try re-initializing:
create_db()
|
{
"content_hash": "548b29816606e3341645bba596d3c81b",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 78,
"avg_line_length": 31.708333333333332,
"alnum_prop": 0.640604467805519,
"repo_name": "henn/haas",
"id": "7e86d2fbc940c9c5dc986f17541a15e86212df56",
"size": "1522",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/unit/ext/network_allocators/vlan_pool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "451"
},
{
"name": "Python",
"bytes": "352176"
}
],
"symlink_target": ""
}
|
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Red Hat Support Library'
copyright = u'2013, Keith Robertson, Nigel Jones, Spenser Shumaker, Dan Varga'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9.0'
# The full version, including alpha/beta/rc tags.
release = '0.9.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'RedHatSupportLibrarydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'RedHatSupportLibrary.tex', u'Red Hat Support Library Documentation',
u'Keith Robertson, Nigel Jones, Spenser Shumaker, Dan Varga', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
skipstrs = [ "gds_", "superclass", "hasContent", "factory", "export", "build" ]
def skip(app, what, name, obj, skip, options):
for skipstr in skipstrs:
if name.startswith(skipstr):
return True
return skip
def setup(app):
app.connect("autodoc-skip-member", skip)
|
{
"content_hash": "977d67f7788d470a740ec04a6ee85392",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 81,
"avg_line_length": 33.197916666666664,
"alnum_prop": 0.7091308440539692,
"repo_name": "redhataccess/redhat-support-lib-python",
"id": "48ed2e18065907233bd309ad6cfaa9520b01c178",
"size": "6808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2194"
},
{
"name": "Python",
"bytes": "1083764"
}
],
"symlink_target": ""
}
|
import vaex
import numpy as np
def plot2d_tensor(self, x, y, vx, vy, shape=16, limits=None, delay=None, show=False, normalize=False, selection=None,
facecolor='green', alpha=0.5, edgecolor='black', scale=1., min_count=0):
import matplotlib.pylab as plt
shape = vaex.dataset._expand_shape(shape, 2)
@vaex.delayed
def on_cov(limits, count, cov):
# cov[:,:,0,0] = 1
# cov[:,:,1,1] = 2.1
# cov[:,:,0,1] = cov[:,:,1,0] = (cov[:,:,1,1] * cov[:,:,0,0]) **0.5* -0.8
if normalize:
length = (cov[:, :, 0, 0] + cov[:, :, 1, 1])
with np.errstate(divide='ignore', invalid='ignore'):
cov = (cov.T / length.T).T
x_centers = self.bin_centers(x, limits[0], shape=shape[0])
y_centers = self.bin_centers(y, limits[1], shape=shape[1])
X, Y = np.meshgrid(x_centers, y_centers, indexing='ij')
X = X.flatten()
Y = Y.flatten()
count = count.flatten()
cov = cov.reshape((-1,) + cov.shape[-2:])
axes = plt.gca()
fig = plt.gcf()
width, height = fig.canvas.get_width_height()
max_size = min(width, height)
max_length = (np.nanmax(cov[:, 0, 0] + cov[:, 1, 1]))**0.5
scaling_x = 1 / max_length * width / shape[0] # (width*shape[0])
scaling_y = 1 / max_length * height / shape[1] # (height*shape[1])
scaling = min(scaling_x, scaling_y)
for i in range(len(X)):
if not np.all(np.isfinite(cov[i])):
continue
if count[i] < min_count:
continue
eigen_values, eigen_vectors = np.linalg.eig(cov[i])
indices = np.argsort(eigen_values)[::-1]
v1 = eigen_vectors[:, indices[0]] # largest eigen vector
scale_dispersion = 1.
device_width = (np.sqrt(np.max(eigen_values))) * scaling * scale
device_height = (np.sqrt(np.min(eigen_values))) * scaling * scale
varx = cov[i, 0, 0]
vary = cov[i, 1, 1]
angle = np.arctan2(v1[1], v1[0])
e = ellipse(xy=(X[i], Y[i]), width=device_width, height=device_height, angle=np.degrees(angle),
scale=scale_dispersion,
alpha=alpha, facecolor=facecolor, edgecolor=edgecolor) # rand()*360
axes.add_artist(e)
if show:
plt.show()
return
@vaex.delayed
def on_limits(limits):
# count on vx + vy so that rows missing either component are not counted
count = self.count(vx + vy, binby=['x', 'y'], limits=limits, shape=shape, selection=selection, delay=True)
cov = self.cov([vx, vy], binby=['x', 'y'], limits=limits, shape=shape, selection=selection, delay=True)
return on_cov(limits, count, cov)
task = on_limits(self.limits([x, y], limits, selection=selection, delay=True))
return self._delay(self._use_delay(delay), task)
def ellipse(*args, **kwargs):
# for import performance reasons we don't import it globally
import matplotlib.artist as artist
import matplotlib.transforms as transforms
import matplotlib.patches as patches
from matplotlib.path import Path
class DispersionEllipse(patches.Patch):
"""
This ellipse has its center in user coordinates, and the width and height in device coordinates
such that it is not deformed
"""
def __str__(self):
return "DispersionEllipse(%s,%s;%sx%s)" % (self.center[0], self.center[1],
self.width, self.height)
# @docstring.dedent_interpd
def __init__(self, xy, width, height, scale=1.0, angle=0.0, **kwargs):
"""
*xy*
center of ellipse
*width*
total length (diameter) of horizontal axis
*height*
total length (diameter) of vertical axis
*angle*
rotation in degrees (anti-clockwise)
Valid kwargs are:
%(Patch)s
"""
patches.Patch.__init__(self, **kwargs)
self.center = xy
self.width, self.height = width, height
self.scale = scale
self.angle = angle
self._path = Path.unit_circle()
# Note: This cannot be calculated until this is added to an Axes
self._patch_transform = transforms.IdentityTransform()
def _recompute_transform(self):
"""NOTE: This cannot be called until after this has been added
to an Axes, otherwise unit conversion will fail. This
makes it very important to call the accessor method and
not directly access the transformation member variable.
"""
center = (self.convert_xunits(self.center[0]),
self.convert_yunits(self.center[1]))
width = self.width # self.convert_xunits(self.width)
height = self.height # self.convert_yunits(self.height)
trans = artist.Artist.get_transform(self)
self._patch_transform = transforms.Affine2D() \
.scale(width * 0.5 * self.scale, height * 0.5 * self.scale) \
.rotate_deg(self.angle) \
.translate(*trans.transform(center))
def get_path(self):
"""
Return the vertices of the rectangle
"""
return self._path
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the :class:`Patch`.
"""
return self.get_patch_transform()
def get_patch_transform(self):
self._recompute_transform()
return self._patch_transform
def contains(self, ev):
if ev.x is None or ev.y is None:
return False, {}
x, y = self.get_transform().inverted().transform_point((ev.x, ev.y))
return (x * x + y * y) <= 1.0, {}
return DispersionEllipse(*args, **kwargs)
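# Illustrative note (added): unlike a plain matplotlib.patches.Ellipse, the
# width/height passed to ellipse() are interpreted in display (pixel) units
# while xy stays in data units, e.g.
#
#     e = ellipse(xy=(0.0, 0.0), width=40, height=20, angle=30, alpha=0.5)
#     plt.gca().add_artist(e)
#
# so the glyphs drawn by plot2d_tensor keep their on-screen size regardless of
# the axis limits.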
|
{
"content_hash": "55ce24a3236f458011b4cb63f10cef35",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 117,
"avg_line_length": 40.27450980392157,
"alnum_prop": 0.5418695228821812,
"repo_name": "maartenbreddels/vaex",
"id": "6d2c909731cf810d4dca214c1c5b9035b813e62d",
"size": "6162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/vaex-viz/vaex/viz/tensor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1888"
},
{
"name": "C++",
"bytes": "81166"
},
{
"name": "CSS",
"bytes": "6604"
},
{
"name": "GLSL",
"bytes": "6204"
},
{
"name": "HTML",
"bytes": "177613"
},
{
"name": "JavaScript",
"bytes": "1489136"
},
{
"name": "Makefile",
"bytes": "432"
},
{
"name": "PHP",
"bytes": "33807"
},
{
"name": "Python",
"bytes": "1893232"
},
{
"name": "Shell",
"bytes": "4639"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import copy
import os
import re
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches,
)
from django.core.cache.utils import make_template_fragment_key
from django.db import connection, connections, transaction
from django.http import HttpRequest, HttpResponse, StreamingHttpResponse
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, TestCase, TransactionTestCase, override_settings,
)
from django.test.signals import setting_changed
from django.utils import six, timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control,
patch_response_headers, patch_vary_headers,
)
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
from .models import Poll, expensive_calculation
try: # Use the same idiom as in cache backends
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpickable(object):
def __getstate__(self):
raise pickle.PickleError()
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(TestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertIsNone(cache.get("key"))
def test_add(self):
"Add doesn't do anything in dummy cache backend"
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertTrue(result)
self.assertIsNone(cache.get("addkey1"))
def test_non_existent(self):
"Non-existent keys aren't found in the dummy cache backend"
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertIsNone(cache.get("key1"))
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertFalse(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertNotIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr, 'answer')
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr, 'answer')
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertIsNone(cache.get("stuff"))
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertIsNone(cache.get("expire2"))
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
cache.set_many({'a': 1, 'b': 2})
cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr_version, 'answer')
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr_version, 'answer')
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
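# e.g. (added note) custom_key_func('answer', 'prefix', 1) -> 'CUSTOM-prefix-1-answer'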
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, **params):
# `base` is used to pull in the memcached config from the original settings,
# `params` are test specific overrides and `_caches_settings_base` is the
# base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
setting = {k: base.copy() for k in _caches_setting_base.keys()}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
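# Illustrative example (added): with base={'BACKEND': 'B'} and
# caches_setting_for_tests(base, LOCATION='loc'), the 'v2' alias resolves to
# {'BACKEND': 'B', 'VERSION': 2, 'LOCATION': 'loc'} -- the test-specific params
# win over _caches_setting_base, which wins over the shared base config.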
class BaseCacheTests(object):
# A common set of tests to apply to all cache backends
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
# Test for same cache key conflicts between shared backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertDictEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertDictEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertTrue(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
cache.set("no_expiry", "here", None)
self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# A field's callable default shouldn't be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# A field's callable default shouldn't be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# A field's callable default shouldn't be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.set("key3", "ham")
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
'''
Using a timeout greater than 30 days makes memcached think
it is an absolute expiration timestamp instead of a relative
offset. Test that we honour this convention. Refs #12399.
'''
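# Descriptive note (an assumption about the backend, not asserted here): the
# memcached cut-off is 60 * 60 * 24 * 30 = 2,592,000 seconds; any larger
# timeout is interpreted by memcached as an absolute Unix timestamp, so the
# backend has to convert such values before passing them along for the
# lookups below to keep working.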
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
'''
Passing None as the timeout results in a value that is cached forever
'''
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', None)
self.assertEqual(cache.get('key2'), 'ham')
added = cache.add('key1', 'new eggs', None)
self.assertEqual(added, False)
self.assertEqual(cache.get('key1'), 'eggs')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_zero_timeout(self):
'''
Passing zero as the timeout results in a value that is not cached
'''
cache.set('key1', 'eggs', 0)
self.assertIsNone(cache.get('key1'))
cache.add('key2', 'ham', 0)
self.assertIsNone(cache.get('key2'))
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertIsNone(cache.get('key3'))
self.assertIsNone(cache.get('key4'))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count = count + 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 19)
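# Rough sketch of the expected counts above (an assumption about the cull
# strategy of the tested backends, not asserted directly): once a set() pushes
# the backend past MAX_ENTRIES, roughly 1/CULL_FREQUENCY of the stored keys
# are evicted, while CULL_FREQUENCY = 0 is the special case "drop everything",
# which is why the zero_cull cache retains fewer of the 49 keys.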
def test_invalid_keys(self):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
# Mimic a custom ``make_key`` method being defined, since the default
# implementation will never trigger the warnings below.
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached does not allow whitespace or control characters in keys
cache.set('key with spaces', 'value')
self.assertEqual(len(w), 2)
self.assertIsInstance(w[0].message, CacheKeyWarning)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached limits key length to 250
cache.set('a' * 251, 'value')
self.assertEqual(len(w), 1)
self.assertIsInstance(w[0].message, CacheKeyWarning)
finally:
cache.key_func = old_func
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertIsNone(cache.get('answer1', version=2))
self.assertIsNone(caches['v2'].get('answer1'))
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertIsNone(caches['v2'].get('answer1', version=2))
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertIsNone(cache.get('answer2'))
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertIsNone(cache.get('answer3'))
self.assertIsNone(cache.get('answer3', version=1))
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertIsNone(caches['v2'].get('answer3', version=1))
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertIsNone(cache.get('answer4', version=2))
self.assertIsNone(caches['v2'].get('answer4'))
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertIsNone(caches['v2'].get('answer4', version=2))
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=1)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37, version=1)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertTrue(cache.has_key('answer1'))
self.assertTrue(cache.has_key('answer1', version=1))
self.assertFalse(cache.has_key('answer1', version=2))
self.assertFalse(caches['v2'].has_key('answer1'))
self.assertTrue(caches['v2'].has_key('answer1', version=1))
self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertIsNone(cache.get('answer2', version=2))
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertIsNone(cache.get('answer3', version=2))
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
self.assertIsNone(cache.get('answer4', version=1))
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.decr('answer1')
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
cache.decr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
caches['v2'].decr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
caches['v2'].decr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1']),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertDictEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2']),
{'ford2': 37, 'arthur2': 42})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3']),
{'ford3': 37, 'arthur3': 42})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertDictEqual(cache.get_many(['ford4', 'arthur4']),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertIsNone(cache.get('answer', version=3))
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertIsNone(cache.get('answer', version=2))
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertIsNone(caches['v2'].get('answer2', version=3))
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertIsNone(cache.get('answer', version=2))
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertIsNone(caches['custom_key'].get('answer1'))
self.assertIsNone(caches['custom_key2'].get('answer1'))
caches['custom_key'].set('answer2', 42)
self.assertIsNone(cache.get('answer2'))
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpickable_object(self):
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
"See https://code.djangoproject.com/ticket/21200"
with self.assertRaises(pickle.PickleError):
cache.add('unpickable', Unpickable())
def test_set_fail_on_pickleerror(self):
"See https://code.djangoproject.com/ticket/21200"
with self.assertRaises(pickle.PickleError):
cache.set('unpickable', Unpickable())
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ['cache']
def setUp(self):
# The super call needs to happen first for the settings override.
super(DBCacheTests, self).setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super(DBCacheTests, self).tearDown()
self.drop_table()
def create_table(self):
management.call_command('createcachetable', verbosity=0, interactive=False)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name('test cache table')
cursor.execute('DROP TABLE %s' % table_name)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 18)
def test_second_call_doesnt_crash(self):
out = six.StringIO()
management.call_command('createcachetable', stdout=out)
self.assertEqual(out.getvalue(),
"Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
out = six.StringIO()
management.call_command(
'createcachetable',
'test cache table',
verbosity=2,
stdout=out,
)
self.assertEqual(out.getvalue(),
"Cache table 'test cache table' created.\n")
def test_clear_commits_transaction(self):
# Ensure the database transaction is committed (#19896)
cache.set("key1", "spam")
cache.clear()
transaction.rollback()
self.assertIsNone(cache.get("key1"))
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter(object):
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
def allow_migrate(self, db, model):
if model._meta.app_label == 'django_cache':
return db == 'other'
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
multi_db = True
@override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
def test_createcachetable_observes_database_router(self):
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable',
database='default',
verbosity=0, interactive=False)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create savepoint (if transactional DDL is supported)
# 3: create the table
# 4: create the index
# 5: release savepoint (if transactional DDL is supported)
num = 5 if connections['other'].features.can_rollback_ddl else 3
with self.assertNumQueries(num, using='other'):
management.call_command('createcachetable',
database='other',
verbosity=0, interactive=False)
class PicklingSideEffect(object):
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
if self.cache._lock.active_writers:
self.locked = True
return {}
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super(LocMemCacheTests, self).setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches['prefix']._cache = cache._cache
caches['prefix']._expire_info = cache._expire_info
caches['v2']._cache = cache._cache
caches['v2']._expire_info = cache._expire_info
caches['custom_key']._cache = cache._cache
caches['custom_key']._expire_info = cache._expire_info
caches['custom_key2']._cache = cache._cache
caches['custom_key2']._expire_info = cache._expire_info
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other'
},
})
def test_multiple_caches(self):
"Check that multiple locmem caches are isolated"
cache.set('value', 42)
self.assertEqual(caches['default'].get('value'), 42)
self.assertIsNone(caches['other'].get('value'))
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set('set', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
cache.add('add', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
cache.incr(key)
self.assertEqual(expire, cache._expire_info[_key])
cache.decr(key)
self.assertEqual(expire, cache._expire_info[_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
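# For reference, a settings entry that this loop would pick up typically looks
# something like the following (values are illustrative, not required):
#
#   CACHES = {
#       ...
#       'memcached': {
#           'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#           'LOCATION': '127.0.0.1:11211',
#       },
#   }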
memcached_params = {}
for _cache_params in settings.CACHES.values():
if _cache_params['BACKEND'].startswith('django.core.cache.backends.memcached.'):
memcached_params = _cache_params
memcached_never_expiring_params = memcached_params.copy()
memcached_never_expiring_params['TIMEOUT'] = None
memcached_far_future_params = memcached_params.copy()
memcached_far_future_params['TIMEOUT'] = 31536000 # 60*60*24*365, 1 year
@unittest.skipUnless(memcached_params, "memcached not available")
@override_settings(CACHES=caches_setting_for_tests(base=memcached_params))
class MemcachedCacheTests(BaseCacheTests, TestCase):
def test_invalid_keys(self):
"""
On memcached, we don't introduce a duplicate key validation
step (for speed reasons), we just let the memcached API
library raise its own exception on bad keys. Refs #6447.
In order to be memcached-API-library agnostic, we only assert
that a generic exception of some kind is raised.
"""
# memcached does not allow whitespace or control characters in keys
self.assertRaises(Exception, cache.set, 'key with spaces', 'value')
# memcached limits key length to 250
self.assertRaises(Exception, cache.set, 'a' * 251, 'value')
# Explicitly display a skipped test if no configured cache uses MemcachedCache
@unittest.skipUnless(
memcached_params.get('BACKEND') == 'django.core.cache.backends.memcached.MemcachedCache',
"cache with python-memcached library not available")
def test_memcached_uses_highest_pickle_version(self):
# Regression test for #19810
for cache_key, cache_config in settings.CACHES.items():
if cache_config['BACKEND'] == 'django.core.cache.backends.memcached.MemcachedCache':
self.assertEqual(caches[cache_key]._cache.pickleProtocol,
pickle.HIGHEST_PROTOCOL)
@override_settings(CACHES=caches_setting_for_tests(base=memcached_never_expiring_params))
def test_default_never_expiring_timeout(self):
# Regression test for #22845
cache.set('infinite_foo', 'bar')
self.assertEqual(cache.get('infinite_foo'), 'bar')
@override_settings(CACHES=caches_setting_for_tests(base=memcached_far_future_params))
def test_default_far_future_timeout(self):
# Regression test for #22845
cache.set('future_foo', 'bar')
self.assertEqual(cache.get('future_foo'), 'bar')
def test_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_zero_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_memcached_deletes_key_on_failed_set(self):
# By default memcached allows objects up to 1MB. For the cache_db session
# backend to always use the current session, memcached needs to delete
# the old key if it fails to set.
# pylibmc doesn't appear to expose SERVER_MAX_VALUE_LENGTH, so this falls
# back to the default value exposed by python-memcached (1 MB).
max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576)
cache.set('small_value', 'a')
self.assertEqual(cache.get('small_value'), 'a')
large_value = 'a' * (max_value_length + 1)
cache.set('small_value', large_value)
# small_value should be deleted, or set if configured to accept larger values
value = cache.get('small_value')
self.assertTrue(value is None or value == large_value)
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super(FileBasedCacheTests, self).setUp()
self.dirname = tempfile.mkdtemp()
# The cache LOCATION cannot be modified through override_settings / modify_settings,
# so the settings are manipulated directly here and the setting_changed signal
# is triggered manually.
for cache_params in settings.CACHES.values():
cache_params.update({'LOCATION': self.dirname})
setting_changed.send(self.__class__, setting='CACHES', enter=False)
def tearDown(self):
super(FileBasedCacheTests, self).tearDown()
# Call parent first, as cache.clear() may recreate cache base directory
shutil.rmtree(self.dirname)
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set('foo', 'bar')
self.assertTrue(os.path.exists(self.dirname))
@override_settings(CACHES={
'default': {
'BACKEND': 'cache.liberal_backend.CacheClass',
},
})
class CustomCacheKeyValidationTests(TestCase):
"""
Tests for the ability to mixin a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
@override_settings(
CACHES={
'default': {
'BACKEND': 'cache.closeable_cache.CacheClass',
}
}
)
class CacheClosingTests(TestCase):
def test_close(self):
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
DEFAULT_MEMORY_CACHES_SETTINGS = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(TestCase):
"""Tests that verify that settings having Cache arguments with a TIMEOUT
set to `None` will create Caches that will set non-expiring keys.
This fixes ticket #22085.
"""
def setUp(self):
# The 5 minute (300 seconds) default expiration time for keys is
# defined in the implementation of the initializer method of the
# BaseCache type.
self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
def tearDown(self):
del self.DEFAULT_TIMEOUT
def test_default_expiration_time_for_keys_is_5_minutes(self):
"""The default expiration time of a cache key is 5 minutes.
This value is defined inside the __init__() method of the
:class:`django.core.cache.backends.base.BaseCache` type.
"""
self.assertEqual(300, self.DEFAULT_TIMEOUT)
def test_caches_with_unset_timeout_has_correct_default_timeout(self):
"""Caches that have the TIMEOUT parameter undefined in the default
settings will use the default 5 minute timeout.
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
"""Memory caches that have the TIMEOUT parameter set to `None` in the
default settings with have `None` as the default timeout.
This means "no timeout".
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertIsNone(cache.default_timeout)
self.assertIsNone(cache.get_backend_timeout())
@override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
def test_caches_with_unset_timeout_set_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter unset will set cache
keys having the default 5 minute timeout.
"""
key = "my-key"
value = "my-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNotNone(cache._expire_info[cache_key])
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter set to `None` will set
a non-expiring key by default.
"""
key = "another-key"
value = "another-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class CacheUtils(TestCase):
"""TestCase for django.utils.cache functions."""
def setUp(self):
self.host = 'www.example.com'
self.path = '/cache/test/'
self.factory = RequestFactory(HTTP_HOST=self.host)
def tearDown(self):
cache.clear()
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
request = self._get_request(self.host, self.path,
method, query_string=query_string)
request._cache_update_cache = True if not update_cache else update_cache
return request
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = HttpResponse()
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
key_prefix = 'localprefix'
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, {'private'}),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, {'private'}),
('private', {'public': True}, {'public'}),
('public', {'public': True}, {'public'}),
('public', {'private': True}, {'private'}),
('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}),
('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
response = HttpResponse()
if initial_cc is not None:
response['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=[
('en', 'English'),
('es', 'Spanish'),
],
)
class CacheI18nTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
def test_cache_key_i18n_formatting(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
# This is tightly coupled to the implementation,
# but it's the most straightforward way to test the key.
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_with_non_ascii_tzname(self):
# Regression test for #17476
class CustomTzName(timezone.UTC):
name = ''
def tzname(self, dt):
return self.name
request = self.factory.get(self.path)
response = HttpResponse()
with timezone.override(CustomTzName()):
CustomTzName.name = 'Hora estándar de Argentina'.encode('UTF-8') # UTF-8 string
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
CustomTzName.name = 'Hora estándar de Argentina' # unicode
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
translation.activate(lang)
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
# cache with non empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# first access, cache must return None
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Check for cache with QUERY_STRING'
response.content = content
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# cache must return content
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# Check that we can recover the cache
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, en_message.encode())
# Check that we use etags
self.assertTrue(get_cache_data.has_header('ETag'))
# Check that we can disable etags
with self.settings(USE_ETAGS=False):
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertFalse(get_cache_data.has_header('ETag'))
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
# change the language again
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
# change the language again
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# This test passes on Python < 3.3 even without the corresponding code
# in UpdateCacheMiddleware, because pickling a StreamingHttpResponse
# fails (http://bugs.python.org/issue14288). LocMemCache silently
# swallows the exception and doesn't store the response in cache.
content = ['Check for cache with streaming content.']
response = StreamingHttpResponse(content)
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
def csrf_view(request):
return HttpResponse(csrf(request)['csrf_token'])
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(TestCase):
def setUp(self):
super(CacheMiddlewareTest, self).setUp()
self.factory = RequestFactory()
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super(CacheMiddlewareTest, self).tearDown()
def test_constructor(self):
"""
Ensure that the constructor correctly distinguishes between using
CacheMiddleware as middleware and using it as a view decorator, and sets
its attributes appropriately in each case.
"""
# If no arguments are passed in construction, it's being used as middleware.
middleware = CacheMiddleware()
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
# If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults":
as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
self.assertEqual(as_view_decorator.cache_alias, 'default') # Value of DEFAULT_CACHE_ALIAS from django.core.cache
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo')
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
def test_middleware(self):
middleware = CacheMiddleware()
prefix_middleware = CacheMiddleware(key_prefix='prefix1')
timeout_middleware = CacheMiddleware(cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertIsNone(result)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertIsNone(result)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
# .. but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
# .. even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
def test_sensitive_cookie_not_cached(self):
"""
Django must prevent caching of responses that set a user-specific (and
maybe security sensitive) cookie in response to a cookie-less request.
"""
csrf_middleware = CsrfViewMiddleware()
cache_middleware = CacheMiddleware()
request = self.factory.get('/view/')
self.assertIsNone(cache_middleware.process_request(request))
csrf_middleware.process_view(request, csrf_view, (), {})
response = csrf_view(request)
response = csrf_middleware.process_response(request, response)
response = cache_middleware.process_response(request, response)
# Inserting a CSRF cookie in a cookie-less request prevented caching.
self.assertIsNone(cache_middleware.process_request(request))
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(TestCase):
"""
Tests various headers with TemplateResponse.
Most are probably redundant since they manipulate the same object anyway,
but the ETag header is special because it relies on the content being
complete (which is not necessarily always the case with a
TemplateResponse).
"""
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
@override_settings(USE_ETAGS=False)
def test_without_etag(self):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertFalse(response.has_header('ETag'))
@override_settings(USE_ETAGS=True)
def test_with_etag(self):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertTrue(response.has_header('ETag'))
class TestMakeTemplateFragmentKey(TestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key,
'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key,
'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key,
'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')
class CacheHandlerTest(TestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertIs(cache1, cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertIsNot(c[0], c[1])
|
{
"content_hash": "ee4bee4c46c8365548d2aa338763f9b9",
"timestamp": "",
"source": "github",
"line_count": 2106,
"max_line_length": 121,
"avg_line_length": 39.6951566951567,
"alnum_prop": 0.6262470394028565,
"repo_name": "devops2014/djangosite",
"id": "c6e76c417c6841369d96f34fc4f60e3bfdccb658",
"size": "83764",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/cache/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52479"
},
{
"name": "JavaScript",
"bytes": "106009"
},
{
"name": "Makefile",
"bytes": "5765"
},
{
"name": "Python",
"bytes": "10489293"
},
{
"name": "Shell",
"bytes": "10452"
}
],
"symlink_target": ""
}
|
import argparse
import pdb
import traceback
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple
@dataclass
class Instruction:
name: str
reg: Optional[str]
offset: Optional[int]
def __str__(self) -> str:
if self.name in ["hlf", "tpl", "inc"]:
return f"{self.name} {self.reg}"
elif self.name in ["jie", "jio"]:
return f"{self.name} {self.reg}, {self.offset}"
elif self.name in ["jmp"]:
return f"{self.name} {self.offset}"
else:
raise ValueError(f"Unkown instruction: {self.name}")
def execute(program: List[Instruction], regs: Dict[str, int], verbose=False) -> Dict[str, int]:
ip: int = 0
while True:
if ip >= len(program):
break
inst = program[ip]
if verbose:
print(f"{ip:05d}: {inst!s} {regs}")
if inst.name == "hlf": # half r
regs[inst.reg] = regs[inst.reg] // 2
ip += 1
elif inst.name == "tpl": # triple r
regs[inst.reg] = regs[inst.reg] * 3
ip += 1
elif inst.name == "inc": # increment r
regs[inst.reg] = regs[inst.reg] + 1
ip += 1
elif inst.name == "jmp": # jump
ip += inst.offset
elif inst.name == "jie": # jump if r is even
if regs[inst.reg] % 2 == 0:
ip += inst.offset
else:
ip += 1
elif inst.name == "jio": # jump if r is one
if regs[inst.reg] == 1:
ip += inst.offset
else:
ip += 1
if verbose:
print(f"final state: ip:{ip} regs:{regs}")
return regs
def solve(program: List[Instruction], verbose=False) -> Tuple[int, int]:
regs: Dict[str, int] = {"a": 0, "b": 0}
regs = execute(program, regs, verbose)
one = regs["b"]
regs: Dict[str, int] = {"a": 1, "b": 0}
regs = execute(program, regs, verbose)
two = regs["b"]
return (one, two)
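# A small sketch (not part of the original solver) showing how `execute` can be
# driven directly with a hand-built program; the four instructions below mirror
# the puzzle's published example and leave register "a" at 2.
def _demo_execute() -> Dict[str, int]:
    demo = [
        Instruction("inc", "a", None),
        Instruction("jio", "a", 2),
        Instruction("tpl", "a", None),
        Instruction("inc", "a", None),
    ]
    return execute(demo, {"a": 0, "b": 0})  # -> {"a": 2, "b": 0}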
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Advent of Code - 2015 - Day 23 - Opening the Turing Lock."
)
parser.add_argument(
"input",
type=str,
default="input.txt",
nargs="?",
help="The puzzle input. (Default %(default)s)",
)
parser.add_argument(
"--verbose",
action="store_true",
default=False,
help="Display extra info. (Default: %(default)s)",
)
args = parser.parse_args()
program: List[Instruction] = []
with open(args.input) as inf:
for line in inf:
parts = line.strip().split()
if parts[0] in ["inc", "tpl", "hlf"]:
inst = Instruction(parts[0], parts[1], None)
elif parts[0] in ["jie", "jio"]:
inst = Instruction(parts[0], parts[1][0], int(parts[2]))
elif parts[0] in ["jmp"]:
inst = Instruction(parts[0], None, int(parts[1]))
else:
raise ValueError("unknown instruction '{line.strip()}'")
program.append(inst)
try:
print(solve(program, verbose=args.verbose))
except Exception:
traceback.print_exc()
pdb.post_mortem()
|
{
"content_hash": "d03f3e42ee37587bc4ab963a4063256e",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 95,
"avg_line_length": 29.276785714285715,
"alnum_prop": 0.5114364135407137,
"repo_name": "BrendanLeber/adventofcode",
"id": "ca437d905b6c26a81af3fb014e39b7bd82767d8b",
"size": "3304",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "2015/23-opening_lock/solve.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62564"
}
],
"symlink_target": ""
}
|
"""
EasyBuild support for iqacml compiler toolchain (includes Intel compilers, QLogicMPI, ACML, BLACS, ScaLAPACK and FFTW).
@author: Kenneth Hoste (Ghent University)
"""
from easybuild.toolchains.compiler.inteliccifort import IntelIccIfort
from easybuild.toolchains.fft.fftw import Fftw
from easybuild.toolchains.linalg.acml import Acml
from easybuild.toolchains.linalg.scalapack import ScaLAPACK
from easybuild.toolchains.mpi.qlogicmpi import QLogicMPI
class Iqacml(IntelIccIfort, QLogicMPI, Acml, ScaLAPACK, Fftw):
"""Compiler toolchain with Intel compilers, QLogic MPI, ACML, ScaLAPACK and FFTW."""
NAME = 'iqacml'
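# Hedged illustration (not part of the original module): easyconfig files select
# this toolchain through the NAME constant defined above; the version string
# below is a made-up placeholder, not a real toolchain release.
_EXAMPLE_EASYCONFIG_TOOLCHAIN = {'name': Iqacml.NAME, 'version': '1.0.0'}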
|
{
"content_hash": "d3f26e5abc13d3b00fd771e1ff78e08a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 119,
"avg_line_length": 39.375,
"alnum_prop": 0.8,
"repo_name": "ULHPC/modules",
"id": "9529746985d5c02f46c2b4310f6c8feec8fe7245",
"size": "1688",
"binary": false,
"copies": "5",
"ref": "refs/heads/devel",
"path": "easybuild/easybuild-framework/easybuild/toolchains/iqacml.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "36174"
},
{
"name": "Perl",
"bytes": "34780"
},
{
"name": "Python",
"bytes": "2711250"
},
{
"name": "Ruby",
"bytes": "932"
},
{
"name": "Shell",
"bytes": "51560"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/space/reverse_engineering/shared_booster_analysis_tool.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "94bb91396ebf3394907b27bca49328f3",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 102,
"avg_line_length": 25.46153846153846,
"alnum_prop": 0.7099697885196374,
"repo_name": "anhstudios/swganh",
"id": "ad347f5b445420f2fc4f6a65d936cd02d6d94407",
"size": "476",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/draft_schematic/space/reverse_engineering/shared_booster_analysis_tool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class TransactionStreamAmount(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'amount': (float,), # noqa: E501
'iso_currency_code': (str, none_type,), # noqa: E501
'unofficial_currency_code': (str, none_type,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'amount': 'amount', # noqa: E501
'iso_currency_code': 'iso_currency_code', # noqa: E501
'unofficial_currency_code': 'unofficial_currency_code', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""TransactionStreamAmount - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
amount (float): Represents the numerical value of an amount.. [optional] # noqa: E501
iso_currency_code (str, none_type): The ISO-4217 currency code of the amount. Always `null` if `unofficial_currency_code` is non-`null`. See the [currency code schema](https://plaid.com/docs/api/accounts#currency-code-schema) for a full listing of supported `iso_currency_code`s.. [optional] # noqa: E501
unofficial_currency_code (str, none_type): The unofficial currency code of the amount. Always `null` if `iso_currency_code` is non-`null`. Unofficial currency codes are used for currencies that do not have official ISO currency codes, such as cryptocurrencies and the currencies of certain countries.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
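# A minimal, hypothetical sketch (not part of the generated module) of building
# the model directly; in normal use instances are deserialized from API
# responses by the client.
def _example_transaction_stream_amount():
    return TransactionStreamAmount(
        amount=4.22,                    # numerical value of the amount
        iso_currency_code="USD",        # ISO-4217 code
        unofficial_currency_code=None,  # null whenever iso_currency_code is set
    )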
|
{
"content_hash": "f1c916ee920435fce5eb21bb293ae293",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 338,
"avg_line_length": 43.72727272727273,
"alnum_prop": 0.5752338877338877,
"repo_name": "plaid/plaid-python",
"id": "04405642bce60b0d1e6f78483818ad6709b81331",
"size": "7696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaid/model/transaction_stream_amount.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "323"
},
{
"name": "Makefile",
"bytes": "622"
},
{
"name": "Mustache",
"bytes": "125163"
},
{
"name": "Python",
"bytes": "9342874"
}
],
"symlink_target": ""
}
|
from bokeh.layouts import column
from bokeh.models.widgets import Div
from bokeh.models import HoverTool, ColumnDataSource, PrintfTickFormatter
from bokeh.models import LinearColorMapper, ColorBar
from dashboard.bokeh.plots.descriptors.table import Table
from dashboard.bokeh.plots.descriptors.title import Title
from qlf_models import QLFModels
from dashboard.bokeh.helper import get_palette
import logging
from bokeh.resources import CDN
from bokeh.embed import file_html
logger = logging.getLogger(__name__)
class Flat:
def __init__(self, process_id, arm, spectrograph):
self.selected_process_id = process_id
self.selected_arm = arm
self.selected_spectrograph = spectrograph
def load_qa(self):
cam = self.selected_arm+str(self.selected_spectrograph)
mergedqa = QLFModels().get_output(self.selected_process_id, cam)
check_flat = mergedqa['TASKS']['CHECK_FIBERFLAT']
flat = check_flat['METRICS']
nrg = check_flat['PARAMS']['CHECKFLAT_NORMAL_RANGE']
wrg = check_flat['PARAMS']['CHECKFLAT_WARN_RANGE']
info_col = Title().write_description('fiberflat')
# Prepare tables
current_exposures = check_flat['METRICS']['CHECKFLAT']
reference_exposures = check_flat['PARAMS']['CHECKFLAT_REF']
keynames = ["CHECKFLAT"]
table = Table().single_table(keynames, [current_exposures], reference_exposures,
nrg, wrg)
layout = column(info_col, Div(),
table, Div(),
css_classes=["display-grid"])
return file_html(layout, CDN, "FIBERFLAT")
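# Hedged usage sketch (not part of the original module): the process id, arm
# and spectrograph values below are illustrative placeholders.
def _example_flat_report():
    return Flat(process_id=69, arm='b', spectrograph=0).load_qa()  # HTML string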
|
{
"content_hash": "acd521056ee46febead23d27abb8f653",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 89,
"avg_line_length": 33.93877551020408,
"alnum_prop": 0.6632591701743836,
"repo_name": "desihub/qlf",
"id": "acc40831c175fce738067372ca956f256f92cac9",
"size": "1663",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "backend/framework/qlf/dashboard/bokeh/qacheckflat/main.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "440"
},
{
"name": "Dockerfile",
"bytes": "2304"
},
{
"name": "HTML",
"bytes": "3969"
},
{
"name": "JavaScript",
"bytes": "390225"
},
{
"name": "Jupyter Notebook",
"bytes": "50033"
},
{
"name": "Python",
"bytes": "306541"
},
{
"name": "Shell",
"bytes": "6807"
}
],
"symlink_target": ""
}
|
from sqlalchemy.engine import Connection
from time import time
from thread import get_ident
from weakref import ref
thread_query_logs = {}
def _time(fn):
start = time()
fn()
end = time()
return end - start
# wrap _cursor_execute, _cursor_executemany w/ timing code
o1, o2 = Connection._cursor_execute, Connection._cursor_executemany
def _cursor_execute(self, c, statement, *args, **kwargs):
t = _time(lambda: o1(self, c, statement, *args, **kwargs))
query_log = thread_query_logs.get(get_ident(), None)
if query_log:
query_log().queries.append((statement, t))
def _cursor_executemany(self, c, statement, *args, **kwargs):
t = _time(lambda: o2(self, c, statement, *args, **kwargs))
query_log = thread_query_logs.get(get_ident(), None)
if query_log:
query_log().queries.append((statement, t))
Connection._cursor_execute = _cursor_execute
Connection._cursor_executemany = _cursor_executemany
class QueryLog(object):
"""Query logger for SQLAlchemy.
Very simple at the moment. Logging is done by thread, so be sure to
create and release logger objects at the beginning and end of your
requests. I apologize for this kludge, but there doesn't seem to be any
better way to hack into SQLAlchemy with the same precision.
Queries are stored in self.queries as a list of (query, time) tuples.
"""
def __init__(self):
"""Creates a new query logger and registers it to this thread."""
self.queries = []
thread_query_logs[get_ident()] = ref(self)
def __del__(self):
"""Removes the logger for this thread."""
try:
del thread_query_logs[get_ident()]
except KeyError:
pass
def time_elapsed(self):
"""Returns the total time spent in SQL since this logger was created.
"""
total_time = 0
for query, time in self.queries:
total_time += time
return total_time
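# Minimal usage sketch (not part of the original module): create a QueryLog at
# the start of a request, run SQLAlchemy queries as usual, then read the log
# before dropping the reference so the thread is unregistered.
def _example_request_cycle(run_request):
    log = QueryLog()                 # registers itself for the current thread
    run_request()                    # SQLAlchemy work done here is recorded
    queries = list(log.queries)      # [(statement, seconds), ...]
    elapsed = log.time_elapsed()
    del log                          # drops the thread registration
    return queries, elapsed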
|
{
"content_hash": "a52d694ffea0c99a0509d2b8a52f1ad2",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 77,
"avg_line_length": 33.40677966101695,
"alnum_prop": 0.6484018264840182,
"repo_name": "NetShepsky/Ferrox",
"id": "f2442fe4d9e8519aa887c91d62ddbf3562041955",
"size": "2069",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ferrox/lib/querylog.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""
This is an example dag for using `ImapAttachmentToS3Operator` to transfer an email attachment via IMAP
protocol from a mail server to S3 Bucket.
"""
from datetime import datetime
from os import getenv
from airflow import DAG
from airflow.providers.amazon.aws.transfers.imap_attachment_to_s3 import ImapAttachmentToS3Operator
IMAP_ATTACHMENT_NAME = getenv("IMAP_ATTACHMENT_NAME", "test.txt")
IMAP_MAIL_FOLDER = getenv("IMAP_MAIL_FOLDER", "INBOX")
IMAP_MAIL_FILTER = getenv("IMAP_MAIL_FILTER", "All")
S3_BUCKET = getenv("S3_BUCKET", "test-bucket")
S3_KEY = getenv("S3_KEY", "key")
with DAG(
dag_id="example_imap_attachment_to_s3",
start_date=datetime(2021, 1, 1),
schedule_interval=None,
catchup=False,
tags=['example'],
) as dag:
# [START howto_transfer_imap_attachment_to_s3]
task_transfer_imap_attachment_to_s3 = ImapAttachmentToS3Operator(
task_id='transfer_imap_attachment_to_s3',
imap_attachment_name=IMAP_ATTACHMENT_NAME,
s3_bucket=S3_BUCKET,
s3_key=S3_KEY,
imap_mail_folder=IMAP_MAIL_FOLDER,
imap_mail_filter=IMAP_MAIL_FILTER,
)
# [END howto_transfer_imap_attachment_to_s3]
|
{
"content_hash": "48a62b97884f761f6516ca0c772ca1f3",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 102,
"avg_line_length": 34.470588235294116,
"alnum_prop": 0.7090443686006825,
"repo_name": "Acehaidrey/incubator-airflow",
"id": "357d92a6f5694d9cf749d0263708a61d15d12650",
"size": "1958",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "airflow/providers/amazon/aws/example_dags/example_imap_attachment_to_s3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21727510"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495253"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
}
|
"""OpenGL-ctypes setup script (setuptools-based)
"""
import sys, os
extra_commands = {}
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.hexversion >= 0x3000000:
try:
from distutils.command.build_py import build_py_2to3
extra_commands['build_py'] = build_py_2to3
except ImportError:
pass
sys.path.insert(0, '.' )
import metadata
def is_package( path ):
return os.path.isfile( os.path.join( path, '__init__.py' ))
def find_packages( root ):
"""Find all packages under this directory"""
for path, directories, files in os.walk( root ):
if is_package( path ):
yield path.replace( '/','.' )
requirements = []
if sys.hexversion < 0x2050000:
requirements.append( 'ctypes' )
from distutils.command.install_data import install_data
class smart_install_data(install_data):
def run(self):
#need to change self.install_dir to the library dir
install_cmd = self.get_finalized_command('install')
self.install_dir = getattr(install_cmd, 'install_lib')
# should create the directory if it doesn't exist!!!
return install_data.run(self)
extra_commands['install_data'] = smart_install_data
if sys.platform == 'win32':
# binary versions of GLUT and GLE for Win32 (sigh)
DLL_DIRECTORY = os.path.join('OpenGL','DLLS')
datafiles = [
(
DLL_DIRECTORY, [
os.path.join( DLL_DIRECTORY,file)
for file in os.listdir( DLL_DIRECTORY )
                if os.path.isfile( os.path.join( DLL_DIRECTORY, file ) )
]
),
]
else:
datafiles = []
if __name__ == "__main__":
setup(
name = "PyOpenGL",
packages = list( find_packages('OpenGL') ),
description = 'Standard OpenGL bindings for Python',
options = {
'sdist': {
'formats': ['gztar','zip'],
'force_manifest': True,
},
},
data_files = datafiles,
cmdclass = extra_commands,
use_2to3 = True,
**metadata.metadata
)
|
{
"content_hash": "2f32e5bc96ad76437f00acde54cd3476",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 63,
"avg_line_length": 29.52112676056338,
"alnum_prop": 0.5906488549618321,
"repo_name": "frederica07/Dragon_Programming_Process",
"id": "e6f076cd45e8da3ffc96e7b244d1df7695111c8c",
"size": "2119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyOpenGL-3.0.2/setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1548"
},
{
"name": "Python",
"bytes": "2558317"
}
],
"symlink_target": ""
}
|
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.sites.models import get_current_site
from django.utils.translation import ugettext_lazy as _
from .models import Hub, HubUser
from .utils import create_hub
from .backends import InvitationBackend
class HubForm(forms.ModelForm):
"""Form class for updating Hubs"""
owner = forms.ModelChoiceField(HubUser.objects.all())
def __init__(self, request, *args, **kwargs):
self.request = request
super(HubForm, self).__init__(*args, **kwargs)
self.fields['owner'].queryset = self.instance.hub_users.filter(
is_admin=True, user__is_active=True)
self.fields['owner'].initial = self.instance.owner.hub_user
class Meta:
model = Hub
exclude = ('users', 'is_enabled')
def save(self, commit=True):
if self.instance.owner.hub_user != self.cleaned_data['owner']:
self.instance.owner.hub_user = self.cleaned_data['owner']
self.instance.owner.save()
return super(HubForm, self).save(commit=commit)
def clean_owner(self):
owner = self.cleaned_data['owner']
if owner != self.instance.owner.hub_user:
if self.request.user != self.instance.owner.hub_user.user:
raise forms.ValidationError(
_("Only the hub owner can change ownerhip"))
return owner
class HubUserForm(forms.ModelForm):
"""Form class for updating HubUsers"""
class Meta:
model = HubUser
exclude = ('hub', 'user')
def clean_is_admin(self):
is_admin = self.cleaned_data['is_admin']
if self.instance.hub.owner.hub_user == self.instance and not is_admin:
raise forms.ValidationError(_("The hub owner must be an admin"))
return is_admin
class HubUserAddForm(forms.ModelForm):
"""Form class for adding HubUsers to an existing Hub"""
email = forms.EmailField(max_length=75)
def __init__(self, request, hub, *args, **kwargs):
self.request = request
self.hub = hub
super(HubUserAddForm, self).__init__(*args, **kwargs)
class Meta:
model = HubUser
exclude = ('user', 'hub')
def save(self, *args, **kwargs):
"""
The save method should create a new HubUser linking the User
        matching the provided email address. If no matching User is found it
should kick off the registration process. It needs to create a User in
order to link it to the Hub.
"""
try:
user = get_user_model().objects.get(
email__iexact=self.cleaned_data['email'])
except get_user_model().MultipleObjectsReturned:
raise forms.ValidationError(
_("This email address has been used multiple times."))
except get_user_model().DoesNotExist:
user = InvitationBackend().invite_by_email(
self.cleaned_data['email'],
**{'domain': get_current_site(self.request),
'hub': self.hub})
return HubUser.objects.create(
user=user, hub=self.hub, is_admin=self.cleaned_data['is_admin'])
def clean_email(self):
email = self.cleaned_data['email']
if self.hub.users.filter(email=email):
raise forms.ValidationError(_("There is already an hub "
"member with this email address!"))
return email
class HubAddForm(forms.ModelForm):
"""
Form class for creating a new hub, complete with new owner, including a
User instance, HubUser instance, and HubOwner instance.
"""
email = forms.EmailField(
max_length=75, help_text=_("The email address for the account owner"))
def __init__(self, request, *args, **kwargs):
self.request = request
super(HubAddForm, self).__init__(*args, **kwargs)
class Meta:
model = Hub
exclude = ('users', 'is_enabled')
def save(self, **kwargs):
"""
Create the hub, then get the user, then make the owner.
"""
is_enabled = True
try:
user = get_user_model().objects.get(
email=self.cleaned_data['email'])
except get_user_model().DoesNotExist:
user = InvitationBackend().invite_by_email(
self.cleaned_data['email'],
**{'domain': get_current_site(self.request),
'hub': self.cleaned_data['name'],
'sender': self.request.user, 'created': True})
is_enabled = False
return create_hub(
user, self.cleaned_data['name'],
self.cleaned_data['slug'], is_enabled=is_enabled)
class SignUpForm(forms.Form):
"""
    Form class for signing up a new user and new account.
"""
name = forms.CharField(
max_length=50, help_text=_("The name of the hub"))
# TODO don't need, as auto generated is better, add private/public toggle
slug = forms.SlugField(
max_length=50,
help_text=_("The name in all lowercase, "
"suitable for URL identification"))
email = forms.EmailField()
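# Hedged sketch (not part of the original module) of wiring HubUserAddForm into
# a view; `some_hub`, the bound/unbound handling and the return value are
# illustrative only.
def _example_add_member(request, some_hub):
    form = HubUserAddForm(request, some_hub, data=request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.save()  # links an existing user or invites one by email
    return form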
|
{
"content_hash": "c05fa9ebf648212a5606df3e3ec05fb7",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 78,
"avg_line_length": 36.201388888888886,
"alnum_prop": 0.5996547093803951,
"repo_name": "toolhub/toolhub.co",
"id": "99dd6df47935821ca9c6c5454c84e78fe8cac4b7",
"size": "5213",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "hubs/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9685"
},
{
"name": "JavaScript",
"bytes": "2800"
},
{
"name": "Python",
"bytes": "123001"
},
{
"name": "Shell",
"bytes": "7558"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import sys
from apscheduler.executors.base import BaseExecutor, run_job
try:
import gevent
except ImportError: # pragma: nocover
raise ImportError('GeventExecutor requires gevent installed')
class GeventExecutor(BaseExecutor):
"""
Runs jobs as greenlets.
Plugin alias: ``gevent``
"""
def _do_submit_job(self, job, run_times):
def callback(greenlet):
try:
events = greenlet.get()
except:
self._run_job_error(job.id, *sys.exc_info()[1:])
else:
self._run_job_success(job.id, events)
gevent.spawn(run_job, job, job._jobstore_alias, run_times, self._logger.name).\
link(callback)
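# Hedged usage sketch (not part of the original module): pair the executor with
# the gevent-based scheduler; `tick` is a placeholder callable.
def _example_scheduler(tick):
    from apscheduler.schedulers.gevent import GeventScheduler
    scheduler = GeventScheduler(executors={'default': GeventExecutor()})
    scheduler.add_job(tick, 'interval', seconds=10)
    return scheduler  # call .start() to begin running `tick` every 10 seconds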
|
{
"content_hash": "a5db953394dc42fc9082093130382c11",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 87,
"avg_line_length": 25.433333333333334,
"alnum_prop": 0.6068152031454783,
"repo_name": "171121130/SWI",
"id": "a12b806af3825d739bce9e8c3905c9f7a08b3448",
"size": "763",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/apscheduler/executors/gevent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1316"
},
{
"name": "C",
"bytes": "311064"
},
{
"name": "C++",
"bytes": "212386"
},
{
"name": "CSS",
"bytes": "7326"
},
{
"name": "GAP",
"bytes": "11337"
},
{
"name": "HTML",
"bytes": "155293"
},
{
"name": "JavaScript",
"bytes": "6187"
},
{
"name": "Mako",
"bytes": "9463"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "15004056"
},
{
"name": "Tcl",
"bytes": "1284698"
}
],
"symlink_target": ""
}
|
"""
Client side of the compute RPC API.
"""
from oslo.config import cfg
from nova import exception
from nova.objects import base as objects_base
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova import rpcclient
rpcapi_opts = [
cfg.StrOpt('compute_topic',
default='compute',
help='the topic compute nodes listen on'),
]
CONF = cfg.CONF
CONF.register_opts(rpcapi_opts)
rpcapi_cap_opt = cfg.StrOpt('compute',
help='Set a version cap for messages sent to compute services. If you '
'plan to do a live upgrade from havana to icehouse, you should '
'set this option to "icehouse-compat" before beginning the live '
'upgrade procedure.')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
def _compute_host(host, instance):
'''Get the destination host for a message.
:param host: explicit host to send the message to.
:param instance: If an explicit host was not specified, use
instance['host']
:returns: A host
'''
if host:
return host
if not instance:
raise exception.NovaException(_('No compute host specified'))
if not instance['host']:
raise exception.NovaException(_('Unable to find host for '
'Instance %s') % instance['uuid'])
return instance['host']
def _icehouse_compat():
return CONF.upgrade_levels.compute == 'icehouse-compat'
def _get_version(version):
# NOTE(russellb) If "[upgrade_levels] compute=icehouse-compat" is set in
# the config, we switch into a special mode where we send 3.0 as the
# version number instead of 2.latest. 3.0 == 2.latest, and both Havana
# and Icehouse compute nodes support 3.0. This allows for a live
# upgrade environment with a mix of Havana and Icehouse compute nodes.
if _icehouse_compat():
return ComputeAPI.VERSION_ALIASES['icehouse-compat']
return version
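# Hedged illustration (not part of the original module) of the version-cap
# behaviour described above: with "[upgrade_levels] compute = icehouse-compat"
# set in nova.conf, every requested version collapses to the '3.0' alias.
def _example_get_version():
    return _get_version('2.45')  # '2.45' normally, '3.0' in icehouse-compat mode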
class ComputeAPI(rpcclient.RpcProxy):
'''Client side of the compute rpc API.
API version history:
1.0 - Initial version.
1.1 - Adds get_host_uptime()
1.2 - Adds check_can_live_migrate_[destination|source]
1.3 - Adds change_instance_metadata()
1.4 - Remove instance_uuid, add instance argument to reboot_instance()
1.5 - Remove instance_uuid, add instance argument to pause_instance(),
unpause_instance()
1.6 - Remove instance_uuid, add instance argument to suspend_instance()
1.7 - Remove instance_uuid, add instance argument to
get_console_output()
1.8 - Remove instance_uuid, add instance argument to
add_fixed_ip_to_instance()
1.9 - Remove instance_uuid, add instance argument to attach_volume()
1.10 - Remove instance_id, add instance argument to
check_can_live_migrate_destination()
1.11 - Remove instance_id, add instance argument to
check_can_live_migrate_source()
1.12 - Remove instance_uuid, add instance argument to confirm_resize()
1.13 - Remove instance_uuid, add instance argument to detach_volume()
1.14 - Remove instance_uuid, add instance argument to finish_resize()
1.15 - Remove instance_uuid, add instance argument to
finish_revert_resize()
1.16 - Remove instance_uuid, add instance argument to get_diagnostics()
1.17 - Remove instance_uuid, add instance argument to get_vnc_console()
1.18 - Remove instance_uuid, add instance argument to inject_file()
1.19 - Remove instance_uuid, add instance argument to
inject_network_info()
1.20 - Remove instance_id, add instance argument to
post_live_migration_at_destination()
1.21 - Remove instance_uuid, add instance argument to
power_off_instance() and stop_instance()
1.22 - Remove instance_uuid, add instance argument to
power_on_instance() and start_instance()
1.23 - Remove instance_id, add instance argument to
pre_live_migration()
1.24 - Remove instance_uuid, add instance argument to
rebuild_instance()
1.25 - Remove instance_uuid, add instance argument to
remove_fixed_ip_from_instance()
1.26 - Remove instance_id, add instance argument to
remove_volume_connection()
1.27 - Remove instance_uuid, add instance argument to
rescue_instance()
1.28 - Remove instance_uuid, add instance argument to reset_network()
1.29 - Remove instance_uuid, add instance argument to resize_instance()
1.30 - Remove instance_uuid, add instance argument to resume_instance()
1.31 - Remove instance_uuid, add instance argument to revert_resize()
1.32 - Remove instance_id, add instance argument to
rollback_live_migration_at_destination()
1.33 - Remove instance_uuid, add instance argument to
set_admin_password()
1.34 - Remove instance_uuid, add instance argument to
snapshot_instance()
1.35 - Remove instance_uuid, add instance argument to
unrescue_instance()
1.36 - Remove instance_uuid, add instance argument to
change_instance_metadata()
1.37 - Remove instance_uuid, add instance argument to
terminate_instance()
1.38 - Changes to prep_resize():
- remove instance_uuid, add instance
- remove instance_type_id, add instance_type
- remove topic, it was unused
1.39 - Remove instance_uuid, add instance argument to run_instance()
1.40 - Remove instance_id, add instance argument to live_migration()
1.41 - Adds refresh_instance_security_rules()
1.42 - Add reservations arg to prep_resize(), resize_instance(),
finish_resize(), confirm_resize(), revert_resize() and
finish_revert_resize()
1.43 - Add migrate_data to live_migration()
1.44 - Adds reserve_block_device_name()
2.0 - Remove 1.x backwards compat
2.1 - Adds orig_sys_metadata to rebuild_instance()
2.2 - Adds slave_info parameter to add_aggregate_host() and
remove_aggregate_host()
2.3 - Adds volume_id to reserve_block_device_name()
2.4 - Add bdms to terminate_instance
2.5 - Add block device and network info to reboot_instance
2.6 - Remove migration_id, add migration to resize_instance
2.7 - Remove migration_id, add migration to confirm_resize
2.8 - Remove migration_id, add migration to finish_resize
2.9 - Add publish_service_capabilities()
2.10 - Adds filter_properties and request_spec to prep_resize()
2.11 - Adds soft_delete_instance() and restore_instance()
2.12 - Remove migration_id, add migration to revert_resize
2.13 - Remove migration_id, add migration to finish_revert_resize
2.14 - Remove aggregate_id, add aggregate to add_aggregate_host
2.15 - Remove aggregate_id, add aggregate to remove_aggregate_host
2.16 - Add instance_type to resize_instance
2.17 - Add get_backdoor_port()
2.18 - Add bdms to rebuild_instance
2.19 - Add node to run_instance
2.20 - Add node to prep_resize
2.21 - Add migrate_data dict param to pre_live_migration()
2.22 - Add recreate, on_shared_storage and host arguments to
rebuild_instance()
2.23 - Remove network_info from reboot_instance
2.24 - Added get_spice_console method
2.25 - Add attach_interface() and detach_interface()
2.26 - Add validate_console_port to ensure the service connects to
vnc on the correct port
2.27 - Adds 'reservations' to terminate_instance() and
soft_delete_instance()
... Grizzly supports message version 2.27. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 2.27.
2.28 - Adds check_instance_shared_storage()
2.29 - Made start_instance() and stop_instance() take new-world
instance objects
2.30 - Adds live_snapshot_instance()
2.31 - Adds shelve_instance(), shelve_offload_instance, and
unshelve_instance()
2.32 - Make reboot_instance take a new world instance object
2.33 - Made suspend_instance() and resume_instance() take new-world
instance objects
2.34 - Added swap_volume()
2.35 - Made terminate_instance() and soft_delete_instance() take
new-world instance objects
2.36 - Made pause_instance() and unpause_instance() take new-world
instance objects
        2.37 - Added the legacy_bdm_in_spec parameter to run_instance
2.38 - Made check_can_live_migrate_[destination|source] take
new-world instance objects
2.39 - Made revert_resize() and confirm_resize() take new-world
instance objects
2.40 - Made reset_network() take new-world instance object
2.41 - Make inject_network_info take new-world instance object
2.42 - Splits snapshot_instance() into snapshot_instance() and
backup_instance() and makes them take new-world instance
objects.
2.43 - Made prep_resize() take new-world instance object
2.44 - Add volume_snapshot_create(), volume_snapshot_delete()
2.45 - Made resize_instance() take new-world objects
2.46 - Made finish_resize() take new-world objects
2.47 - Made finish_revert_resize() take new-world objects
'''
#
# NOTE(russellb): This is the default minimum version that the server
# (manager) side must implement unless otherwise specified using a version
# argument to self.call()/cast()/etc. here. It should be left as X.0 where
# X is the current major API version (1.0, 2.0, ...). For more information
# about rpc API versioning, see the docs in
# openstack/common/rpc/dispatcher.py.
#
BASE_RPC_API_VERSION = '2.0'
VERSION_ALIASES = {
'grizzly': '2.27',
'icehouse-compat': '3.0',
}
def __init__(self):
version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.compute,
CONF.upgrade_levels.compute)
super(ComputeAPI, self).__init__(
topic=CONF.compute_topic,
default_version=self.BASE_RPC_API_VERSION,
serializer=objects_base.NovaObjectSerializer(),
version_cap=version_cap)
self.client = self.get_client()
def add_aggregate_host(self, ctxt, aggregate, host_param, host,
slave_info=None):
'''Add aggregate host.
:param ctxt: request context
:param aggregate_id:
:param host_param: This value is placed in the message to be the 'host'
parameter for the remote method.
:param host: This is the host to send the message to.
'''
aggregate_p = jsonutils.to_primitive(aggregate)
cctxt = self.client.prepare(server=host,
version=_get_version('2.14'))
cctxt.cast(ctxt, 'add_aggregate_host',
aggregate=aggregate_p, host=host_param,
slave_info=slave_info)
def add_fixed_ip_to_instance(self, ctxt, instance, network_id):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.0'))
cctxt.cast(ctxt, 'add_fixed_ip_to_instance',
instance=instance_p, network_id=network_id)
def attach_interface(self, ctxt, instance, network_id, port_id,
requested_ip):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.25'))
return cctxt.call(ctxt, 'attach_interface',
instance=instance_p, network_id=network_id,
port_id=port_id, requested_ip=requested_ip)
def attach_volume(self, ctxt, instance, volume_id, mountpoint):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.0'))
cctxt.cast(ctxt, 'attach_volume',
instance=instance_p, volume_id=volume_id,
mountpoint=mountpoint)
def change_instance_metadata(self, ctxt, instance, diff):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.0'))
cctxt.cast(ctxt, 'change_instance_metadata',
instance=instance_p, diff=diff)
def check_can_live_migrate_destination(self, ctxt, instance, destination,
block_migration, disk_over_commit):
if _icehouse_compat() or self.client.can_send_version('2.38'):
version = _get_version('2.38')
else:
version = '2.0'
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
cctxt = self.client.prepare(server=destination,
version=version)
return cctxt.call(ctxt, 'check_can_live_migrate_destination',
instance=instance,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
if _icehouse_compat() or self.client.can_send_version('2.38'):
version = _get_version('2.38')
else:
version = '2.0'
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
return cctxt.call(ctxt, 'check_can_live_migrate_source',
instance=instance,
dest_check_data=dest_check_data)
def check_instance_shared_storage(self, ctxt, instance, data):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.28'))
return cctxt.call(ctxt, 'check_instance_shared_storage',
instance=instance_p,
data=data)
def confirm_resize(self, ctxt, instance, migration, host,
reservations=None, cast=True):
if _icehouse_compat() or self.client.can_send_version('2.39'):
version = _get_version('2.39')
else:
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
migration = jsonutils.to_primitive(
objects_base.obj_to_primitive(migration))
version = '2.7'
cctxt = self.client.prepare(server=_compute_host(host, instance),
version=version)
rpc_method = cctxt.cast if cast else cctxt.call
return rpc_method(ctxt, 'confirm_resize',
instance=instance, migration=migration,
reservations=reservations)
def detach_interface(self, ctxt, instance, port_id):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.25'))
cctxt.cast(ctxt, 'detach_interface',
instance=instance_p, port_id=port_id)
def detach_volume(self, ctxt, instance, volume_id):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.0'))
cctxt.cast(ctxt, 'detach_volume',
instance=instance_p, volume_id=volume_id)
def finish_resize(self, ctxt, instance, migration, image, disk_info,
host, reservations=None):
if _icehouse_compat() or self.client.can_send_version('2.46'):
version = _get_version('2.46')
else:
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
migration = jsonutils.to_primitive(
objects_base.obj_to_primitive(migration))
version = '2.8'
cctxt = self.client.prepare(server=host,
version=version)
cctxt.cast(ctxt, 'finish_resize',
instance=instance, migration=migration,
image=image, disk_info=disk_info, reservations=reservations)
def finish_revert_resize(self, ctxt, instance, migration, host,
reservations=None):
if _icehouse_compat() or self.client.can_send_version('2.47'):
version = _get_version('2.47')
else:
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
migration = jsonutils.to_primitive(
objects_base.obj_to_primitive(migration))
version = '2.13'
cctxt = self.client.prepare(server=host,
version=version)
cctxt.cast(ctxt, 'finish_revert_resize',
instance=instance, migration=migration,
reservations=reservations)
def get_console_output(self, ctxt, instance, tail_length):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.0'))
return cctxt.call(ctxt, 'get_console_output',
instance=instance_p, tail_length=tail_length)
def get_console_pool_info(self, ctxt, console_type, host):
cctxt = self.client.prepare(server=host,
version=_get_version('2.0'))
return cctxt.call(ctxt, 'get_console_pool_info',
console_type=console_type)
def get_console_topic(self, ctxt, host):
cctxt = self.client.prepare(server=host,
version=_get_version('2.0'))
return cctxt.call(ctxt, 'get_console_topic')
def get_diagnostics(self, ctxt, instance):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.0'))
return cctxt.call(ctxt, 'get_diagnostics',
instance=instance_p)
def get_vnc_console(self, ctxt, instance, console_type):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.0'))
return cctxt.call(ctxt, 'get_vnc_console',
instance=instance_p, console_type=console_type)
def get_spice_console(self, ctxt, instance, console_type):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.24'))
return cctxt.call(ctxt, 'get_spice_console',
instance=instance_p, console_type=console_type)
def validate_console_port(self, ctxt, instance, port, console_type):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.26'))
return cctxt.call(ctxt, 'validate_console_port',
instance=instance_p, port=port,
console_type=console_type)
def host_maintenance_mode(self, ctxt, host_param, mode, host):
'''Set host maintenance mode
:param ctxt: request context
:param host_param: This value is placed in the message to be the 'host'
parameter for the remote method.
:param mode:
:param host: This is the host to send the message to.
'''
cctxt = self.client.prepare(server=host,
version=_get_version('2.0'))
return cctxt.call(ctxt, 'host_maintenance_mode',
host=host_param, mode=mode)
def host_power_action(self, ctxt, action, host):
cctxt = self.client.prepare(server=host,
version=_get_version('2.0'))
return cctxt.call(ctxt, 'host_power_action', action=action)
def inject_file(self, ctxt, instance, path, file_contents):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.0'))
cctxt.cast(ctxt, 'inject_file',
instance=instance_p, path=path,
file_contents=file_contents)
def inject_network_info(self, ctxt, instance):
if _icehouse_compat() or self.client.can_send_version('2.41'):
version = _get_version('2.41')
else:
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
version = '2.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'inject_network_info', instance=instance)
def live_migration(self, ctxt, instance, dest, block_migration, host,
migrate_data=None):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=host,
version=_get_version('2.0'))
cctxt.cast(ctxt, 'live_migration', instance=instance_p,
dest=dest, block_migration=block_migration,
migrate_data=migrate_data)
def pause_instance(self, ctxt, instance):
if _icehouse_compat() or self.client.can_send_version('2.36'):
version = _get_version('2.36')
else:
version = '2.0'
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'pause_instance', instance=instance)
def post_live_migration_at_destination(self, ctxt, instance,
block_migration, host):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=host,
version=_get_version('2.0'))
return cctxt.call(ctxt,
'post_live_migration_at_destination',
instance=instance_p, block_migration=block_migration)
def power_off_instance(self, ctxt, instance):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.0'))
cctxt.cast(ctxt, 'power_off_instance', instance=instance_p)
def power_on_instance(self, ctxt, instance):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.0'))
cctxt.cast(ctxt, 'power_on_instance', instance=instance_p)
def pre_live_migration(self, ctxt, instance, block_migration, disk,
host, migrate_data=None):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=host,
version=_get_version('2.21'))
return cctxt.call(ctxt, 'pre_live_migration',
instance=instance_p,
block_migration=block_migration,
disk=disk, migrate_data=migrate_data)
def prep_resize(self, ctxt, image, instance, instance_type, host,
reservations=None, request_spec=None,
filter_properties=None, node=None):
if _icehouse_compat() or self.client.can_send_version('2.43'):
version = _get_version('2.43')
else:
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
version = '2.20'
instance_type_p = jsonutils.to_primitive(instance_type)
image_p = jsonutils.to_primitive(image)
cctxt = self.client.prepare(server=host,
version=version)
cctxt.cast(ctxt, 'prep_resize',
instance=instance,
instance_type=instance_type_p,
image=image_p, reservations=reservations,
request_spec=request_spec,
filter_properties=filter_properties,
node=node)
def reboot_instance(self, ctxt, instance, block_device_info,
reboot_type):
if _icehouse_compat() or self.client.can_send_version('2.32'):
version = _get_version('2.32')
else:
version = '2.23'
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'reboot_instance',
instance=instance,
block_device_info=block_device_info,
reboot_type=reboot_type)
def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
image_ref, orig_image_ref, orig_sys_metadata, bdms,
recreate=False, on_shared_storage=False, host=None):
instance_p = jsonutils.to_primitive(instance)
bdms_p = jsonutils.to_primitive(bdms)
cctxt = self.client.prepare(server=_compute_host(host, instance),
version=_get_version('2.22'))
cctxt.cast(ctxt, 'rebuild_instance',
instance=instance_p, new_pass=new_pass,
injected_files=injected_files, image_ref=image_ref,
orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata, bdms=bdms_p,
recreate=recreate, on_shared_storage=on_shared_storage)
def refresh_provider_fw_rules(self, ctxt, host):
cctxt = self.client.prepare(server=host,
version=_get_version('2.0'))
cctxt.cast(ctxt, 'refresh_provider_fw_rules')
def remove_aggregate_host(self, ctxt, aggregate, host_param, host,
slave_info=None):
'''Remove aggregate host.
:param ctxt: request context
:param aggregate_id:
:param host_param: This value is placed in the message to be the 'host'
parameter for the remote method.
:param host: This is the host to send the message to.
'''
aggregate_p = jsonutils.to_primitive(aggregate)
cctxt = self.client.prepare(server=host,
version=_get_version('2.15'))
cctxt.cast(ctxt, 'remove_aggregate_host',
aggregate=aggregate_p, host=host_param,
slave_info=slave_info)
def remove_fixed_ip_from_instance(self, ctxt, instance, address):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.0'))
cctxt.cast(ctxt, 'remove_fixed_ip_from_instance',
instance=instance_p, address=address)
def remove_volume_connection(self, ctxt, instance, volume_id, host):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=host,
version=_get_version('2.0'))
return cctxt.call(ctxt, 'remove_volume_connection',
instance=instance_p, volume_id=volume_id)
def rescue_instance(self, ctxt, instance, rescue_password):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.0'))
cctxt.cast(ctxt, 'rescue_instance',
instance=instance_p,
rescue_password=rescue_password)
def reset_network(self, ctxt, instance):
if _icehouse_compat() or self.client.can_send_version('2.40'):
version = _get_version('2.40')
else:
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
version = '2.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'reset_network', instance=instance)
def resize_instance(self, ctxt, instance, migration, image, instance_type,
reservations=None):
if _icehouse_compat() or self.client.can_send_version('2.45'):
version = _get_version('2.45')
else:
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
migration = jsonutils.to_primitive(
objects_base.obj_to_primitive(migration))
version = '2.16'
instance_type_p = jsonutils.to_primitive(instance_type)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'resize_instance',
instance=instance, migration=migration,
image=image, reservations=reservations,
instance_type=instance_type_p)
def resume_instance(self, ctxt, instance):
if _icehouse_compat() or self.client.can_send_version('2.33'):
version = _get_version('2.33')
else:
version = '2.0'
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'resume_instance', instance=instance)
def revert_resize(self, ctxt, instance, migration, host,
reservations=None):
if _icehouse_compat() or self.client.can_send_version('2.39'):
version = _get_version('2.39')
else:
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
migration = jsonutils.to_primitive(
objects_base.obj_to_primitive(migration))
version = '2.12'
cctxt = self.client.prepare(server=_compute_host(host, instance),
version=version)
cctxt.cast(ctxt, 'revert_resize',
instance=instance, migration=migration,
reservations=reservations)
def rollback_live_migration_at_destination(self, ctxt, instance, host):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=host,
version=_get_version('2.0'))
cctxt.cast(ctxt, 'rollback_live_migration_at_destination',
instance=instance_p)
def run_instance(self, ctxt, instance, host, request_spec,
filter_properties, requested_networks,
injected_files, admin_password,
is_first_time, node=None, legacy_bdm_in_spec=True):
instance_p = jsonutils.to_primitive(instance)
msg_kwargs = {'instance': instance_p, 'request_spec': request_spec,
'filter_properties': filter_properties,
'requested_networks': requested_networks,
'injected_files': injected_files,
'admin_password': admin_password,
'is_first_time': is_first_time, 'node': node}
if _icehouse_compat() or self.client.can_send_version('2.37'):
version = _get_version('2.37')
msg_kwargs['legacy_bdm_in_spec'] = legacy_bdm_in_spec
else:
version = '2.19'
cctxt = self.client.prepare(server=host,
version=version)
cctxt.cast(ctxt, 'run_instance', **msg_kwargs)
def set_admin_password(self, ctxt, instance, new_pass):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.0'))
return cctxt.call(ctxt, 'set_admin_password',
instance=instance_p, new_pass=new_pass)
def set_host_enabled(self, ctxt, enabled, host):
cctxt = self.client.prepare(server=host,
version=_get_version('2.0'))
return cctxt.call(ctxt, 'set_host_enabled', enabled=enabled)
def swap_volume(self, ctxt, instance, old_volume_id, new_volume_id):
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.34'))
cctxt.cast(ctxt, 'swap_volume',
instance=instance, old_volume_id=old_volume_id,
new_volume_id=new_volume_id)
def get_host_uptime(self, ctxt, host):
cctxt = self.client.prepare(server=host,
version=_get_version('2.0'))
return cctxt.call(ctxt, 'get_host_uptime')
def reserve_block_device_name(self, ctxt, instance, device, volume_id):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.3'))
return cctxt.call(ctxt, 'reserve_block_device_name',
instance=instance_p, device=device,
volume_id=volume_id)
def live_snapshot_instance(self, ctxt, instance, image_id):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.30'))
cctxt.cast(ctxt, 'live_snapshot_instance',
instance=instance_p, image_id=image_id)
def backup_instance(self, ctxt, instance, image_id, backup_type,
rotation):
if _icehouse_compat() or self.client.can_send_version('2.42'):
version = _get_version('2.42')
method = 'backup_instance'
extra_kwargs = dict()
else:
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
method = 'snapshot_instance'
extra_kwargs = dict(image_type='backup')
version = '2.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, method,
instance=instance,
image_id=image_id,
backup_type=backup_type,
rotation=rotation,
**extra_kwargs)
def snapshot_instance(self, ctxt, instance, image_id):
if _icehouse_compat() or self.client.can_send_version('2.42'):
version = _get_version('2.42')
extra_kwargs = dict()
else:
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
extra_kwargs = dict(image_type='snapshot',
backup_type=None,
rotation=None)
version = '2.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'snapshot_instance',
instance=instance,
image_id=image_id,
**extra_kwargs)
def start_instance(self, ctxt, instance):
if _icehouse_compat() or self.client.can_send_version('2.29'):
version = _get_version('2.29')
else:
version = '2.0'
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'start_instance', instance=instance)
def stop_instance(self, ctxt, instance, do_cast=True):
if _icehouse_compat() or self.client.can_send_version('2.29'):
version = _get_version('2.29')
else:
version = '2.0'
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
rpc_method = cctxt.cast if do_cast else cctxt.call
return rpc_method(ctxt, 'stop_instance', instance=instance)
def suspend_instance(self, ctxt, instance):
if _icehouse_compat() or self.client.can_send_version('2.33'):
version = _get_version('2.33')
else:
version = '2.0'
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'suspend_instance', instance=instance)
def terminate_instance(self, ctxt, instance, bdms, reservations=None):
if _icehouse_compat() or self.client.can_send_version('2.35'):
version = _get_version('2.35')
else:
version = '2.27'
instance = jsonutils.to_primitive(instance)
bdms_p = jsonutils.to_primitive(bdms)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'terminate_instance',
instance=instance, bdms=bdms_p,
reservations=reservations)
def unpause_instance(self, ctxt, instance):
if _icehouse_compat() or self.client.can_send_version('2.36'):
version = _get_version('2.36')
else:
version = '2.0'
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'unpause_instance', instance=instance)
def unrescue_instance(self, ctxt, instance):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.0'))
cctxt.cast(ctxt, 'unrescue_instance', instance=instance_p)
def soft_delete_instance(self, ctxt, instance, reservations=None):
if _icehouse_compat() or self.client.can_send_version('2.35'):
version = _get_version('2.35')
else:
version = '2.27'
instance = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'soft_delete_instance',
instance=instance, reservations=reservations)
def restore_instance(self, ctxt, instance):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.0'))
cctxt.cast(ctxt, 'restore_instance', instance=instance_p)
def shelve_instance(self, ctxt, instance, image_id=None):
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.31'))
cctxt.cast(ctxt, 'shelve_instance',
instance=instance, image_id=image_id)
def shelve_offload_instance(self, ctxt, instance):
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.31'))
cctxt.cast(ctxt, 'shelve_offload_instance', instance=instance)
def unshelve_instance(self, ctxt, instance, host, image=None):
cctxt = self.client.prepare(server=host,
version=_get_version('2.31'))
cctxt.cast(ctxt, 'unshelve_instance',
instance=instance, image=image)
def volume_snapshot_create(self, ctxt, instance, volume_id,
create_info):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.44'))
cctxt.cast(ctxt, 'volume_snapshot_create', instance=instance_p,
volume_id=volume_id, create_info=create_info)
def volume_snapshot_delete(self, ctxt, instance, volume_id, snapshot_id,
delete_info):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.44'))
cctxt.cast(ctxt, 'volume_snapshot_delete', instance=instance_p,
volume_id=volume_id, snapshot_id=snapshot_id,
delete_info=delete_info)
class SecurityGroupAPI(rpcclient.RpcProxy):
'''Client side of the security group rpc API.
API version history:
1.0 - Initial version.
1.41 - Adds refresh_instance_security_rules()
2.0 - Remove 1.x backwards compat
'''
#
# NOTE(russellb): This is the default minimum version that the server
# (manager) side must implement unless otherwise specified using a version
# argument to self.call()/cast()/etc. here. It should be left as X.0 where
# X is the current major API version (1.0, 2.0, ...). For more information
# about rpc API versioning, see the docs in
# openstack/common/rpc/dispatcher.py.
#
BASE_RPC_API_VERSION = '2.0'
def __init__(self):
super(SecurityGroupAPI, self).__init__(
topic=CONF.compute_topic,
default_version=self.BASE_RPC_API_VERSION)
self.client = self.get_client()
def refresh_security_group_rules(self, ctxt, security_group_id, host):
cctxt = self.client.prepare(server=host,
version=_get_version('2.0'))
cctxt.cast(ctxt, 'refresh_security_group_rules',
security_group_id=security_group_id)
def refresh_security_group_members(self, ctxt, security_group_id,
host):
cctxt = self.client.prepare(server=host,
version=_get_version('2.0'))
cctxt.cast(ctxt, 'refresh_security_group_members',
security_group_id=security_group_id)
def refresh_instance_security_rules(self, ctxt, host, instance):
instance_p = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=_get_version('2.0'))
cctxt.cast(ctxt, 'refresh_instance_security_rules',
instance=instance_p)
|
{
"content_hash": "4a3ce0301b05bc399814da4452461584",
"timestamp": "",
"source": "github",
"line_count": 936,
"max_line_length": 79,
"avg_line_length": 46.97115384615385,
"alnum_prop": 0.5887182986466507,
"repo_name": "rickerc/nova_audit",
"id": "5784f67ba65126c49ce273d7024b2c362d75092e",
"size": "44617",
"binary": false,
"copies": "2",
"ref": "refs/heads/cis-havana-staging",
"path": "nova/compute/rpcapi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13409215"
},
{
"name": "Shell",
"bytes": "17194"
}
],
"symlink_target": ""
}
|
import argparse
from sys import argv
from vang.bitbucket.get_branches import do_get_branches
from vang.bitbucket.utils import get_repo_specs
def has_branch(repo_specs, branch):
for spec in repo_specs:
branches = [
b["displayId"] for spec, bs in do_get_branches((spec,), branch) for b in bs
]
yield spec, branch in branches
def main(
branch, only_has=True, only_not_has=False, dirs=None, repos=None, projects=None
):
specs = get_repo_specs(dirs, repos, projects)
for spec, has in has_branch(specs, branch):
if only_has:
if has:
print(f"{spec[0]}/{spec[1]}")
elif only_not_has:
if not has:
print(f"{spec[0]}/{spec[1]}")
else:
print(f"{spec[0]}/{spec[1]}, {branch}: {has}")
def parse_args(args):
parser = argparse.ArgumentParser(
description="Check repository branches in Bitbucket"
)
parser.add_argument("branch", help="The branch to check")
filter_group = parser.add_mutually_exclusive_group()
filter_group.add_argument(
"-o",
"--only_has",
action="store_true",
help="Print only repos that has the branch.",
)
filter_group.add_argument(
"-n",
"--only_not_has",
action="store_true",
help="Print only repos that not has the branch.",
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"-d",
"--dirs",
nargs="*",
default=["."],
help="Git directories to extract repo information from",
)
group.add_argument(
"-r", "--repos", nargs="*", help="Repos, e.g. key1/repo1 key2/repo2"
)
group.add_argument("-p", "--projects", nargs="*", help="Projects, e.g. key1 key2")
return parser.parse_args(args)
if __name__ == "__main__": # pragma: no cover
main(**parse_args(argv[1:]).__dict__)
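
# Illustrative example (the project key "KEY1" and branch "develop" below are
# hypothetical values, not defined anywhere in this module):
#
#     main("develop", only_has=True, projects=["KEY1"])
#
# prints "project/repo" for every repository in project KEY1 that has a
# 'develop' branch, using the same code path as the command line entry point.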
|
{
"content_hash": "d70b6cb770206680e375704127c6c687",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 87,
"avg_line_length": 29.646153846153847,
"alnum_prop": 0.5760249091852621,
"repo_name": "bjuvensjo/scripts",
"id": "6bc603a88279009240b9372730ee3866559c7d68",
"size": "1950",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vang/bitbucket/has_branch.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "502278"
}
],
"symlink_target": ""
}
|
prefixes = ["", "lookup", "look up", "who is", "who has", "tell me who", "give me the name of", ("who", "is"), ("tell me who", "is")]
bodies = [
"{CallSignA} {CallSignB} {CallSignC}",
"{CallSignA} {CallSignB} {CallSignC} {CallSignD}",
"{CallSignA} {CallSignB} {CallSignC} {CallSignD} {CallSignE}",
"{CallSignA} {CallSignB} {CallSignC} {CallSignD} {CallSignE} {CallSignF}",
]
suffixes = ["", "please", "stop", "over"]
for prefix in prefixes:
for body in bodies:
for suffix in suffixes:
if type(prefix) == tuple:
print ("GetQRZ " + prefix[0] + " " + body + " " + prefix[1] + " " + suffix).strip()
else:
print ("GetQRZ " + prefix + " " + body + " " + suffix).strip()
print '\n'
|
{
"content_hash": "ef49b7d9951a2b01dadf7c1e7e53d6ac",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 133,
"avg_line_length": 42.294117647058826,
"alnum_prop": 0.5744089012517385,
"repo_name": "maihde/alexa-qrz",
"id": "967cbc0c987bf0ac2f54d12d25f7dde7c8d7b698",
"size": "735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generate_utterances.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "24699"
},
{
"name": "Python",
"bytes": "735"
}
],
"symlink_target": ""
}
|
import re
import subprocess
import sys
from .source import Source
from .configuration import Configuration
from itertools import chain
import io
import codecs
class PDFKit(object):
"""
Main class that does all generation routine.
:param url_or_file: str - either a URL, a path to a file or a string containing HTML
to convert
:param type_: str - either 'url', 'file' or 'string'
:param options: dict (optional) with wkhtmltopdf options, with or w/o '--'
:param toc: dict (optional) - toc-specific wkhtmltopdf options, with or w/o '--'
:param cover: str (optional) - url/filename with a cover html page
:param configuration: (optional) instance of pdfkit.configuration.Configuration()
"""
class ImproperSourceError(Exception):
"""Wrong source type for stylesheets"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
def __init__(self, url_or_file, type_, options=None, toc=None, cover=None,
css=None, configuration=None):
self.source = Source(url_or_file, type_)
self.configuration = (Configuration() if configuration is None
else configuration)
self.wkhtmltopdf = self.configuration.wkhtmltopdf
self.options = dict()
if self.source.isString():
self.options.update(self._find_options_in_meta(url_or_file))
if options is not None: self.options.update(options)
self.options = self._normalize_options(self.options)
toc = {} if toc is None else toc
self.toc = self._normalize_options(toc)
self.cover = cover
self.css = css
self.stylesheets = []
def command(self, path=None):
if self.css:
self._prepend_css(self.css)
args = [self.wkhtmltopdf, '--quiet']
args += list(chain.from_iterable(list(self.options.items())))
args = [_f for _f in args if _f]
if self.toc:
args.append('toc')
args += list(chain.from_iterable(list(self.toc.items())))
if self.cover:
args.append('cover')
args.append(self.cover)
# If the source is a string then we will pipe it into wkhtmltopdf
# If the source is file-like then we will read from it and pipe it in
if self.source.isString() or self.source.isFileObj():
args.append('-')
else:
if isinstance(self.source.source, str):
args.append(self.source.to_s())
else:
args += self.source.source
# If output_path evaluates to False append '-' to end of args
# and wkhtmltopdf will pass generated PDF to stdout
if path:
args.append(path)
else:
args.append('-')
return args
def to_pdf(self, path=None):
args = self.command(path)
result = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# If the source is a string then we will pipe it into wkhtmltopdf.
# If we want to add custom CSS to file then we read input file to
# string and prepend css to it and then pass it to stdin.
# This is a workaround for a bug in wkhtmltopdf (look closely in README)
if self.source.isString() or (self.source.isFile() and self.css):
input = self.source.to_s().encode('utf-8')
elif self.source.isFileObj():
input = self.source.source.read().encode('utf-8')
else:
input = None
stdout, stderr = result.communicate(input=input)
exit_code = result.returncode
if 'cannot connect to X server' in stderr.decode('utf-8'):
raise IOError('%s\n'
                          'You will need to run wkhtmltopdf within a "virtual" X server.\n'
                          'Go to the link below for more information\n'
'https://github.com/JazzCore/python-pdfkit/wiki/Using-wkhtmltopdf-without-X-server' % stderr.decode('utf-8'))
if 'Error' in stderr.decode('utf-8'):
raise IOError('wkhtmltopdf reported an error:\n' + stderr.decode('utf-8'))
if exit_code != 0:
raise IOError("wkhtmltopdf exited with non-zero code {0}. error:\n{1}".format(exit_code, stderr.decode("utf-8")))
# Since wkhtmltopdf sends its output to stderr we will capture it
# and properly send to stdout
if '--quiet' not in args:
sys.stdout.write(stderr.decode('utf-8'))
if not path:
return stdout
else:
try:
with codecs.open(path, encoding='utf-8') as f:
# read 4 bytes to get PDF signature '%PDF'
text = f.read(4)
if text == '':
raise IOError('Command failed: %s\n'
                                      'Check wkhtmltopdf output without \'quiet\' '
'option' % ' '.join(args))
return True
except IOError:
raise IOError('Command failed: %s\n'
                              'Check wkhtmltopdf output without \'quiet\' option' %
' '.join(args))
def _normalize_options(self, options):
"""Updates a dict of config options to make then usable on command line
:param options: dict {option name: value}
returns:
dict: {option name: value} - option names lower cased and prepended with
'--' if necessary. Non-empty values cast to str
"""
normalized_options = {}
for key, value in list(options.items()):
if not '--' in key:
normalized_key = '--%s' % self._normalize_arg(key)
else:
normalized_key = self._normalize_arg(key)
normalized_options[normalized_key] = str(value) if value else value
return normalized_options
def _normalize_arg(self, arg):
return arg.lower()
def _style_tag_for(self, stylesheet):
return "<style>%s</style>" % stylesheet
def _prepend_css(self, path):
if self.source.isUrl() or isinstance(self.source.source, list):
raise self.ImproperSourceError('CSS files can be added only to a single '
'file or string')
if not isinstance(path, list):
path = [path]
css_data = []
for p in path:
if isinstance(p, io.StringIO):
css_data.append(p.read())
else:
with codecs.open(p, encoding="UTF-8") as f:
css_data.append(f.read())
css_data = "\n".join(css_data)
if self.source.isFile():
with codecs.open(self.source.to_s(), encoding="UTF-8") as f:
inp = f.read()
self.source = Source(
inp.replace('</head>', self._style_tag_for(css_data) + '</head>'),
'string')
elif self.source.isString():
if '</head>' in self.source.to_s():
self.source.source = self.source.to_s().replace(
'</head>', self._style_tag_for(css_data) + '</head>')
else:
self.source.source = self._style_tag_for(css_data) + self.source.to_s()
def _find_options_in_meta(self, content):
"""Reads 'content' and extracts options encoded in HTML meta tags
:param content: str or file-like object - contains HTML to parse
returns:
dict: {config option: value}
"""
if (isinstance(content, io.IOBase)
or content.__class__.__name__ == 'StreamReaderWriter'):
content = content.read()
found = {}
for x in re.findall('<meta [^>]*>', content):
if re.search('name=["\']%s' % self.configuration.meta_tag_prefix, x):
name = re.findall('name=["\']%s([^"\']*)' %
self.configuration.meta_tag_prefix, x)[0]
found[name] = re.findall('content=["\']([^"\']*)', x)[0]
return found
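

# Illustrative usage sketch, not part of the original module. It assumes the
# wkhtmltopdf binary is installed and on the PATH; the URL and output filename
# below are placeholder values. Run it as `python -m pdfkit.pdfkit`, since this
# module uses relative imports.
if __name__ == '__main__':  # pragma: no cover
    # Options without a leading '--' are normalized by _normalize_options()
    # before being handed to wkhtmltopdf.
    kit = PDFKit('https://example.com', 'url', options={'page-size': 'A4'})
    kit.to_pdf('example.pdf')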
|
{
"content_hash": "21b257abc451cb6dee2fc2c187f94646",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 135,
"avg_line_length": 37.90867579908676,
"alnum_prop": 0.548662972777644,
"repo_name": "phobson/python-pdfkit",
"id": "a3318c930a9a36215d316f0faab2ac95e18f11e9",
"size": "8326",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pdfkit/pdfkit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27896"
},
{
"name": "Shell",
"bytes": "303"
}
],
"symlink_target": ""
}
|
""" Practica 8-9 - Magicians
Make a list of magician’s names.
Pass the list to a function called show_magicians(),
which prints the name of each magician in the list ."""
def show_messages(mensajes):
    for mensaje in mensajes:
        print(mensaje)
mensaje = ["Hola soy el mensaje 1", "Hi i am the message 2", "Salut je suis le message 3"]
show_messages(mensaje)
|
{
"content_hash": "f12de8ee8cc9d8f24138c08cec70b5c2",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 90,
"avg_line_length": 31.416666666666668,
"alnum_prop": 0.6870026525198939,
"repo_name": "AnhellO/DAS_Sistemas",
"id": "28c884430c01a1ea0606c067de9c126f225436c2",
"size": "379",
"binary": false,
"copies": "1",
"ref": "refs/heads/ene-jun-2022",
"path": "Ene-Jun-2022/jesus-raul-alvarado-torres/práctica-2/capítulo-8/8-9. Magicians.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "8515"
},
{
"name": "Go",
"bytes": "25845"
},
{
"name": "HTML",
"bytes": "36671"
},
{
"name": "Python",
"bytes": "716604"
}
],
"symlink_target": ""
}
|
from dbSetup.tinyDbSetup import db
import tinydb
PATH = 'tinydb.json'
TEST_DATA = [
{'name': 'John', 'age': 40},
{'name': 'Ringo', 'age': 79},
{'name': 'Paul', 'age': 77},
{'name': 'George', 'age': 58}
]
def test_basic():
""" simplest possible test to verify the import works """
db(PATH).purge_tables()
db(PATH).insert({'thing1': 'thing2'})
assert db(PATH).all() == [{'thing1': 'thing2'}]
def test_selects():
""" confirm I can narrow from 'all' """
db(PATH).purge_tables()
for d in TEST_DATA:
db(PATH).insert(d)
assert db(PATH).all() == TEST_DATA
paul = db(PATH).search(tinydb.where('name') == 'Paul')
assert paul == [{'name': 'Paul', 'age': 77}]
def test_insert_list():
""" Confirm there is syntax for inserting many """
db(PATH).purge_tables()
db(PATH).insert_multiple(TEST_DATA)
assert db(PATH).all() == TEST_DATA
|
{
"content_hash": "97fce8b756b774481f90cc2e025f08b7",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 61,
"avg_line_length": 25.194444444444443,
"alnum_prop": 0.5788313120176406,
"repo_name": "chrisbrake/PythonSandbox",
"id": "e5343e935f4a694ccce57e6585427e9d264a23fd",
"size": "907",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_dbSetup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Gherkin",
"bytes": "164"
},
{
"name": "HTML",
"bytes": "2519"
},
{
"name": "JavaScript",
"bytes": "3317"
},
{
"name": "Python",
"bytes": "35318"
},
{
"name": "Shell",
"bytes": "41"
}
],
"symlink_target": ""
}
|
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid.axislines import Subplot
fig = plt.figure(1, (3,3))
ax = Subplot(fig, 111)
fig.add_subplot(ax)
ax.axis["right"].set_visible(False)
ax.axis["top"].set_visible(False)
plt.show()
|
{
"content_hash": "286e5c9007bf7853e7fe6768ff7589eb",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 52,
"avg_line_length": 18.53846153846154,
"alnum_prop": 0.7219917012448133,
"repo_name": "yavalvas/yav_com",
"id": "68d42677cd5415afc578dfa14d4c72467b8e4abe",
"size": "241",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "build/matplotlib/doc/mpl_toolkits/axes_grid/figures/simple_axisline3.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "85377"
},
{
"name": "C++",
"bytes": "568744"
},
{
"name": "CSS",
"bytes": "47585"
},
{
"name": "Erlang",
"bytes": "7112"
},
{
"name": "HTML",
"bytes": "14865"
},
{
"name": "JavaScript",
"bytes": "359937"
},
{
"name": "Objective-C",
"bytes": "188937"
},
{
"name": "Perl",
"bytes": "229498"
},
{
"name": "Python",
"bytes": "7684946"
},
{
"name": "Shell",
"bytes": "1805"
}
],
"symlink_target": ""
}
|
""" To make use of recursive calls, this function must return
two things:
 1) Length of LIS ending with element arr[n-1]. We use
 maxEndingHere for this purpose.
 2) Overall maximum, as the LIS may end with an element
 before arr[n-1]. The global variable maximum is used for this purpose
 and holds our final result for the full array of size n. """
# global variable to store the maximum
global maximum
def _lis(arr , n ):
# to allow the access of global variable
global maximum
# Base Case
if n == 1 :
return 1
# maxEndingHere is the length of LIS ending with arr[n-1]
maxEndingHere = 1
"""Recursively get all LIS ending with arr[0], arr[1]..arr[n-2]
    If arr[i-1] is smaller than arr[n-1], and the max ending with
arr[n-1] needs to be updated, then update it"""
for i in xrange(1, n):
res = _lis(arr , i)
if arr[i-1] < arr[n-1] and res+1 > maxEndingHere:
maxEndingHere = res +1
# Compare maxEndingHere with overall maximum. And
# update the overall maximum if needed
maximum = max(maximum , maxEndingHere)
return maxEndingHere
def lis(arr):
# to allow the access of global variable
global maximum
    # length of arr
n = len(arr)
# maximum variable holds the result
maximum = 1
# The function _lis() stores its result in maximum
_lis(arr , n)
return maximum
# Driver program to test the above function
arr = [10 , 22 , 9 , 33 , 21 , 50 , 41 , 60]
n = len(arr)
print "Length of lis is ", lis(arr)
|
{
"content_hash": "be07bff7b360b9aaa6aaad7e9767bff4",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 67,
"avg_line_length": 26.689655172413794,
"alnum_prop": 0.6479328165374677,
"repo_name": "churrizo/Algorithms_Example",
"id": "8aebbb5df088dc9f6f56d33ef689860ba0b3658b",
"size": "1596",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "Longest-Common-Subsequence/Python/Longest_increasing _subsequence.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "13450"
},
{
"name": "C#",
"bytes": "6444"
},
{
"name": "C++",
"bytes": "80036"
},
{
"name": "Go",
"bytes": "10926"
},
{
"name": "Java",
"bytes": "90833"
},
{
"name": "JavaScript",
"bytes": "22643"
},
{
"name": "Kotlin",
"bytes": "1118"
},
{
"name": "Perl",
"bytes": "277"
},
{
"name": "Python",
"bytes": "53227"
},
{
"name": "Racket",
"bytes": "132"
},
{
"name": "Ruby",
"bytes": "8025"
},
{
"name": "Rust",
"bytes": "3251"
},
{
"name": "Swift",
"bytes": "12576"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from django.http import HttpResponse, HttpResponseBadRequest
from custom.apps.wisepill.models import WisePillDeviceEvent
from corehq.apps.sms.handlers.keyword import handle_structured_sms
from corehq.apps.sms.models import CommConnectCase
from corehq.apps.reminders.models import SurveyKeyword, METHOD_STRUCTURED_SMS
from corehq.apps.api.models import require_api_user_permission, PERMISSION_POST_WISEPILL
@require_api_user_permission(PERMISSION_POST_WISEPILL)
def device_data(request):
if "data" not in request.POST:
return HttpResponseBadRequest("Missing 'data' POST parameter.")
data = request.POST.get("data")
data = data.strip()
data_points = data.split(",")
device_id = None
for data_point in data_points:
key_value = data_point.partition("=")
key = key_value[0].strip().upper()
value = key_value[2].strip()
if key == "SN":
device_id = value
break
if device_id is None:
return HttpResponseBadRequest("Missing 'SN' in data string.")
# This view lookup is an implicit assert that either one device exists
# with the given device_id, or no devices exist with this device_id.
case = CommConnectCase.view("wisepill/device",
key=[device_id],
include_docs=True).one()
event = WisePillDeviceEvent(
domain=case.domain if case is not None else None,
data=data,
received_on=datetime.utcnow(),
case_id=case._id if case is not None else None,
processed=False,
)
event.save()
if case is not None:
survey_keywords = SurveyKeyword.get_all(case.domain)
for survey_keyword in survey_keywords:
if survey_keyword.keyword.upper() == "DEVICE_EVENT":
for survey_keyword_action in survey_keyword.actions:
if survey_keyword_action.action == METHOD_STRUCTURED_SMS:
handle_structured_sms(survey_keyword, survey_keyword_action, case, None, "DEVICE_EVENT,%s" % data, send_response=False)
event.processed = True
event.save()
break
return HttpResponse("")
|
{
"content_hash": "1527d7da073e176d5fbd6ffeafc422e7",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 143,
"avg_line_length": 40.875,
"alnum_prop": 0.6339012669287899,
"repo_name": "SEL-Columbia/commcare-hq",
"id": "5d0c0dfa58962b9a803721382cc52a85067649bf",
"size": "2289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "custom/apps/wisepill/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "768322"
},
{
"name": "JavaScript",
"bytes": "2647080"
},
{
"name": "Python",
"bytes": "7806659"
},
{
"name": "Shell",
"bytes": "28569"
}
],
"symlink_target": ""
}
|
import copy
import datetime
import os
import pickle
import time
import unittest
import warnings
from django.test.utils import IgnoreDeprecationWarningsMixin
from django.utils.deprecation import RemovedInDjango19Warning
# Swallow the import-time warning to test the deprecated implementation.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RemovedInDjango19Warning)
from django.utils.tzinfo import FixedOffset, LocalTimezone
class TzinfoTests(IgnoreDeprecationWarningsMixin, unittest.TestCase):
@classmethod
def setUpClass(cls):
super(TzinfoTests, cls).setUpClass()
cls.old_TZ = os.environ.get('TZ')
os.environ['TZ'] = 'US/Eastern'
try:
# Check if a timezone has been set
time.tzset()
cls.tz_tests = True
except AttributeError:
# No timezone available. Don't run the tests that require a TZ
cls.tz_tests = False
@classmethod
def tearDownClass(cls):
if cls.old_TZ is None:
del os.environ['TZ']
else:
os.environ['TZ'] = cls.old_TZ
# Cleanup - force re-evaluation of TZ environment variable.
if cls.tz_tests:
time.tzset()
super(TzinfoTests, cls).tearDownClass()
def test_fixedoffset(self):
self.assertEqual(repr(FixedOffset(0)), '+0000')
self.assertEqual(repr(FixedOffset(60)), '+0100')
self.assertEqual(repr(FixedOffset(-60)), '-0100')
self.assertEqual(repr(FixedOffset(280)), '+0440')
self.assertEqual(repr(FixedOffset(-280)), '-0440')
self.assertEqual(repr(FixedOffset(-78.4)), '-0118')
self.assertEqual(repr(FixedOffset(78.4)), '+0118')
self.assertEqual(repr(FixedOffset(-5.5 * 60)), '-0530')
self.assertEqual(repr(FixedOffset(5.5 * 60)), '+0530')
self.assertEqual(repr(FixedOffset(-.5 * 60)), '-0030')
self.assertEqual(repr(FixedOffset(.5 * 60)), '+0030')
def test_16899(self):
if not self.tz_tests:
return
ts = 1289106000
# Midnight at the end of DST in US/Eastern: 2010-11-07T05:00:00Z
dt = datetime.datetime.utcfromtimestamp(ts)
# US/Eastern -- we force its representation to "EST"
tz = LocalTimezone(dt + datetime.timedelta(days=1))
self.assertEqual(
repr(datetime.datetime.fromtimestamp(ts - 3600, tz)),
'datetime.datetime(2010, 11, 7, 0, 0, tzinfo=EST)')
self.assertEqual(
repr(datetime.datetime.fromtimestamp(ts, tz)),
'datetime.datetime(2010, 11, 7, 1, 0, tzinfo=EST)')
self.assertEqual(
repr(datetime.datetime.fromtimestamp(ts + 3600, tz)),
'datetime.datetime(2010, 11, 7, 1, 0, tzinfo=EST)')
def test_copy(self):
now = datetime.datetime.now()
self.assertIsInstance(copy.copy(FixedOffset(90)), FixedOffset)
self.assertIsInstance(copy.copy(LocalTimezone(now)), LocalTimezone)
def test_deepcopy(self):
now = datetime.datetime.now()
self.assertIsInstance(copy.deepcopy(FixedOffset(90)), FixedOffset)
self.assertIsInstance(copy.deepcopy(LocalTimezone(now)), LocalTimezone)
def test_pickling_unpickling(self):
now = datetime.datetime.now()
self.assertIsInstance(pickle.loads(pickle.dumps(FixedOffset(90))), FixedOffset)
self.assertIsInstance(pickle.loads(pickle.dumps(LocalTimezone(now))), LocalTimezone)
|
{
"content_hash": "6b4d4b038a6a1aa6dfc0fb9f59c5cce4",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 92,
"avg_line_length": 38.32967032967033,
"alnum_prop": 0.648795871559633,
"repo_name": "szopu/django",
"id": "4d1af62f0a787832a4bc23379fcd24a8a01b5ebc",
"size": "3488",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "tests/utils_tests/test_tzinfo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53429"
},
{
"name": "JavaScript",
"bytes": "103687"
},
{
"name": "Python",
"bytes": "10219556"
},
{
"name": "Shell",
"bytes": "10452"
}
],
"symlink_target": ""
}
|
import numpy as np
import pandas as pd
from bokeh.plotting import *
from bokeh.objects import Range1d
# Define some categories
categories = [
'ousia', 'poson', 'poion', 'pros ti', 'pou',
'pote', 'keisthai', 'echein', 'poiein', 'paschein',
]
# Create data
N = 10
data = { cat : np.random.randint(10, 100, size=N) for cat in categories }
# Define a little function to stack series together to make polygons. Soon
# this will be built into Bokeh.
def stacked(data, categories):
ys = []
last = np.zeros(len(data.values()[0]))
for cat in categories:
next = last + data[cat]
ys.append(np.hstack((last[::-1], next)))
last = next
return ys
# Get the y coordinates of the stacked data
ys = stacked(data, categories)
# The x coordinates for each polygon are simply the series concatenated
# with its reverse.
xs = [np.hstack((categories[::-1], categories))] * len(ys)
# Pick out a color palette
colors = brewer["Spectral"][len(ys)]
# EXERCISE: output static HTML file
output_file("style.html")
# EXERCISE: play around with parameters like:
# - line_color
# - line_alpha
# - line_width
# - line_dash (e.g., [2,4])
# - fill_color
# - fill_alpha
# - background_fill
patches(xs, ys, x_range=categories, y_range=Range1d(start=0, end=800),
color=colors, alpha=0.8, line_color=None, background_fill="lightgrey",
title="Categories of Brewering")
# EXERCISE: configure all of the following plot properties
ygrid().grid_line_color = "white"
ygrid().grid_line_width = 2
axis().major_label_text_font_size = "12pt"
axis().major_label_text_font_style = "bold"
axis().major_label_standoff = 10 # distance of tick labels from ticks
axis().axis_line_color = None # color, or None, to suppress the line
xaxis().major_label_orientation = np.pi/4 # radians, "horizontal", "vertical", "normal"
xaxis().major_tick_in = 10 # distance ticks extends into the plot
xaxis().major_tick_out = 0 # and distance they extend out
xaxis().major_tick_line_color = "white"
show()
|
{
"content_hash": "65fea2296687e2f75e44d3732d8ab92b",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 89,
"avg_line_length": 32.5625,
"alnum_prop": 0.6602687140115163,
"repo_name": "sahat/bokeh",
"id": "057c4a78e2e3b1fe25f063d999a9aa13836ccb4f",
"size": "2085",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sphinx/source/tutorial/solutions/style.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "329134"
},
{
"name": "CoffeeScript",
"bytes": "2099237"
},
{
"name": "JavaScript",
"bytes": "2683660"
},
{
"name": "Python",
"bytes": "973217"
},
{
"name": "Scala",
"bytes": "27312"
},
{
"name": "Shell",
"bytes": "12135"
}
],
"symlink_target": ""
}
|
import testtools
from testtools import matchers
from tempest.api.volume import base
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import config
from tempest import test
CONF = config.CONF
class VolumesV2GetTest(base.BaseVolumeTest):
@classmethod
def setup_clients(cls):
super(VolumesV2GetTest, cls).setup_clients()
cls.client = cls.volumes_client
@classmethod
def resource_setup(cls):
super(VolumesV2GetTest, cls).resource_setup()
cls.name_field = cls.special_fields['name_field']
cls.descrip_field = cls.special_fields['descrip_field']
def _delete_volume(self, volume_id):
self.client.delete_volume(volume_id)
self.client.wait_for_resource_deletion(volume_id)
def _volume_create_get_update_delete(self, **kwargs):
        # Create a volume, get its details and delete the volume
volume = {}
v_name = data_utils.rand_name('Volume')
metadata = {'Type': 'Test'}
# Create a volume
kwargs[self.name_field] = v_name
kwargs['metadata'] = metadata
volume = self.client.create_volume(**kwargs)['volume']
self.assertIn('id', volume)
self.addCleanup(self._delete_volume, volume['id'])
waiters.wait_for_volume_status(self.client, volume['id'], 'available')
self.assertIn(self.name_field, volume)
self.assertEqual(volume[self.name_field], v_name,
"The created volume name is not equal "
"to the requested name")
self.assertTrue(volume['id'] is not None,
"Field volume id is empty or not found.")
# Get Volume information
fetched_volume = self.client.show_volume(volume['id'])['volume']
self.assertEqual(v_name,
fetched_volume[self.name_field],
'The fetched Volume name is different '
'from the created Volume')
self.assertEqual(volume['id'],
fetched_volume['id'],
'The fetched Volume id is different '
'from the created Volume')
self.assertThat(fetched_volume['metadata'].items(),
matchers.ContainsAll(metadata.items()),
'The fetched Volume metadata misses data '
'from the created Volume')
if 'imageRef' in kwargs:
self.assertEqual('true', fetched_volume['bootable'])
if 'imageRef' not in kwargs:
self.assertEqual('false', fetched_volume['bootable'])
# Update Volume
        # Test volume update when display_name is the same as the original value
params = {self.name_field: v_name}
self.client.update_volume(volume['id'], **params)
# Test volume update when display_name is new
new_v_name = data_utils.rand_name('new-Volume')
new_desc = 'This is the new description of volume'
params = {self.name_field: new_v_name,
self.descrip_field: new_desc}
update_volume = self.client.update_volume(
volume['id'], **params)['volume']
# Assert response body for update_volume method
self.assertEqual(new_v_name, update_volume[self.name_field])
self.assertEqual(new_desc, update_volume[self.descrip_field])
# Assert response body for show_volume method
updated_volume = self.client.show_volume(volume['id'])['volume']
self.assertEqual(volume['id'], updated_volume['id'])
self.assertEqual(new_v_name, updated_volume[self.name_field])
self.assertEqual(new_desc, updated_volume[self.descrip_field])
self.assertThat(updated_volume['metadata'].items(),
matchers.ContainsAll(metadata.items()),
'The fetched Volume metadata misses data '
'from the created Volume')
# Test volume create when display_name is none and display_description
# contains specific characters,
# then test volume update if display_name is duplicated
new_volume = {}
new_v_desc = data_utils.rand_name('@#$%^* description')
params = {self.descrip_field: new_v_desc,
'availability_zone': volume['availability_zone']}
new_volume = self.client.create_volume(**params)['volume']
self.assertIn('id', new_volume)
self.addCleanup(self._delete_volume, new_volume['id'])
waiters.wait_for_volume_status(self.client,
new_volume['id'], 'available')
params = {self.name_field: volume[self.name_field],
self.descrip_field: volume[self.descrip_field]}
self.client.update_volume(new_volume['id'], **params)
if 'imageRef' in kwargs:
self.assertEqual('true', updated_volume['bootable'])
if 'imageRef' not in kwargs:
self.assertEqual('false', updated_volume['bootable'])
@test.attr(type='smoke')
@test.idempotent_id('27fb0e9f-fb64-41dd-8bdb-1ffa762f0d51')
def test_volume_create_get_update_delete(self):
self._volume_create_get_update_delete()
@test.attr(type='smoke')
@test.idempotent_id('54a01030-c7fc-447c-86ee-c1182beae638')
@test.services('image')
def test_volume_create_get_update_delete_from_image(self):
image = self.compute_images_client.show_image(
CONF.compute.image_ref)['image']
min_disk = image.get('minDisk')
disk_size = max(min_disk, CONF.volume.volume_size)
self._volume_create_get_update_delete(
imageRef=CONF.compute.image_ref, size=disk_size)
@test.idempotent_id('3f591b4a-7dc6-444c-bd51-77469506b3a1')
@testtools.skipUnless(CONF.volume_feature_enabled.clone,
'Cinder volume clones are disabled')
def test_volume_create_get_update_delete_as_clone(self):
origin = self.create_volume()
self._volume_create_get_update_delete(source_volid=origin['id'])
class VolumesV1GetTest(VolumesV2GetTest):
_api_version = 1
|
{
"content_hash": "52e4e2cfe80ca0ee64606d0142edd12e",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 78,
"avg_line_length": 44.381294964028775,
"alnum_prop": 0.6156589398605933,
"repo_name": "HybridF5/tempest_debug",
"id": "5d83bb01a8380bb29f6bd4f8ee8a4c90dd17900f",
"size": "6805",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tempest/api/volume/test_volumes_get.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3634721"
},
{
"name": "Shell",
"bytes": "8175"
}
],
"symlink_target": ""
}
|
from dataclasses import dataclass
from simple_parsing import ArgumentParser, choice
from simple_parsing.helpers import Serializable
# import tensorflow as tf
class GAN:
@dataclass
class HyperParameters(Serializable):
"""Hyperparameters of the Generator and Discriminator networks."""
learning_rate: float = 1e-4
optimizer: str = choice("ADAM", "RMSPROP", "SGD", default="ADAM")
n_disc_iters_per_g_iter: int = (
1 # Number of Discriminator iterations per Generator iteration.
)
def __init__(self, hparams: HyperParameters):
self.hparams = hparams
class WGAN(GAN):
"""
Wasserstein GAN
"""
@dataclass
class HyperParameters(GAN.HyperParameters):
e_drift: float = 1e-4
"""Coefficient from the progan authors which penalizes critic outputs for having a large magnitude."""
def __init__(self, hparams: HyperParameters):
self.hparams = hparams
class WGANGP(WGAN):
"""
Wasserstein GAN with Gradient Penalty
"""
@dataclass
class HyperParameters(WGAN.HyperParameters):
e_drift: float = 1e-4
"""Coefficient from the progan authors which penalizes critic outputs for having a large magnitude."""
gp_coefficient: float = 10.0
"""Multiplying coefficient for the gradient penalty term of the loss equation. (10.0 is the default value, and was used by the PROGAN authors.)"""
def __init__(self, hparams: HyperParameters):
self.hparams: WGANGP.HyperParameters = hparams
print(self.hparams.gp_coefficient)
parser = ArgumentParser()
parser.add_arguments(WGANGP.HyperParameters, "hparams")
args = parser.parse_args()
print(args.hparams)
expected = """
WGANGP.HyperParameters(learning_rate=0.0001, optimizer='ADAM', n_disc_iters_per_g_iter=1, e_drift=0.0001, gp_coefficient=10.0)
"""
|
{
"content_hash": "dcd93909164ecbc4968e79058c8348f3",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 154,
"avg_line_length": 30.672131147540984,
"alnum_prop": 0.6825227151256013,
"repo_name": "lebrice/SimpleParsing",
"id": "9dca0fa191165422ad562fb2bfa37f690cde6aaa",
"size": "1871",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/inheritance/ml_inheritance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "607221"
}
],
"symlink_target": ""
}
|
import logging
from dbtracker.cli import Cli
import argparse
def main(argv=None):
parser = argparse.ArgumentParser(
description="Queries MySQL and PostgreSQL for stats")
parser.add_argument(
"-S", "--save",
action="store_true",
help="generate and save database stats")
parser.add_argument(
"-g", "--growth",
help="display a graph of the growth. Arguments in the form of run number ranges e.g. 3-4 or 4",
type=str)
parser.add_argument(
"-H", "--history",
help="List the datetime stamps of the last n saved runs",
type=int)
parser.add_argument(
"-c", "--count",
action="store_true",
help="Gets database row counts but does not save")
parser.add_argument(
"-d", "--dates",
type=str,
help="compares two datetime stamps e.g. 2015-04-24 16:18:57.166095-07:00 - 2015-04-22 17:00:50.746688-07:00")
parser.add_argument(
"-s", "--silent",
action="store_true",
help="turns logging levels down to ERROR only")
parser.add_argument(
"-C", "--config",
type=str,
help="use a custom configuration file path")
args = parser.parse_args(argv)
if args.silent:
logging.basicConfig(level=logging.ERROR)
else:
logging.basicConfig(level=logging.INFO)
cli = Cli(args)
cli.main()
|
{
"content_hash": "a519042229d35f724d079616ca3cb356",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 117,
"avg_line_length": 31.155555555555555,
"alnum_prop": 0.5977175463623395,
"repo_name": "PSU-OIT-ARC/dbtracker",
"id": "ffd3783d4e01d384aaf349259e989af3fe4dbf2f",
"size": "1402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbtracker/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "345"
},
{
"name": "Python",
"bytes": "17268"
}
],
"symlink_target": ""
}
|
import argparse
import shutil
import icqsol_utils
# Parse Command Line.
parser = argparse.ArgumentParser()
parser.add_argument('--input', dest='input', help='Shape dataset selected from history')
parser.add_argument('--input_file_format_and_type', dest='input_file_format_and_type', help='Input file format and type')
parser.add_argument('--input_dataset_type', dest='input_dataset_type', help='Input dataset_type')
parser.add_argument('--input_texture', dest='input_texture', help='Image dataset selected from history')
parser.add_argument('--input_texture_file_format', dest='input_texture_file_format', help='Input texture file format')
parser.add_argument('--max_edge_length', dest='max_edge_length', type=float, default=float('inf'), help='Maximum edge length')
parser.add_argument('--output', dest='output', help='Output dataset')
parser.add_argument('--output_vtk_type', dest='output_vtk_type', help='Output VTK type')
args = parser.parse_args()
input_format, input_file_type = icqsol_utils.get_format_and_type(args.input_file_format_and_type)
tmp_dir = icqsol_utils.get_temp_dir()
# Instantiate a ShapeManager for loading the input.
shape_mgr = icqsol_utils.get_shape_manager(input_format, args.input_dataset_type)
# Get the vtk polydata from the input dataset.
vtk_poly_data = shape_mgr.loadAsVtkPolyData(args.input)
# Apply the texture to the shape's surface.
vtk_poly_data = shape_mgr.addTextureToVtkPolyData(vtk_poly_data,
texture_file=args.input_texture,
max_edge_length=args.max_edge_length,
texture_file_format=args.input_texture_file_format)
# Define the output file format and type (the output_format can only be 'vtk').
output_format, output_file_type = icqsol_utils.get_format_and_type(args.output_vtk_type)
tmp_output_path = icqsol_utils.get_temporary_file_path(tmp_dir, output_format)
# Make sure the ShapeManager's writer is vtk.
shape_mgr.setWriter(file_format=icqsol_utils.VTK, vtk_dataset_type=icqsol_utils.POLYDATA)
# Save the output.
shape_mgr.saveVtkPolyData(vtk_poly_data=vtk_poly_data, file_name=tmp_output_path, file_type=output_file_type)
shutil.move(tmp_output_path, args.output)
|
{
"content_hash": "0593382a320984c16360357a25f2828a",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 126,
"avg_line_length": 52.86046511627907,
"alnum_prop": 0.7149142102947647,
"repo_name": "pletzer/galaxy-csg",
"id": "5fc759df960330bd7cdd974985e143765937e328",
"size": "2295",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/icqsol_add_texture/icqsol_add_texture.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "34286"
},
{
"name": "Mako",
"bytes": "14553"
},
{
"name": "Python",
"bytes": "904"
}
],
"symlink_target": ""
}
|
from wtforms import validators
from werkzeug.datastructures import FileStorage
import wtforms
def validate_required_iff(**kwargs):
"""
Used as a validator within a wtforms.Form
This implements a conditional DataRequired
Each of the kwargs is a condition that must be met in the form
Otherwise, no validation is done
"""
def _validator(form, field):
all_conditions_met = True
for key, value in kwargs.iteritems():
if getattr(form, key).data != value:
all_conditions_met = False
if all_conditions_met:
# Verify that data exists
if field.data is None \
or (isinstance(field.data, (str, unicode))
and not field.data.strip()) \
or (isinstance(field.data, FileStorage)
and not field.data.filename.strip()):
raise validators.ValidationError('This field is required.')
else:
# This field is not required, ignore other errors
field.errors[:] = []
raise validators.StopValidation()
return _validator
def validate_greater_than(fieldname):
"""
    Compares the value of two fields; this field's value must be greater than the value of the supplied field.
:param fieldname:
The name of the other field to compare to.
"""
def _validator(form, field):
try:
other = form[fieldname]
except KeyError:
raise validators.ValidationError(field.gettext(u"Invalid field name '%s'.") % fieldname)
if field.data != '' and field.data < other.data:
message = field.gettext(u'Field must be greater than %s.' % fieldname)
raise validators.ValidationError(message)
return _validator
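

# Illustrative sketch of how the two validators above can be attached to
# wtforms fields. The form and field names here are hypothetical examples,
# not anything used elsewhere in DIGITS.
class _ExampleConditionalForm(wtforms.Form):
    method = wtforms.SelectField(
        'Method', choices=[('upload', 'Upload'), ('url', 'URL')])
    # Required only when the 'method' field equals 'upload'.
    archive = wtforms.FileField(
        'Archive', validators=[validate_required_iff(method='upload')])
    minimum = wtforms.IntegerField('Minimum')
    # Must be greater than the value entered in 'minimum'.
    maximum = wtforms.IntegerField(
        'Maximum', validators=[validate_greater_than('minimum')])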
class Tooltip(object):
"""
An HTML form tooltip.
"""
def __init__(self, field_id, for_name, text):
self.field_id = field_id
self.text = text
self.for_name = for_name
def __str__(self):
return self()
def __unicode__(self):
return self()
def __html__(self):
return self()
def __call__(self, text=None, **kwargs):
if 'for_' in kwargs:
kwargs['for'] = kwargs.pop('for_')
else:
kwargs.setdefault('for', self.field_id)
return wtforms.widgets.HTMLString(
('<span name="%s_explanation"'
' class="explanation-tooltip glyphicon glyphicon-question-sign"'
' data-container="body"'
' title="%s"'
' ></span>') % (self.for_name, self.text))
def __repr__(self):
        return 'Tooltip(%r, %r, %r)' % (self.field_id, self.for_name, self.text)
class Explanation(object):
"""
An HTML form explanation.
"""
def __init__(self, field_id, for_name, file):
self.field_id = field_id
self.file = file
self.for_name = for_name
def __str__(self):
return self()
def __unicode__(self):
return self()
def __html__(self):
return self()
def __call__(self, file=None, **kwargs):
if 'for_' in kwargs:
kwargs['for'] = kwargs.pop('for_')
else:
kwargs.setdefault('for', self.field_id)
import flask
from digits.webapp import app
html = ''
# get the text from the html file
with app.app_context():
html = flask.render_template(file if file else self.file)
if len(html) == 0: return ''
return wtforms.widgets.HTMLString(
('<div id="%s_explanation" style="display:none;">\n'
'%s'
'</div>\n'
'<a href=# onClick="bootbox.alert($(\'#%s_explanation\').html()); return false;"><span class="glyphicon glyphicon-question-sign"></span></a>\n'
) % (self.for_name, html, self.for_name))
def __repr__(self):
        return 'Explanation(%r, %r, %r)' % (self.field_id, self.for_name, self.file)
class IntegerField(wtforms.IntegerField):
def __init__(self, label='', validators=None, tooltip='', explanation_file = '', **kwargs):
super(IntegerField, self).__init__(label, validators, **kwargs)
self.tooltip = Tooltip(self.id, self.short_name, tooltip)
self.explanation = Explanation(self.id, self.short_name, explanation_file)
class FloatField(wtforms.FloatField):
def __init__(self, label='', validators=None, tooltip='', explanation_file = '', **kwargs):
super(FloatField, self).__init__(label, validators, **kwargs)
self.tooltip = Tooltip(self.id, self.short_name, tooltip)
self.explanation = Explanation(self.id, self.short_name, explanation_file)
class SelectField(wtforms.SelectField):
def __init__(self, label='', validators=None, tooltip='', explanation_file = '', **kwargs):
super(SelectField, self).__init__(label, validators, **kwargs)
self.tooltip = Tooltip(self.id, self.short_name, tooltip)
self.explanation = Explanation(self.id, self.short_name, explanation_file)
class SelectMultipleField(wtforms.SelectMultipleField):
def __init__(self, label='', validators=None, tooltip='', explanation_file = '', **kwargs):
super(SelectMultipleField, self).__init__(label, validators, **kwargs)
self.tooltip = Tooltip(self.id, self.short_name, tooltip)
self.explanation = Explanation(self.id, self.short_name, explanation_file)
class TextField(wtforms.TextField):
def __init__(self, label='', validators=None, tooltip='', explanation_file = '', **kwargs):
super(TextField, self).__init__(label, validators, **kwargs)
self.tooltip = Tooltip(self.id, self.short_name, tooltip)
self.explanation = Explanation(self.id, self.short_name, explanation_file)
class StringField(wtforms.StringField):
def __init__(self, label='', validators=None, tooltip='', explanation_file = '', **kwargs):
super(StringField, self).__init__(label, validators, **kwargs)
self.tooltip = Tooltip(self.id, self.short_name, tooltip)
self.explanation = Explanation(self.id, self.short_name, explanation_file)
class FileField(wtforms.FileField):
def __init__(self, label='', validators=None, tooltip='', explanation_file = '', **kwargs):
super(FileField, self).__init__(label, validators, **kwargs)
self.tooltip = Tooltip(self.id, self.short_name, tooltip)
self.explanation = Explanation(self.id, self.short_name, explanation_file)
class TextAreaField(wtforms.TextAreaField):
def __init__(self, label='', validators=None, tooltip='', explanation_file = '', **kwargs):
super(TextAreaField, self).__init__(label, validators, **kwargs)
self.tooltip = Tooltip(self.id, self.short_name, tooltip)
self.explanation = Explanation(self.id, self.short_name, explanation_file)
|
{
"content_hash": "26bf9edf812485e627e904fb4a4b4469",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 156,
"avg_line_length": 36.35602094240838,
"alnum_prop": 0.6038306451612904,
"repo_name": "semisight/DIGITS",
"id": "4660cd0218a42ad136b60922147f226a94f5b9b7",
"size": "7009",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "digits/utils/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "710"
},
{
"name": "HTML",
"bytes": "143289"
},
{
"name": "JavaScript",
"bytes": "107628"
},
{
"name": "Python",
"bytes": "433982"
},
{
"name": "Shell",
"bytes": "1377"
}
],
"symlink_target": ""
}
|
import os
from flask import Flask
app = Flask(__name__)
with app.app_context():
app.config.from_object(app.name + '.config.DefaultConfig')
if 'APP_CONFIG' in os.environ:
app.config.from_envvar('APP_CONFIG')
from .views import app_bp
from .models import *
app.register_blueprint(app_bp, url_prefix=app.config.get('APP_URL_ROOT', '/'))
app.secret_key = 'default-secret-key'
|
{
"content_hash": "c67a12b23fd7abec5b2ae0193593235b",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 82,
"avg_line_length": 29.071428571428573,
"alnum_prop": 0.6683046683046683,
"repo_name": "0xquad/flask-app-template",
"id": "1607cde4444700375d53227152603985562c9a20",
"size": "505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.tmpl/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7265"
},
{
"name": "Dockerfile",
"bytes": "1446"
},
{
"name": "HTML",
"bytes": "84727"
},
{
"name": "JavaScript",
"bytes": "25"
},
{
"name": "Python",
"bytes": "4627"
}
],
"symlink_target": ""
}
|
import mock
import unittest
from vnc_api.vnc_api import *
from svc_monitor.virtual_machine_manager import VirtualMachineManager
from svc_monitor.config_db import *
import test_common_utils as test_utils
class VirtualMachineManagerTest(unittest.TestCase):
def setUp(self):
VirtualMachineSM._cassandra = mock.MagicMock()
VirtualMachineSM._cassandra.object_read = test_utils.vm_db_read
VirtualMachineInterfaceSM._cassandra = mock.MagicMock()
VirtualMachineInterfaceSM._cassandra.object_read = test_utils.vmi_db_read
InstanceIpSM._cassandra = mock.MagicMock()
InstanceIpSM._cassandra.object_read = test_utils.iip_db_read
InterfaceRouteTableSM._cassandra = mock.MagicMock()
InterfaceRouteTableSM._cassandra.object_read = test_utils.irt_db_read
self.mocked_vnc = mock.MagicMock()
self.mocked_vnc.fq_name_to_id = test_utils.get_vn_id_for_fq_name
self.mocked_vnc.virtual_network_create = test_utils.vn_create
self.mocked_vnc.virtual_machine_interface_create = test_utils.vmi_create
self.mocked_vnc.instance_ip_create = test_utils.iip_create
self.nova_mock = mock.MagicMock()
self.mocked_db = mock.MagicMock()
self.mocked_args = mock.MagicMock()
self.mocked_args.availability_zone = 'default-availability-zone'
self.log_mock = mock.MagicMock()
self.vm_manager = VirtualMachineManager(
db=self.mocked_db, logger=self.log_mock,
vnc_lib=self.mocked_vnc, vrouter_scheduler=mock.MagicMock(),
nova_client=self.nova_mock, args=self.mocked_args,
agent_manager=mock.MagicMock())
def tearDown(self):
ServiceTemplateSM.reset()
ServiceInstanceSM.reset()
InstanceIpSM.reset()
VirtualMachineInterfaceSM.reset()
VirtualMachineSM.reset()
del InterfaceRouteTableSM._cassandra
del VirtualMachineSM._cassandra
def test_virtual_machine_create(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
def nova_oper(resource, oper, proj_name, **kwargs):
if resource == 'servers' and oper == 'create':
nova_vm = test_utils.FakeNovaServer('fake-vm-uuid', kwargs['name'])
return nova_vm
else:
return mock.MagicMock()
self.nova_mock.oper = nova_oper
self.vm_manager.create_service(st, si)
self.mocked_vnc.virtual_machine_create.assert_any_call(test_utils.VMObjMatcher(1))
self.mocked_vnc.virtual_machine_create.assert_any_call(test_utils.VMObjMatcher(2))
self.assertTrue(si.availability_zone, 'default-availability-zone')
def test_virtual_machine_delete(self):
vm = test_utils.create_test_virtual_machine('fake-vm-uuid')
self.vm_manager.delete_service(vm)
def test_missing_image_in_template(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
st.params['image_name'] = None
self.vm_manager.create_service(st, si)
self.log_mock.log_error.assert_called_with("Image not present in %s" % ((':').join(st.fq_name)))
def test_missing_image_in_nova(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
def nova_oper(resource, oper, proj_name, **kwargs):
if resource == 'images' and oper == 'find':
return None
else:
return mock.MagicMock()
self.nova_mock.oper = nova_oper
self.vm_manager.create_service(st, si)
self.log_mock.log_error.assert_called_with("Image not found %s" % si.image)
def test_nova_vm_create_fail(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
def nova_oper(resource, oper, proj_name, **kwargs):
if resource == 'servers' and oper == 'create':
return None
else:
return mock.MagicMock()
self.nova_mock.oper = nova_oper
self.vm_manager.create_service(st, si)
self.log_mock.log_error.assert_any_call(test_utils.AnyStringWith('Nova vm create failed'))
def test_missing_flavor_in_template(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
def nova_oper(resource, oper, proj_name, **kwargs):
if resource == 'flavors' and oper == 'find':
return None
else:
return mock.MagicMock()
self.nova_mock.oper = nova_oper
st.params['flavor'] = None
self.vm_manager.create_service(st, si)
self.log_mock.log_error.assert_called_with(test_utils.AnyStringWith("Flavor not found"))
def test_availability_zone_setting(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
def nova_oper(resource, oper, proj_name, **kwargs):
if resource == 'servers' and oper == 'create':
nova_vm = test_utils.FakeNovaServer('fake-vm-uuid', kwargs['name'])
return nova_vm
else:
return mock.MagicMock()
self.nova_mock.oper = nova_oper
st.params['availability_zone_enable'] = True
si.params['availability_zone'] = 'test-availability-zone'
self.vm_manager.create_service(st, si)
        self.assertEqual(si.availability_zone, 'test-availability-zone')
def test_network_config_validation(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
st.params['interface_type'] = []
self.vm_manager.create_service(st, si)
self.log_mock.log_notice.assert_called_with("Interface list empty for ST %s SI %s" %
((':').join(st.fq_name), (':').join(si.fq_name)))
def test_virtual_machine_exists(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
def nova_oper(resource, oper, proj_name, **kwargs):
if resource == 'servers' and oper == 'create':
nova_vm = test_utils.FakeNovaServer(kwargs['name'], kwargs['name'])
return nova_vm
else:
return mock.MagicMock()
self.nova_mock.oper = nova_oper
self.mocked_vnc.virtual_machine_create = test_utils.vm_create
self.vm_manager.create_service(st, si)
self.log_mock.log_info.assert_any_call(test_utils.AnyStringWith('Launching VM :'))
self.log_mock.log_info.assert_any_call(test_utils.AnyStringWith('Created VM :'))
self.log_mock.log_info.assert_any_call(test_utils.AnyStringWith(si.name))
self.log_mock.reset_mock()
self.vm_manager.create_service(st, si)
        self.assertEqual(self.log_mock.log_info.call_count, 1)
def test_virtual_machine_static_routes(self):
test_utils.create_test_project('fake-domain:fake-project')
test_utils.create_test_security_group('fake-domain:fake-project:default')
test_utils.create_test_virtual_network('fake-domain:fake-project:left-vn')
test_utils.create_test_virtual_network('fake-domain:fake-project:right-vn')
st = test_utils.create_test_st(name='vm-template',
virt_type='virtual-machine',
intf_list=[['management', False], ['left', True, True], ['right', False]])
si = test_utils.create_test_si(name='vm-instance', count=2,
intf_list=['', 'left-vn', 'right-vn'])
def nova_oper(resource, oper, proj_name, **kwargs):
if resource == 'servers' and oper == 'create':
nova_vm = test_utils.FakeNovaServer('fake-vm-uuid', kwargs['name'])
return nova_vm
else:
return mock.MagicMock()
self.nova_mock.oper = nova_oper
self.vm_manager.create_service(st, si)
self.mocked_vnc.virtual_machine_create.assert_any_call(test_utils.VMObjMatcher(1))
self.mocked_vnc.virtual_machine_create.assert_any_call(test_utils.VMObjMatcher(2))
|
{
"content_hash": "c5496a90d1e6d866e4f98b62401cf19c",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 104,
"avg_line_length": 48.58267716535433,
"alnum_prop": 0.63354943273906,
"repo_name": "sajuptpm/contrail-controller",
"id": "861cd0f638433f1fbc7ed04a173aa73b1be78cd9",
"size": "12340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/config/svc-monitor/svc_monitor/tests/test_virtual_machine_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "51767"
},
{
"name": "C++",
"bytes": "19050770"
},
{
"name": "CSS",
"bytes": "531"
},
{
"name": "Groff",
"bytes": "36777"
},
{
"name": "HTML",
"bytes": "519766"
},
{
"name": "Java",
"bytes": "171966"
},
{
"name": "LLVM",
"bytes": "2937"
},
{
"name": "Lua",
"bytes": "5819"
},
{
"name": "Makefile",
"bytes": "12449"
},
{
"name": "Protocol Buffer",
"bytes": "6129"
},
{
"name": "Python",
"bytes": "4813021"
},
{
"name": "Shell",
"bytes": "81402"
},
{
"name": "Thrift",
"bytes": "40763"
},
{
"name": "Yacc",
"bytes": "7737"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import sys
# Just the print statement...
def msg(s): print(s)
def dashes(): msg(40*'-')
def msgt(s): dashes(); msg(s); dashes()
def msgx(s): msgt(s); sys.exit(0)
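if __name__ == '__main__':
    # Illustrative usage only, not part of the original module: prints a
    # framed title, a plain message, then a framed message before exiting.
    msgt('starting')
    msg('working...')
    msgx('done')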
|
{
"content_hash": "4bd9d4dc880549e16456a632b7193dce",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 39,
"avg_line_length": 22.444444444444443,
"alnum_prop": 0.6485148514851485,
"repo_name": "jperelli/redmine2github",
"id": "f93a83cf14909ec22afc86db7eaa9b8f79d58f79",
"size": "202",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/utils/msg_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "79358"
}
],
"symlink_target": ""
}
|
from jinja2 import nodes
import pytest
from jinja2schema import InvalidExpression
from jinja2schema.config import Config
from jinja2schema.core import parse
from jinja2schema.visitors.expr import (Context, visit_getitem, visit_cond_expr, visit_test,
visit_getattr, visit_compare, visit_const)
from jinja2schema.model import Dictionary, Scalar, List, Unknown, Number, Boolean, Tuple
def get_scalar_context(ast):
return Context(return_struct_cls=Scalar, predicted_struct=Scalar.from_ast(ast))
def test_cond_expr():
    template = '''{{ queue if queue is defined else 'wizard' }}'''
ast = parse(template).find(nodes.CondExpr)
rtype, struct = visit_cond_expr(ast, get_scalar_context(ast))
expected_struct = Dictionary({
'queue': Scalar(label='queue', linenos=[1], checked_as_defined=True)
})
assert struct == expected_struct
template = '''{{ queue if queue is undefined else 'wizard' }}'''
ast = parse(template).find(nodes.CondExpr)
rtype, struct = visit_cond_expr(ast, get_scalar_context(ast))
expected_struct = Dictionary({
'queue': Scalar(label='queue', linenos=[1])
})
assert struct == expected_struct
def test_getattr_1():
template = '{{ (x or y).field.subfield[2].a }}'
ast = parse(template).find(nodes.Getattr)
rtype, struct = visit_getattr(ast, get_scalar_context(ast))
x_or_y_dict = {
'field': Dictionary({
'subfield': List(Dictionary({
'a': Scalar(label='a', linenos=[1])
}, linenos=[1]), label='subfield', linenos=[1]),
}, label='field', linenos=[1]),
}
expected_struct = Dictionary({
'x': Dictionary(x_or_y_dict, label='x', linenos=[1]),
'y': Dictionary(x_or_y_dict, label='y', linenos=[1])
})
assert struct == expected_struct
def test_getattr_2():
template = '{{ data.field.subfield }}'
ast = parse(template).find(nodes.Getattr)
rtype, struct = visit_getattr(ast, get_scalar_context(ast))
expected_struct = Dictionary({
'data': Dictionary({
'field': Dictionary({
'subfield': Scalar(label='subfield', linenos=[1]),
}, label='field', linenos=[1]),
}, label='data', linenos=[1]),
})
assert struct == expected_struct
def test_getattr_3():
template = '''{{ a[z][1:\nn][1].x }}'''
ast = parse(template).find(nodes.Getattr)
config = Config()
config.TYPE_OF_VARIABLE_INDEXED_WITH_VARIABLE_TYPE = 'list'
rtype, struct = visit_getattr(ast, get_scalar_context(ast), {}, config)
expected_struct = Dictionary({
'a': List(
List(
List(
Dictionary({
'x': Scalar(label='x', linenos=[2])
}, linenos=[2]),
linenos=[2]),
linenos=[1]
),
label='a',
linenos=[1]
),
'z': Scalar(label='z', linenos=[1]),
'n': Number(label='n', linenos=[2])
})
assert struct == expected_struct
def test_getitem_1():
template = '''{{ a['b']['c'][1]['d'][x] }}'''
ast = parse(template).find(nodes.Getitem)
config = Config()
config.TYPE_OF_VARIABLE_INDEXED_WITH_VARIABLE_TYPE = 'list'
rtype, struct = visit_getitem(ast, get_scalar_context(ast), {}, config)
expected_struct = Dictionary({
'a': Dictionary({
'b': Dictionary({
'c': List(Dictionary({
'd': List(Scalar(linenos=[1]), label='d', linenos=[1])
}, linenos=[1]), label='c', linenos=[1]),
}, label='b', linenos=[1]),
}, label='a', linenos=[1]),
'x': Scalar(label='x', linenos=[1]),
})
assert struct == expected_struct
def test_getitem_2():
template = '''{{ a[z] }}'''
ast = parse(template).find(nodes.Getitem)
config = Config()
config.TYPE_OF_VARIABLE_INDEXED_WITH_VARIABLE_TYPE = 'dictionary'
rtype, struct = visit_getitem(ast, get_scalar_context(ast), {}, config)
expected_struct = Dictionary({
'a': Dictionary(label='a', linenos=[1]),
'z': Scalar(label='z', linenos=[1]),
})
assert struct == expected_struct
def test_getitem_3():
template = '''{{ a[3] }}'''
ast = parse(template).find(nodes.Getitem)
config = Config()
config.TYPE_OF_VARIABLE_INDEXED_WITH_INTEGER_TYPE = 'tuple'
rtype, struct = visit_getitem(ast, get_scalar_context(ast), {}, config)
expected_struct = Dictionary({
'a': Tuple([
Unknown(),
Unknown(),
Unknown(),
Scalar(linenos=[1]),
], label='a', linenos=[1]),
})
assert struct == expected_struct
def test_compare_1():
template = '{{ a < b < c }}'
ast = parse(template).find(nodes.Compare)
rtype, struct = visit_compare(ast, get_scalar_context(ast))
expected_struct = Dictionary({
'a': Unknown(label='a', linenos=[1]),
'b': Unknown(label='b', linenos=[1]),
'c': Unknown(label='c', linenos=[1]),
})
assert struct == expected_struct
def test_compare_2():
template = '{{ a + b[1] - c == 4 == x }}'
ast = parse(template).find(nodes.Compare)
rtype, struct = visit_compare(ast, get_scalar_context(ast))
# TODO make customizable
expected_struct = Dictionary({
'a': Unknown(label='a', linenos=[1]),
'b': List(Unknown(linenos=[1]), label='b', linenos=[1]),
'c': Unknown(label='c', linenos=[1]),
'x': Unknown(label='x', linenos=[1]),
})
assert struct == expected_struct
def test_slice():
template = '''{{ xs[a:2:b] }}'''
ast = parse(template).find(nodes.Getitem)
rtype, struct = visit_getitem(ast, get_scalar_context(ast))
assert struct == Dictionary({
'xs': List(Scalar(linenos=[1]), label='xs', linenos=[1]),
'a': Number(label='a', linenos=[1]),
'b': Number(label='b', linenos=[1]),
})
def test_test_1():
template = '''{{ x is divisibleby (data.field) }}'''
ast = parse(template).find(nodes.Test)
rtype, struct = visit_test(ast, get_scalar_context(ast))
expected_struct = Dictionary({
'x': Scalar(label='x', linenos=[1]),
'data': Dictionary({
'field': Number(label='field', linenos=[1]),
}, label='data', linenos=[1])
})
assert struct == expected_struct
template = '''{{ x is divisibleby 3 }}'''
ast = parse(template).find(nodes.Test)
rtype, struct = visit_test(ast, get_scalar_context(ast))
expected_struct = Dictionary({
'x': Scalar(label='x', linenos=[1]),
})
assert struct == expected_struct
def test_test_2():
template = '''{{ x is string }}'''
ast = parse(template).find(nodes.Test)
rtype, struct = visit_test(ast, get_scalar_context(ast))
expected_struct = Dictionary({
'x': Unknown(label='x', linenos=[1])
})
assert struct == expected_struct
template = '{{ x is unknown_filter }}'
ast = parse(template).find(nodes.Test)
with pytest.raises(InvalidExpression) as e:
visit_test(ast, get_scalar_context(ast))
assert 'line 1: unknown test "unknown_filter"' in str(e.value)
def test_compare():
template = '''{{ a < c }}'''
compare_ast = parse(template).find(nodes.Compare)
rtype, struct = visit_compare(compare_ast, get_scalar_context(compare_ast))
expected_rtype = Boolean(linenos=[1])
assert rtype == expected_rtype
def test_const():
template = '''{{ false }}'''
const_ast = parse(template).find(nodes.Const)
rtype, struct = visit_const(const_ast, get_scalar_context(const_ast))
assert rtype == Boolean(constant=True, linenos=[1], value=False)
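def test_getattr_simple_sketch():
    # Hypothetical extra test, not part of the original suite: repeats the
    # visit_getattr pattern from test_getattr_2 on a single attribute access.
    template = '{{ user.name }}'
    ast = parse(template).find(nodes.Getattr)
    rtype, struct = visit_getattr(ast, get_scalar_context(ast))
    expected_struct = Dictionary({
        'user': Dictionary({
            'name': Scalar(label='name', linenos=[1]),
        }, label='user', linenos=[1]),
    })
    assert struct == expected_struct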
|
{
"content_hash": "3c57179c473c87f7fdfe3f8d958e287c",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 92,
"avg_line_length": 32.30416666666667,
"alnum_prop": 0.5775828711466529,
"repo_name": "aromanovich/jinja2schema",
"id": "204fa68fe161ad83504a079c9cc7a91b40db2b4f",
"size": "7769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit_tests/test_expr_visitors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1931"
},
{
"name": "Python",
"bytes": "124042"
},
{
"name": "Shell",
"bytes": "165"
}
],
"symlink_target": ""
}
|
import logging
from nefertari.utils import snake2camel, maybe_dotted
log = logging.getLogger(__name__)
# All actions names(view method names) supported by nefertari
ACTIONS = [
'index', # Collection GET
'create', # Collection POST
'update_many', # Collection PATCH/PUT
'delete_many', # Collection DELETE
'collection_options', # Collection OPTIONS
'show', # Item GET
'update', # Item PATCH
'replace', # Item PUT
'delete', # Item DELETE
'item_options', # Item OPTIONS
]
PERMISSIONS = {
'index': 'view',
'show': 'view',
'create': 'create',
'update': 'update',
'update_many': 'update',
'replace': 'update',
'delete': 'delete',
'delete_many': 'delete',
'collection_options': 'options',
'item_options': 'options',
}
DEFAULT_ID_NAME = 'id'
def get_app_package_name(config):
""" Get package name of app that is running.
    The name is either the name of the app that included nefertari, or the
    current package name (which is 'nefertari').
"""
if config.includepath:
return config.includepath[0].split(':')[0]
return config.package_name
def get_root_resource(config):
"""Returns the root resource."""
app_package_name = get_app_package_name(config)
return config.registry._root_resources.setdefault(
app_package_name, Resource(config))
def get_resource_map(request):
return request.registry._resources_map
def add_resource_routes(config, view, member_name, collection_name, **kwargs):
"""
``view`` is a dotted name of (or direct reference to) a
Python view class,
e.g. ``'my.package.views.MyView'``.
``member_name`` should be the appropriate singular version of the resource
given your locale and used with members of the collection.
``collection_name`` will be used to refer to the resource collection
methods and should be a plural version of the member_name argument.
All keyword arguments are optional.
``path_prefix``
Prepends the URL path for the Route with the path_prefix
given. This is most useful for cases where you want to mix
resources or relations between resources.
``name_prefix``
Prepends the route names that are generated with the
name_prefix given. Combined with the path_prefix option,
it's easy to generate route names and paths that represent
resources that are in relations.
Example::
config.add_resource_routes(
'myproject.views:CategoryView', 'message', 'messages',
path_prefix='/category/{category_id}',
name_prefix="category_")
# GET /category/7/messages/1
# has named route "category_message"
"""
view = maybe_dotted(view)
path_prefix = kwargs.pop('path_prefix', '')
name_prefix = kwargs.pop('name_prefix', '')
if config.route_prefix:
name_prefix = "%s_%s" % (config.route_prefix, name_prefix)
if collection_name:
id_name = '/{%s}' % (kwargs.pop('id_name', None) or DEFAULT_ID_NAME)
else:
id_name = ''
path = path_prefix.strip('/') + '/' + (collection_name or member_name)
_factory = kwargs.pop('factory', None)
    # If factory is not set, then auth should be False
_auth = kwargs.pop('auth', None) and _factory
_traverse = (kwargs.pop('traverse', None) or id_name) if _factory else None
action_route = {}
added_routes = {}
def add_route_and_view(config, action, route_name, path, request_method,
**route_kwargs):
if route_name not in added_routes:
config.add_route(
route_name, path, factory=_factory,
request_method=['GET', 'POST', 'PUT', 'PATCH', 'DELETE',
'OPTIONS'],
**route_kwargs)
added_routes[route_name] = path
action_route[action] = route_name
if _auth:
permission = PERMISSIONS[action]
else:
permission = None
config.add_view(view=view, attr=action, route_name=route_name,
request_method=request_method,
permission=permission,
**kwargs)
config.commit()
if collection_name == member_name:
collection_name = collection_name + '_collection'
if collection_name:
add_route_and_view(
config, 'index', name_prefix + collection_name, path,
'GET')
add_route_and_view(
config, 'collection_options', name_prefix + collection_name, path,
'OPTIONS')
add_route_and_view(
config, 'show', name_prefix + member_name, path + id_name,
'GET', traverse=_traverse)
add_route_and_view(
config, 'item_options', name_prefix + member_name, path + id_name,
'OPTIONS', traverse=_traverse)
add_route_and_view(
config, 'replace', name_prefix + member_name, path + id_name,
'PUT', traverse=_traverse)
add_route_and_view(
config, 'update', name_prefix + member_name, path + id_name,
'PATCH', traverse=_traverse)
add_route_and_view(
config, 'create', name_prefix + (collection_name or member_name), path,
'POST')
add_route_and_view(
config, 'delete', name_prefix + member_name, path + id_name,
'DELETE', traverse=_traverse)
if collection_name:
add_route_and_view(
config, 'update_many',
name_prefix + (collection_name or member_name),
path, 'PUT', traverse=_traverse)
add_route_and_view(
config, 'update_many',
name_prefix + (collection_name or member_name),
path, 'PATCH', traverse=_traverse)
add_route_and_view(
config, 'delete_many',
name_prefix + (collection_name or member_name),
path, 'DELETE', traverse=_traverse)
return action_route
def get_default_view_path(resource):
"Returns the dotted path to the default view class."
parts = [a.member_name for a in resource.ancestors] +\
[resource.collection_name or resource.member_name]
if resource.prefix:
parts.insert(-1, resource.prefix)
view_file = '%s' % '_'.join(parts)
view = '%s:%sView' % (view_file, snake2camel(view_file))
app_package_name = get_app_package_name(resource.config)
return '%s.views.%s' % (app_package_name, view)
class Resource(object):
"""Class providing the core functionality.
::
m = Resource(config)
pa = m.add('parent', 'parents')
pa.add('child', 'children')
"""
def __init__(self, config, member_name='', collection_name='',
parent=None, uid='', children=None, id_name='', prefix='',
auth=False, http_cache=0, default_factory=None):
self.__dict__.update(locals())
self.children = children or []
self._ancestors = []
def __repr__(self):
return "%s(uid='%s')" % (self.__class__.__name__, self.uid)
def get_ancestors(self):
"Returns the list of ancestor resources."
if self._ancestors:
return self._ancestors
if not self.parent:
return []
obj = self.resource_map.get(self.parent.uid)
while obj and obj.member_name:
self._ancestors.append(obj)
obj = obj.parent
self._ancestors.reverse()
return self._ancestors
ancestors = property(get_ancestors)
resource_map = property(lambda self: self.config.registry._resources_map)
model_collections = property(
lambda self: self.config.registry._model_collections)
is_root = property(lambda self: not self.member_name)
is_singular = property(
lambda self: not self.is_root and not self.collection_name)
def add(self, member_name, collection_name='', parent=None, uid='',
**kwargs):
"""
:param member_name: singular name of the resource. It should be the
appropriate singular version of the resource given your locale
and used with members of the collection.
:param collection_name: plural name of the resource. It will be used
to refer to the resource collection methods and should be a
plural version of the ``member_name`` argument.
Note: if collection_name is empty, it means resource is singular
:param parent: parent resource name or object.
:param uid: unique name for the resource
:param kwargs:
view: custom view to overwrite the default one.
            the rest of the keyword arguments are passed to
add_resource_routes call.
:return: ResourceMap object
"""
# self is the parent resource on which this method is called.
parent = (self.resource_map.get(parent) if type(parent)
is str else parent or self)
prefix = kwargs.pop('prefix', '')
uid = (uid or
':'.join(filter(bool, [parent.uid, prefix, member_name])))
if uid in self.resource_map:
raise ValueError('%s already exists in resource map' % uid)
# Use id_name of parent for singular views to make url generation
# easier
id_name = kwargs.get('id_name', '')
if not id_name and parent:
id_name = parent.id_name
new_resource = Resource(self.config, member_name=member_name,
collection_name=collection_name,
parent=parent, uid=uid,
id_name=id_name,
prefix=prefix)
view = maybe_dotted(
kwargs.pop('view', None) or get_default_view_path(new_resource))
for name, val in kwargs.pop('view_args', {}).items():
setattr(view, name, val)
root_resource = self.config.get_root_resource()
view.root_resource = root_resource
new_resource.view = view
path_segs = []
kwargs['path_prefix'] = ''
for res in new_resource.ancestors:
if not res.is_singular:
if res.id_name:
id_full = res.id_name
else:
id_full = "%s_%s" % (res.member_name, DEFAULT_ID_NAME)
path_segs.append('%s/{%s}' % (res.collection_name, id_full))
else:
path_segs.append(res.member_name)
if path_segs:
kwargs['path_prefix'] = '/'.join(path_segs)
if prefix:
kwargs['path_prefix'] += '/' + prefix
name_segs = [a.member_name for a in new_resource.ancestors]
name_segs.insert(1, prefix)
name_segs = [seg for seg in name_segs if seg]
if name_segs:
kwargs['name_prefix'] = '_'.join(name_segs) + ':'
new_resource.renderer = kwargs.setdefault(
'renderer', view._default_renderer)
kwargs.setdefault('auth', root_resource.auth)
kwargs.setdefault('factory', root_resource.default_factory)
_factory = maybe_dotted(kwargs['factory'])
kwargs['auth'] = kwargs.get('auth', root_resource.auth)
kwargs['http_cache'] = kwargs.get(
'http_cache', root_resource.http_cache)
new_resource.action_route_map = add_resource_routes(
self.config, view, member_name, collection_name,
**kwargs)
self.resource_map[uid] = new_resource
# add all route names for this resource as keys in the dict,
        # so it's easy to find it in the view.
self.resource_map.update(dict.fromkeys(
list(new_resource.action_route_map.values()),
new_resource))
# Store resources in {modelName: resource} map if:
# * Its view has Model defined
# * It's not singular
# * Its parent is root or it's not already stored
model = new_resource.view.Model
is_collection = model is not None and not new_resource.is_singular
if is_collection:
is_needed = (model.__name__ not in self.model_collections or
new_resource.parent is root_resource)
if is_needed:
self.model_collections[model.__name__] = new_resource
parent.children.append(new_resource)
view._resource = new_resource
view._factory = _factory
return new_resource
def add_from_child(self, resource, **kwargs):
""" Add a resource with its all children resources to the current
resource.
"""
new_resource = self.add(
resource.member_name, resource.collection_name, **kwargs)
for child in resource.children:
new_resource.add_from_child(child, **kwargs)
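def example_resource_tree(config):
    # Hypothetical sketch, not part of the original module: builds a nested
    # resource tree with Resource.add(), mirroring the Resource docstring.
    # It assumes matching view classes (e.g. <app>.views.UsersView) exist,
    # since add() resolves a default view path for every resource.
    root = get_root_resource(config)
    users = root.add('user', 'users')   # /users and /users/{user_id}
    users.add('story', 'stories')       # /users/{user_id}/stories
    users.add('profile')                # singular: /users/{user_id}/profile
    return root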
|
{
"content_hash": "f69bd92727d0e77d1954274279bfe1f6",
"timestamp": "",
"source": "github",
"line_count": 389,
"max_line_length": 79,
"avg_line_length": 33.709511568123396,
"alnum_prop": 0.5777472736978571,
"repo_name": "postatum/nefertari",
"id": "93c8d6be774db62e072b29e0794decdd5dc1c05b",
"size": "13113",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nefertari/resource.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "352754"
}
],
"symlink_target": ""
}
|
import os
from subprocess import Popen
def popen_bloom_script(cmd, **kwargs):
this_location = os.path.abspath(os.path.dirname(__file__))
bin_location = os.path.join(this_location, '..', 'bin')
cmd = "%s%s%s" % (bin_location, os.path.sep, cmd)
proc = Popen(cmd, **kwargs)
return proc
|
{
"content_hash": "ead8c2be07bf7c045c5bfa2060fd8ff9",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 62,
"avg_line_length": 30.5,
"alnum_prop": 0.6426229508196721,
"repo_name": "nayomal/bloom",
"id": "686ebf39476c1bb56568465098c05f7a25b93ab9",
"size": "305",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "test/utils/script_runner.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "EmberScript",
"bytes": "2811"
},
{
"name": "Makefile",
"bytes": "3734"
},
{
"name": "Python",
"bytes": "414527"
}
],
"symlink_target": ""
}
|
import os
import subprocess
import sqlite3
import pytest
from django.conf import settings
from django.db import connection
from django.db import transaction
from .compat import force_text
from .app.models import Item
DB_NAME = settings.DATABASES['default']['NAME']
if DB_NAME == ':memory:':
TEST_DB_NAME = DB_NAME
else:
DB_NAME += '_db_test'
TEST_DB_NAME = 'test_' + DB_NAME
def get_db_engine():
from django.conf import settings
return settings.DATABASES['default']['ENGINE'].split('.')[-1]
class CmdResult(object):
def __init__(self, status_code, std_out, std_err):
self.status_code = status_code
self.std_out = std_out
self.std_err = std_err
def run_cmd(*args):
r = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdoutdata, stderrdata = r.communicate()
ret = r.wait()
return CmdResult(ret, stdoutdata, stderrdata)
def run_mysql(*args):
from django.conf import settings
user = settings.DATABASES['default'].get('USER', None)
if user:
args = ('-u', user) + tuple(args)
args = ('mysql',) + tuple(args)
return run_cmd(*args)
def skip_if_sqlite_in_memory():
from django.conf import settings
if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3' \
and settings.DATABASES['default']['NAME'] == ':memory:':
pytest.skip('Do not test db reuse since database does not support it')
def create_empty_production_database():
drop_database(name=DB_NAME)
if get_db_engine() == 'postgresql_psycopg2':
r = run_cmd('psql', 'postgres', '-c', 'CREATE DATABASE %s' % DB_NAME)
assert ('CREATE DATABASE' in force_text(r.std_out) or
'already exists' in force_text(r.std_err))
return
if get_db_engine() == 'mysql':
r = run_mysql('-e', 'CREATE DATABASE %s' % DB_NAME)
assert (r.status_code == 0 or
'database exists' in force_text(r.std_out) or
'database exists' in force_text(r.std_err))
return
if get_db_engine() == 'sqlite3':
if DB_NAME == ':memory:':
raise AssertionError(
'sqlite in-memory database must not be created!')
open(DB_NAME, 'a').close()
return
raise AssertionError('%s cannot be tested properly' % get_db_engine())
def drop_database(name=TEST_DB_NAME, suffix=None):
assert bool(name) ^ bool(suffix), 'name and suffix cannot be used together'
if suffix:
name = '%s_%s' % (name, suffix)
if get_db_engine() == 'postgresql_psycopg2':
r = run_cmd('psql', 'postgres', '-c', 'DROP DATABASE %s' % name)
assert ('DROP DATABASE' in force_text(r.std_out) or
'does not exist' in force_text(r.std_err))
return
if get_db_engine() == 'mysql':
r = run_mysql('-e', 'DROP DATABASE %s' % name)
assert ('database doesn\'t exist' in force_text(r.std_err) or
r.status_code == 0)
return
if get_db_engine() == 'sqlite3':
if name == ':memory:':
raise AssertionError(
'sqlite in-memory database cannot be dropped!')
if os.path.exists(name):
os.unlink(name)
return
raise AssertionError('%s cannot be tested properly!' % get_db_engine())
def db_exists(db_suffix=None):
name = TEST_DB_NAME
if db_suffix:
name = '%s_%s' % (name, db_suffix)
if get_db_engine() == 'postgresql_psycopg2':
r = run_cmd('psql', name, '-c', 'SELECT 1')
return r.status_code == 0
if get_db_engine() == 'mysql':
r = run_mysql(name, '-e', 'SELECT 1')
return r.status_code == 0
if get_db_engine() == 'sqlite3':
if TEST_DB_NAME == ':memory:':
raise AssertionError(
'sqlite in-memory database cannot be checked for existence!')
return os.path.exists(name)
raise AssertionError('%s cannot be tested properly!' % get_db_engine())
def mark_database():
if get_db_engine() == 'postgresql_psycopg2':
r = run_cmd('psql', TEST_DB_NAME, '-c', 'CREATE TABLE mark_table();')
assert r.status_code == 0
return
if get_db_engine() == 'mysql':
r = run_mysql(TEST_DB_NAME, '-e', 'CREATE TABLE mark_table(kaka int);')
assert r.status_code == 0
return
if get_db_engine() == 'sqlite3':
if TEST_DB_NAME == ':memory:':
raise AssertionError('sqlite in-memory database cannot be marked!')
conn = sqlite3.connect(TEST_DB_NAME)
try:
with conn:
conn.execute('CREATE TABLE mark_table(kaka int);')
finally: # Close the DB even if an error is raised
conn.close()
return
raise AssertionError('%s cannot be tested properly!' % get_db_engine())
def mark_exists():
if get_db_engine() == 'postgresql_psycopg2':
r = run_cmd('psql', TEST_DB_NAME, '-c', 'SELECT 1 FROM mark_table')
# When something pops out on std_out, we are good
return bool(r.std_out)
if get_db_engine() == 'mysql':
r = run_mysql(TEST_DB_NAME, '-e', 'SELECT 1 FROM mark_table')
return r.status_code == 0
if get_db_engine() == 'sqlite3':
if TEST_DB_NAME == ':memory:':
raise AssertionError(
'sqlite in-memory database cannot be checked for mark!')
conn = sqlite3.connect(TEST_DB_NAME)
try:
with conn:
conn.execute('SELECT 1 FROM mark_table')
return True
except sqlite3.OperationalError:
return False
finally: # Close the DB even if an error is raised
conn.close()
raise AssertionError('%s cannot be tested properly!' % get_db_engine())
def noop_transactions():
"""Test whether transactions are disabled.
Return True if transactions are disabled, False if they are
enabled.
"""
# Newer versions of Django simply run standard tests in an atomic block.
if hasattr(connection, 'in_atomic_block'):
return connection.in_atomic_block
else:
with transaction.commit_manually():
Item.objects.create(name='transaction_noop_test')
transaction.rollback()
try:
item = Item.objects.get(name='transaction_noop_test')
except Item.DoesNotExist:
return False
else:
item.delete()
return True
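def _example_transaction_state_message():
    # Hypothetical helper, not part of the original module: shows how a test
    # could use noop_transactions() to report whether Django wrapped it in an
    # atomic block (regular django_db tests) or left real transactions on
    # (transactional tests).
    if noop_transactions():
        return 'atomic block active: writes will be rolled back'
    return 'real transactions active: the database is flushed between tests'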
|
{
"content_hash": "0b8581e42fabb13d40cc0b1cdce57d6f",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 80,
"avg_line_length": 30.35514018691589,
"alnum_prop": 0.5897475369458128,
"repo_name": "felixonmars/pytest-django",
"id": "f7460fce596d8c252c27b572138a0d488c8a0852",
"size": "6496",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "pytest_django_test/db_helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "601"
},
{
"name": "Python",
"bytes": "135372"
}
],
"symlink_target": ""
}
|
import traceback
from oslo.config import cfg
import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow.utils import misc
from cinder import exception
from cinder import flow_utils
from cinder.image import glance
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder.openstack.common import timeutils
from cinder import utils
from cinder.volume.flows import common
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
ACTION = 'volume:create'
CONF = cfg.CONF
# These attributes we will attempt to save for the volume if they exist
# in the source image metadata.
IMAGE_ATTRIBUTES = (
'checksum',
'container_format',
'disk_format',
'min_disk',
'min_ram',
'size',
)
class OnFailureRescheduleTask(flow_utils.CinderTask):
"""Triggers a rescheduling request to be sent when reverting occurs.
Reversion strategy: Triggers the rescheduling mechanism whereby a cast gets
sent to the scheduler rpc api to allow for an attempt X of Y for scheduling
this volume elsewhere.
"""
def __init__(self, reschedule_context, db, scheduler_rpcapi):
requires = ['filter_properties', 'image_id', 'request_spec',
'snapshot_id', 'volume_id', 'context']
super(OnFailureRescheduleTask, self).__init__(addons=[ACTION],
requires=requires)
self.scheduler_rpcapi = scheduler_rpcapi
self.db = db
self.reschedule_context = reschedule_context
# These exception types will trigger the volume to be set into error
# status rather than being rescheduled.
self.no_reschedule_types = [
# Image copying happens after volume creation so rescheduling due
# to copy failure will mean the same volume will be created at
# another place when it still exists locally.
exception.ImageCopyFailure,
# Metadata updates happen after the volume has been created so if
# they fail, rescheduling will likely attempt to create the volume
# on another machine when it still exists locally.
exception.MetadataCopyFailure,
exception.MetadataCreateFailure,
exception.MetadataUpdateFailure,
# The volume/snapshot has been removed from the database, that
# can not be fixed by rescheduling.
exception.VolumeNotFound,
exception.SnapshotNotFound,
exception.VolumeTypeNotFound,
exception.ImageUnacceptable,
]
def execute(self, **kwargs):
pass
def _reschedule(self, context, cause, request_spec, filter_properties,
snapshot_id, image_id, volume_id, **kwargs):
"""Actions that happen during the rescheduling attempt occur here."""
create_volume = self.scheduler_rpcapi.create_volume
if not filter_properties:
filter_properties = {}
if 'retry' not in filter_properties:
filter_properties['retry'] = {}
retry_info = filter_properties['retry']
num_attempts = retry_info.get('num_attempts', 0)
request_spec['volume_id'] = volume_id
LOG.debug(_("Volume %(volume_id)s: re-scheduling %(method)s "
"attempt %(num)d due to %(reason)s") %
{'volume_id': volume_id,
'method': common.make_pretty_name(create_volume),
'num': num_attempts,
'reason': cause.exception_str})
if all(cause.exc_info):
# Stringify to avoid circular ref problem in json serialization
retry_info['exc'] = traceback.format_exception(*cause.exc_info)
return create_volume(context, CONF.volume_topic, volume_id,
snapshot_id=snapshot_id, image_id=image_id,
request_spec=request_spec,
filter_properties=filter_properties)
def _post_reschedule(self, context, volume_id):
"""Actions that happen after the rescheduling attempt occur here."""
LOG.debug(_("Volume %s: re-scheduled"), volume_id)
def _pre_reschedule(self, context, volume_id):
"""Actions that happen before the rescheduling attempt occur here."""
try:
# Reset the volume state.
#
# NOTE(harlowja): this is awkward to be done here, shouldn't
# this happen at the scheduler itself and not before it gets
# sent to the scheduler? (since what happens if it never gets
# there??). It's almost like we need a status of 'on-the-way-to
# scheduler' in the future.
update = {
'status': 'creating',
'scheduled_at': timeutils.utcnow(),
}
LOG.debug(_("Updating volume %(volume_id)s with %(update)s.") %
{'update': update, 'volume_id': volume_id})
self.db.volume_update(context, volume_id, update)
except exception.CinderException:
# Don't let resetting the status cause the rescheduling to fail.
LOG.exception(_("Volume %s: resetting 'creating' status failed."),
volume_id)
def revert(self, context, result, flow_failures, **kwargs):
# Check if we have a cause which can tell us not to reschedule.
for failure in flow_failures.values():
if failure.check(*self.no_reschedule_types):
return
volume_id = kwargs['volume_id']
# Use a different context when rescheduling.
if self.reschedule_context:
context = self.reschedule_context
try:
cause = list(flow_failures.values())[0]
self._pre_reschedule(context, volume_id)
self._reschedule(context, cause, **kwargs)
self._post_reschedule(context, volume_id)
except exception.CinderException:
LOG.exception(_("Volume %s: rescheduling failed"), volume_id)
class ExtractVolumeRefTask(flow_utils.CinderTask):
"""Extracts volume reference for given volume id."""
default_provides = 'volume_ref'
def __init__(self, db, host):
super(ExtractVolumeRefTask, self).__init__(addons=[ACTION])
self.db = db
self.host = host
def execute(self, context, volume_id):
# NOTE(harlowja): this will fetch the volume from the database, if
# the volume has been deleted before we got here then this should fail.
#
# In the future we might want to have a lock on the volume_id so that
        # the volume cannot be deleted while it's still being created?
volume_ref = self.db.volume_get(context, volume_id)
# NOTE(vish): so we don't have to get volume from db again before
# passing it to the driver.
volume_ref['host'] = self.host
return volume_ref
def revert(self, context, volume_id, result, **kwargs):
if isinstance(result, misc.Failure):
return
common.error_out_volume(context, self.db, volume_id)
LOG.error(_("Volume %s: create failed"), volume_id)
class ExtractVolumeSpecTask(flow_utils.CinderTask):
"""Extracts a spec of a volume to be created into a common structure.
This task extracts and organizes the input requirements into a common
and easier to analyze structure for later tasks to use. It will also
attach the underlying database volume reference which can be used by
other tasks to reference for further details about the volume to be.
Reversion strategy: N/A
"""
default_provides = 'volume_spec'
def __init__(self, db):
requires = ['image_id', 'snapshot_id', 'source_volid']
super(ExtractVolumeSpecTask, self).__init__(addons=[ACTION],
requires=requires)
self.db = db
def execute(self, context, volume_ref, **kwargs):
get_remote_image_service = glance.get_remote_image_service
volume_name = volume_ref['name']
volume_size = utils.as_int(volume_ref['size'], quiet=False)
# Create a dictionary that will represent the volume to be so that
# later tasks can easily switch between the different types and create
# the volume according to the volume types specifications (which are
# represented in this dictionary).
specs = {
'status': volume_ref['status'],
'type': 'raw', # This will have the type of the volume to be
# created, which should be one of [raw, snap,
# source_vol, image]
'volume_id': volume_ref['id'],
'volume_name': volume_name,
'volume_size': volume_size,
}
if kwargs.get('snapshot_id'):
# We are making a snapshot based volume instead of a raw volume.
specs.update({
'type': 'snap',
'snapshot_id': kwargs['snapshot_id'],
})
elif kwargs.get('source_volid'):
# We are making a source based volume instead of a raw volume.
#
# NOTE(harlowja): This will likely fail if the source volume
# disappeared by the time this call occurred.
source_volid = kwargs['source_volid']
source_volume_ref = self.db.volume_get(context, source_volid)
specs.update({
'source_volid': source_volid,
                # This is captured in case we have to revert and we want to set
# back the source volume status to its original status. This
# may or may not be sketchy to do??
'source_volstatus': source_volume_ref['status'],
'type': 'source_vol',
})
elif kwargs.get('image_id'):
            # We are making an image-based volume instead of a raw volume.
image_href = kwargs['image_id']
image_service, image_id = get_remote_image_service(context,
image_href)
specs.update({
'type': 'image',
'image_id': image_id,
'image_location': image_service.get_location(context,
image_id),
'image_meta': image_service.show(context, image_id),
# Instead of refetching the image service later just save it.
#
# NOTE(harlowja): if we have to later recover this tasks output
# on another 'node' that this object won't be able to be
# serialized, so we will have to recreate this object on
# demand in the future.
'image_service': image_service,
})
return specs
def revert(self, context, result, **kwargs):
if isinstance(result, misc.Failure):
return
volume_spec = result.get('volume_spec')
# Restore the source volume status and set the volume to error status.
common.restore_source_status(context, self.db, volume_spec)
class NotifyVolumeActionTask(flow_utils.CinderTask):
"""Performs a notification about the given volume when called.
Reversion strategy: N/A
"""
def __init__(self, db, event_suffix):
super(NotifyVolumeActionTask, self).__init__(addons=[ACTION,
event_suffix])
self.db = db
self.event_suffix = event_suffix
def execute(self, context, volume_ref):
volume_id = volume_ref['id']
try:
volume_utils.notify_about_volume_usage(context, volume_ref,
self.event_suffix,
host=volume_ref['host'])
except exception.CinderException:
            # If notification sending or volume database entry reading fails,
            # then we shouldn't error out the whole workflow since this is
            # not always information that must be sent for volumes to operate.
LOG.exception(_("Failed notifying about the volume"
" action %(event)s for volume %(volume_id)s") %
{'event': self.event_suffix,
'volume_id': volume_id})
class CreateVolumeFromSpecTask(flow_utils.CinderTask):
"""Creates a volume from a provided specification.
Reversion strategy: N/A
"""
default_provides = 'volume'
def __init__(self, db, driver):
super(CreateVolumeFromSpecTask, self).__init__(addons=[ACTION])
self.db = db
self.driver = driver
# This maps the different volume specification types into the methods
# that can create said volume type (aka this is a jump table).
self._create_func_mapping = {
'raw': self._create_raw_volume,
'snap': self._create_from_snapshot,
'source_vol': self._create_from_source_volume,
'image': self._create_from_image,
}
def _handle_bootable_volume_glance_meta(self, context, volume_id,
**kwargs):
"""Enable bootable flag and properly handle glance metadata.
Caller should provide one and only one of snapshot_id,source_volid
and image_id. If an image_id specified, a image_meta should also be
provided, otherwise will be treated as an empty dictionary.
"""
log_template = _("Copying metadata from %(src_type)s %(src_id)s to "
"%(vol_id)s.")
exception_template = _("Failed updating volume %(vol_id)s metadata"
" using the provided %(src_type)s"
" %(src_id)s metadata")
src_type = None
src_id = None
self._enable_bootable_flag(context, volume_id)
try:
if kwargs.get('snapshot_id'):
src_type = 'snapshot'
src_id = kwargs['snapshot_id']
snapshot_id = src_id
LOG.debug(log_template % {'src_type': src_type,
'src_id': src_id,
'vol_id': volume_id})
self.db.volume_glance_metadata_copy_to_volume(
context, volume_id, snapshot_id)
elif kwargs.get('source_volid'):
src_type = 'source volume'
src_id = kwargs['source_volid']
source_volid = src_id
LOG.debug(log_template % {'src_type': src_type,
'src_id': src_id,
'vol_id': volume_id})
self.db.volume_glance_metadata_copy_from_volume_to_volume(
context,
source_volid,
volume_id)
elif kwargs.get('image_id'):
src_type = 'image'
src_id = kwargs['image_id']
image_id = src_id
image_meta = kwargs.get('image_meta', {})
LOG.debug(log_template % {'src_type': src_type,
'src_id': src_id,
'vol_id': volume_id})
self._capture_volume_image_metadata(context, volume_id,
image_id, image_meta)
except exception.CinderException as ex:
LOG.exception(exception_template % {'src_type': src_type,
'src_id': src_id,
'vol_id': volume_id})
raise exception.MetadataCopyFailure(reason=ex)
def _create_from_snapshot(self, context, volume_ref, snapshot_id,
**kwargs):
volume_id = volume_ref['id']
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
model_update = self.driver.create_volume_from_snapshot(volume_ref,
snapshot_ref)
# NOTE(harlowja): Subtasks would be useful here since after this
# point the volume has already been created and further failures
# will not destroy the volume (although they could in the future).
make_bootable = False
try:
originating_vref = self.db.volume_get(context,
snapshot_ref['volume_id'])
make_bootable = originating_vref.bootable
except exception.CinderException as ex:
LOG.exception(_("Failed fetching snapshot %(snapshot_id)s bootable"
" flag using the provided glance snapshot "
"%(snapshot_ref_id)s volume reference") %
{'snapshot_id': snapshot_id,
'snapshot_ref_id': snapshot_ref['volume_id']})
raise exception.MetadataUpdateFailure(reason=ex)
if make_bootable:
self._handle_bootable_volume_glance_meta(context, volume_id,
snapshot_id=snapshot_id)
return model_update
def _enable_bootable_flag(self, context, volume_id):
try:
LOG.debug(_('Marking volume %s as bootable.'), volume_id)
self.db.volume_update(context, volume_id, {'bootable': True})
except exception.CinderException as ex:
LOG.exception(_("Failed updating volume %(volume_id)s bootable"
" flag to true") % {'volume_id': volume_id})
raise exception.MetadataUpdateFailure(reason=ex)
def _create_from_source_volume(self, context, volume_ref,
source_volid, **kwargs):
# NOTE(harlowja): if the source volume has disappeared this will be our
# detection of that since this database call should fail.
#
# NOTE(harlowja): likely this is not the best place for this to happen
# and we should have proper locks on the source volume while actions
# that use the source volume are underway.
srcvol_ref = self.db.volume_get(context, source_volid)
model_update = self.driver.create_cloned_volume(volume_ref, srcvol_ref)
# NOTE(harlowja): Subtasks would be useful here since after this
# point the volume has already been created and further failures
# will not destroy the volume (although they could in the future).
if srcvol_ref.bootable:
self._handle_bootable_volume_glance_meta(context, volume_ref['id'],
source_volid=source_volid)
return model_update
def _copy_image_to_volume(self, context, volume_ref,
image_id, image_location, image_service):
"""Downloads Glance image to the specified volume."""
copy_image_to_volume = self.driver.copy_image_to_volume
volume_id = volume_ref['id']
LOG.debug(_("Attempting download of %(image_id)s (%(image_location)s)"
" to volume %(volume_id)s.") %
{'image_id': image_id, 'volume_id': volume_id,
'image_location': image_location})
try:
copy_image_to_volume(context, volume_ref, image_service, image_id)
except processutils.ProcessExecutionError as ex:
LOG.error(_("Failed to copy image %(image_id)s to volume: "
"%(volume_id)s, error: %(error)s") %
{'volume_id': volume_id,
'error': ex.stderr, 'image_id': image_id})
raise exception.ImageCopyFailure(reason=ex.stderr)
except exception.ImageUnacceptable as ex:
LOG.error(_("Failed to copy image to volume: %(volume_id)s, "
"error: %(error)s") % {'volume_id': volume_id,
'error': ex})
raise exception.ImageUnacceptable(ex)
except Exception as ex:
LOG.error(_("Failed to copy image %(image_id)s to "
"volume: %(volume_id)s, error: %(error)s") %
{'volume_id': volume_id, 'error': ex,
'image_id': image_id})
if not isinstance(ex, exception.ImageCopyFailure):
raise exception.ImageCopyFailure(reason=ex)
else:
raise
LOG.debug(_("Downloaded image %(image_id)s (%(image_location)s)"
" to volume %(volume_id)s successfully.") %
{'image_id': image_id, 'volume_id': volume_id,
'image_location': image_location})
def _capture_volume_image_metadata(self, context, volume_id,
image_id, image_meta):
# Save some base attributes into the volume metadata
base_metadata = {
'image_id': image_id,
}
name = image_meta.get('name', None)
if name:
base_metadata['image_name'] = name
# Save some more attributes into the volume metadata from the image
# metadata
for key in IMAGE_ATTRIBUTES:
if key not in image_meta:
continue
value = image_meta.get(key, None)
if value is not None:
base_metadata[key] = value
# Save all the image metadata properties into the volume metadata
property_metadata = {}
image_properties = image_meta.get('properties', {})
for (key, value) in image_properties.items():
if value is not None:
property_metadata[key] = value
# NOTE(harlowja): The best way for this to happen would be in bulk,
# but that doesn't seem to exist (yet), so we go through one by one
# which means we can have partial create/update failure.
volume_metadata = dict(property_metadata)
volume_metadata.update(base_metadata)
LOG.debug(_("Creating volume glance metadata for volume %(volume_id)s"
" backed by image %(image_id)s with: %(vol_metadata)s.") %
{'volume_id': volume_id, 'image_id': image_id,
'vol_metadata': volume_metadata})
for (key, value) in volume_metadata.items():
try:
self.db.volume_glance_metadata_create(context, volume_id,
key, value)
except exception.GlanceMetadataExists:
pass
def _create_from_image(self, context, volume_ref,
image_location, image_id, image_meta,
image_service, **kwargs):
LOG.debug(_("Cloning %(volume_id)s from image %(image_id)s "
" at location %(image_location)s.") %
{'volume_id': volume_ref['id'],
'image_location': image_location, 'image_id': image_id})
# Create the volume from an image.
#
# NOTE (singn): two params need to be returned
# dict containing provider_location for cloned volume
# and clone status.
model_update, cloned = self.driver.clone_image(
volume_ref, image_location, image_id, image_meta)
if not cloned:
# TODO(harlowja): what needs to be rolled back in the clone if this
# volume create fails?? Likely this should be a subflow or broken
# out task in the future. That will bring up the question of how
# do we make said subflow/task which is only triggered in the
# clone image 'path' resumable and revertable in the correct
# manner.
#
# Create the volume and then download the image onto the volume.
model_update = self.driver.create_volume(volume_ref)
updates = dict(model_update or dict(), status='downloading')
try:
volume_ref = self.db.volume_update(context,
volume_ref['id'], updates)
except exception.CinderException:
LOG.exception(_("Failed updating volume %(volume_id)s with "
"%(updates)s") %
{'volume_id': volume_ref['id'],
'updates': updates})
self._copy_image_to_volume(context, volume_ref,
image_id, image_location, image_service)
self._handle_bootable_volume_glance_meta(context, volume_ref['id'],
image_id=image_id,
image_meta=image_meta)
return model_update
def _create_raw_volume(self, context, volume_ref, **kwargs):
return self.driver.create_volume(volume_ref)
def execute(self, context, volume_ref, volume_spec):
volume_spec = dict(volume_spec)
volume_id = volume_spec.pop('volume_id', None)
# we can't do anything if the driver didn't init
if not self.driver.initialized:
driver_name = self.driver.__class__.__name__
LOG.error(_("Unable to create volume. "
"Volume driver %s not initialized") % driver_name)
# NOTE(flaper87): Set the error status before
# raising any exception.
self.db.volume_update(context, volume_id, dict(status='error'))
raise exception.DriverNotInitialized()
create_type = volume_spec.pop('type', None)
create_functor = self._create_func_mapping.get(create_type)
if not create_functor:
raise exception.VolumeTypeNotFound(volume_type_id=create_type)
if not volume_id:
volume_id = volume_ref['id']
LOG.info(_("Volume %(volume_id)s: being created using %(functor)s "
"with specification: %(volume_spec)s") %
{'volume_spec': volume_spec, 'volume_id': volume_id,
'functor': common.make_pretty_name(create_functor)})
# Call the given functor to make the volume.
model_update = create_functor(context, volume_ref=volume_ref,
**volume_spec)
# Persist any model information provided on creation.
try:
if model_update:
volume_ref = self.db.volume_update(context, volume_ref['id'],
model_update)
except exception.CinderException:
# If somehow the update failed we want to ensure that the
# failure is logged (but not try rescheduling since the volume at
# this point has been created).
LOG.exception(_("Failed updating model of volume %(volume_id)s"
" with creation provided model %(model)s") %
{'volume_id': volume_id, 'model': model_update})
raise
return volume_ref
class CreateVolumeOnFinishTask(NotifyVolumeActionTask):
"""On successful volume creation this will perform final volume actions.
When a volume is created successfully it is expected that MQ notifications
and database updates will occur to 'signal' to others that the volume is
now ready for usage. This task does those notifications and updates in a
reliable manner (not re-raising exceptions if said actions can not be
triggered).
Reversion strategy: N/A
"""
def __init__(self, db, event_suffix):
super(CreateVolumeOnFinishTask, self).__init__(db, event_suffix)
self.status_translation = {
'migration_target_creating': 'migration_target',
}
def execute(self, context, volume, volume_spec):
volume_id = volume['id']
new_status = self.status_translation.get(volume_spec.get('status'),
'available')
update = {
'status': new_status,
'launched_at': timeutils.utcnow(),
}
try:
# TODO(harlowja): is it acceptable to only log if this fails??
# or are there other side-effects that this will cause if the
# status isn't updated correctly (aka it will likely be stuck in
# 'building' if this fails)??
volume_ref = self.db.volume_update(context, volume_id, update)
# Now use the parent to notify.
super(CreateVolumeOnFinishTask, self).execute(context, volume_ref)
except exception.CinderException:
LOG.exception(_("Failed updating volume %(volume_id)s with "
"%(update)s") % {'volume_id': volume_id,
'update': update})
# Even if the update fails, the volume is ready.
msg = _("Volume %(volume_name)s (%(volume_id)s): created successfully")
LOG.info(msg % {
'volume_name': volume_spec['volume_name'],
'volume_id': volume_id,
})
def get_flow(context, db, driver, scheduler_rpcapi, host, volume_id,
allow_reschedule, reschedule_context, request_spec,
filter_properties, snapshot_id=None, image_id=None,
source_volid=None):
"""Constructs and returns the manager entrypoint flow.
This flow will do the following:
1. Determines if rescheduling is enabled (ahead of time).
2. Inject keys & values for dependent tasks.
3. Selects 1 of 2 activated only on *failure* tasks (one to update the db
status & notify or one to update the db status & notify & *reschedule*).
4. Extracts a volume specification from the provided inputs.
    5. Notifies that the volume has started to be created.
6. Creates a volume from the extracted volume specification.
    7. Attaches an on-success *only* task that notifies that the volume creation
has ended and performs further database status updates.
"""
flow_name = ACTION.replace(":", "_") + "_manager"
volume_flow = linear_flow.Flow(flow_name)
# This injects the initial starting flow values into the workflow so that
# the dependency order of the tasks provides/requires can be correctly
# determined.
create_what = {
'context': context,
'filter_properties': filter_properties,
'image_id': image_id,
'request_spec': request_spec,
'snapshot_id': snapshot_id,
'source_volid': source_volid,
'volume_id': volume_id,
}
volume_flow.add(ExtractVolumeRefTask(db, host))
if allow_reschedule and request_spec:
volume_flow.add(OnFailureRescheduleTask(reschedule_context,
db, scheduler_rpcapi))
volume_flow.add(ExtractVolumeSpecTask(db),
NotifyVolumeActionTask(db, "create.start"),
CreateVolumeFromSpecTask(db, driver),
CreateVolumeOnFinishTask(db, "create.end"))
# Now load (but do not run) the flow using the provided initial data.
return taskflow.engines.load(volume_flow, store=create_what)
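# Added illustrative sketch (not part of the original module): the same
# TaskFlow pattern that get_flow() uses, in miniature. Declare a task, chain
# it into a linear flow, then run it with an initial store that satisfies the
# task's requirements. All names below are made up; it assumes the public
# taskflow task/engine API.
def _example_linear_flow():
    from taskflow import task
    class _SayHello(task.Task):
        # 'name' is injected from the store; the return value is published
        # under 'greeting' because of provides='greeting'.
        def execute(self, name):
            return 'hello %s' % name
    demo_flow = linear_flow.Flow('demo_manager')
    demo_flow.add(_SayHello(provides='greeting'))
    # engines.run() loads the flow and runs it to completion, returning the
    # values published by its tasks.
    return taskflow.engines.run(demo_flow, store={'name': 'world'})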
|
{
"content_hash": "d36a45a74bbae1ac9ab22fca5580e4d9",
"timestamp": "",
"source": "github",
"line_count": 694,
"max_line_length": 79,
"avg_line_length": 45.48414985590778,
"alnum_prop": 0.5678261420515744,
"repo_name": "NeCTAR-RC/cinder",
"id": "f573ccbab6d2a9ecc9fcfa8cfe7c99fc9e0f6908",
"size": "32139",
"binary": false,
"copies": "3",
"ref": "refs/heads/nectar/icehouse",
"path": "cinder/volume/flows/manager/create_volume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "9824"
},
{
"name": "Python",
"bytes": "6176241"
},
{
"name": "Shell",
"bytes": "15237"
}
],
"symlink_target": ""
}
|
from .unidecode import unidecode
|
{
"content_hash": "53a27871c530d8b6cd141bad0745a4cf",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 32,
"avg_line_length": 33,
"alnum_prop": 0.8484848484848485,
"repo_name": "redvasily/isounidecode",
"id": "6607ba61bdb069cc45818888d44b50d0bb4542d8",
"size": "33",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "isounidecode/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2220627"
}
],
"symlink_target": ""
}
|
"""Module for tests of the resources application."""
|
{
"content_hash": "60ea5b01f936a5e289a5e9bd869455ec",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 52,
"avg_line_length": 53,
"alnum_prop": 0.7358490566037735,
"repo_name": "uccser/cs-unplugged",
"id": "dc3501825dbe6276b977d3517f83214ff9ba5c83",
"size": "53",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "csunplugged/tests/resources/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "7927"
},
{
"name": "HTML",
"bytes": "432891"
},
{
"name": "JavaScript",
"bytes": "104806"
},
{
"name": "Python",
"bytes": "1257568"
},
{
"name": "SCSS",
"bytes": "67560"
},
{
"name": "Shell",
"bytes": "12461"
}
],
"symlink_target": ""
}
|
__all__ = ['floquet_modes', 'floquet_modes_t', 'floquet_modes_table',
'floquet_modes_t_lookup', 'floquet_states', 'floquet_states_t',
'floquet_wavefunction', 'floquet_wavefunction_t',
'floquet_state_decomposition', 'fsesolve',
'floquet_master_equation_rates', 'floquet_collapse_operators',
'floquet_master_equation_tensor',
'floquet_master_equation_steadystate', 'floquet_basis_transform',
'floquet_markov_mesolve', 'fmmesolve']
import numpy as np
import scipy.linalg as la
import scipy
from scipy import angle, pi, exp, sqrt
from types import FunctionType
from qutip.qobj import Qobj, isket
from qutip.superoperator import vec2mat_index, mat2vec, vec2mat
from qutip.mesolve import mesolve
from qutip.steadystate import steadystate
from qutip.states import ket2dm
from qutip.states import projection
from qutip.solver import Options
from qutip.propagator import propagator
from qutip.solver import Result
from qutip.cy.spmatfuncs import cy_ode_rhs
from qutip.expect import expect
from qutip.utilities import n_thermal
def floquet_modes(H, T, args=None, sort=False, U=None):
"""
Calculate the initial Floquet modes Phi_alpha(0) for a driven system with
period T.
Returns a list of :class:`qutip.qobj` instances representing the Floquet
modes and a list of corresponding quasienergies, sorted by increasing
quasienergy in the interval [-pi/T, pi/T]. The optional parameter `sort`
decides if the output is to be sorted in increasing quasienergies or not.
Parameters
----------
H : :class:`qutip.qobj`
system Hamiltonian, time-dependent with period `T`
args : dictionary
dictionary with variables required to evaluate H
T : float
        The period of the time-dependence of the hamiltonian.
U : :class:`qutip.qobj`
The propagator for the time-dependent Hamiltonian with period `T`.
If U is `None` (default), it will be calculated from the Hamiltonian
`H` using :func:`qutip.propagator.propagator`.
Returns
-------
output : list of kets, list of quasi energies
Two lists: the Floquet modes as kets and the quasi energies.
"""
if U is None:
# get the unitary propagator
U = propagator(H, T, [], args)
# find the eigenstates for the propagator
evals, evecs = la.eig(U.full())
eargs = angle(evals)
# make sure that the phase is in the interval [-pi, pi], so that
# the quasi energy is in the interval [-pi/T, pi/T] where T is the
# period of the driving. eargs += (eargs <= -2*pi) * (2*pi) +
# (eargs > 0) * (-2*pi)
eargs += (eargs <= -pi) * (2 * pi) + (eargs > pi) * (-2 * pi)
e_quasi = -eargs / T
# sort by the quasi energy
if sort:
order = np.argsort(-e_quasi)
else:
order = list(range(len(evals)))
# prepare a list of kets for the floquet states
new_dims = [U.dims[0], [1] * len(U.dims[0])]
new_shape = [U.shape[0], 1]
kets_order = [Qobj(np.matrix(evecs[:, o]).T,
dims=new_dims, shape=new_shape) for o in order]
return kets_order, e_quasi[order]
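# Added usage sketch, not part of the original module: compute the Floquet
# modes of a sinusoidally driven qubit. The parameter values and the 'w'
# argument name are illustrative assumptions.
def _example_floquet_modes_driven_qubit():
    from qutip.operators import sigmax, sigmaz
    delta = 0.2 * 2 * pi   # qubit tunneling energy
    eps0 = 1.0 * 2 * pi    # qubit bias energy
    A = 0.5 * 2 * pi       # driving amplitude
    omega = 1.0 * 2 * pi   # driving frequency
    H0 = -delta / 2.0 * sigmax() - eps0 / 2.0 * sigmaz()
    H1 = A / 2.0 * sigmaz()
    # list-format time-dependent Hamiltonian with a f(t, args) coefficient
    H = [H0, [H1, lambda t, args: np.sin(args['w'] * t)]]
    T = 2 * pi / omega
    f_modes_0, f_energies = floquet_modes(H, T, args={'w': omega})
    return f_modes_0, f_energies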
def floquet_modes_t(f_modes_0, f_energies, t, H, T, args=None):
"""
    Calculate the Floquet modes at time t, Phi_alpha(t), by propagating the
    initial Floquet modes Phi_alpha(0).
Parameters
----------
f_modes_0 : list of :class:`qutip.qobj` (kets)
        Floquet modes at :math:`t = 0`
f_energies : list
Floquet energies.
t : float
The time at which to evaluate the floquet modes.
H : :class:`qutip.qobj`
system Hamiltonian, time-dependent with period `T`
args : dictionary
dictionary with variables required to evaluate H
T : float
The period of the time-dependence of the hamiltonian.
Returns
-------
output : list of kets
The Floquet modes as kets at time :math:`t`
"""
# find t in [0,T] such that t_orig = t + n * T for integer n
t = t - int(t / T) * T
f_modes_t = []
# get the unitary propagator from 0 to t
if t > 0.0:
U = propagator(H, t, [], args)
for n in np.arange(len(f_modes_0)):
f_modes_t.append(U * f_modes_0[n] * exp(1j * f_energies[n] * t))
else:
f_modes_t = f_modes_0
return f_modes_t
def floquet_modes_table(f_modes_0, f_energies, tlist, H, T, args=None):
"""
Pre-calculate the Floquet modes for a range of times spanning the floquet
period. Can later be used as a table to look up the floquet modes for
any time.
Parameters
----------
f_modes_0 : list of :class:`qutip.qobj` (kets)
        Floquet modes at :math:`t = 0`
f_energies : list
Floquet energies.
tlist : array
The list of times at which to evaluate the floquet modes.
H : :class:`qutip.qobj`
system Hamiltonian, time-dependent with period `T`
T : float
The period of the time-dependence of the hamiltonian.
args : dictionary
dictionary with variables required to evaluate H
Returns
-------
output : nested list
A nested list of Floquet modes as kets for each time in `tlist`
"""
# truncate tlist to the driving period
tlist_period = tlist[np.where(tlist <= T)]
f_modes_table_t = [[] for t in tlist_period]
opt = Options()
opt.rhs_reuse = True
for n, f_mode in enumerate(f_modes_0):
output = mesolve(H, f_mode, tlist_period, [], [], args, opt)
for t_idx, f_state_t in enumerate(output.states):
f_modes_table_t[t_idx].append(
f_state_t * exp(1j * f_energies[n] * tlist_period[t_idx]))
return f_modes_table_t
def floquet_modes_t_lookup(f_modes_table_t, t, T):
"""
Lookup the floquet mode at time t in the pre-calculated table of floquet
modes in the first period of the time-dependence.
Parameters
----------
f_modes_table_t : nested list of :class:`qutip.qobj` (kets)
A lookup-table of Floquet modes at times precalculated by
:func:`qutip.floquet.floquet_modes_table`.
t : float
The time for which to evaluate the Floquet modes.
T : float
The period of the time-dependence of the hamiltonian.
Returns
-------
output : nested list
        A list of Floquet modes as kets for the time in the supplied table of
        Floquet modes that most closely matches the time `t`.
"""
# find t_wrap in [0,T] such that t = t_wrap + n * T for integer n
t_wrap = t - int(t / T) * T
# find the index in the table that corresponds to t_wrap (= tlist[t_idx])
t_idx = int(t_wrap / T * len(f_modes_table_t))
    # XXX: might want to give a warning if the cast of t_idx to int discards
    # a significant fraction in t_idx, which would happen if the list of time
    # values isn't perfectly matched to the driving period
# if debug: print "t = %f -> t_wrap = %f @ %d of %d" % (t, t_wrap, t_idx,
# N)
return f_modes_table_t[t_idx]
def floquet_states(f_modes_t, f_energies, t):
"""
Evaluate the floquet states at time t given the Floquet modes at that time.
Parameters
----------
f_modes_t : list of :class:`qutip.qobj` (kets)
A list of Floquet modes for time :math:`t`.
f_energies : array
The Floquet energies.
t : float
The time for which to evaluate the Floquet states.
Returns
-------
output : list
A list of Floquet states for the time :math:`t`.
"""
return [(f_modes_t[i] * exp(-1j * f_energies[i] * t))
for i in np.arange(len(f_energies))]
def floquet_states_t(f_modes_0, f_energies, t, H, T, args=None):
"""
Evaluate the floquet states at time t given the initial Floquet modes.
Parameters
----------
    f_modes_0 : list of :class:`qutip.qobj` (kets)
A list of initial Floquet modes (for time :math:`t=0`).
f_energies : array
The Floquet energies.
t : float
The time for which to evaluate the Floquet states.
H : :class:`qutip.qobj`
System Hamiltonian, time-dependent with period `T`.
T : float
The period of the time-dependence of the hamiltonian.
args : dictionary
Dictionary with variables required to evaluate H.
Returns
-------
output : list
A list of Floquet states for the time :math:`t`.
"""
f_modes_t = floquet_modes_t(f_modes_0, f_energies, t, H, T, args)
return [(f_modes_t[i] * exp(-1j * f_energies[i] * t))
for i in np.arange(len(f_energies))]
def floquet_wavefunction(f_modes_t, f_energies, f_coeff, t):
"""
Evaluate the wavefunction for a time t using the Floquet state
    decomposition, given the Floquet modes at time `t`.
Parameters
----------
f_modes_t : list of :class:`qutip.qobj` (kets)
        A list of Floquet modes at time :math:`t`.
f_energies : array
The Floquet energies.
f_coeff : array
The coefficients for Floquet decomposition of the initial wavefunction.
t : float
The time for which to evaluate the Floquet states.
Returns
-------
output : :class:`qutip.qobj`
The wavefunction for the time :math:`t`.
"""
return sum([f_modes_t[i] * exp(-1j * f_energies[i] * t) * f_coeff[i]
for i in np.arange(len(f_energies))])
def floquet_wavefunction_t(f_modes_0, f_energies, f_coeff, t, H, T, args=None):
"""
Evaluate the wavefunction for a time t using the Floquet state
    decomposition, given the initial Floquet modes.
Parameters
----------
    f_modes_0 : list of :class:`qutip.qobj` (kets)
A list of initial Floquet modes (for time :math:`t=0`).
f_energies : array
The Floquet energies.
f_coeff : array
The coefficients for Floquet decomposition of the initial wavefunction.
t : float
The time for which to evaluate the Floquet states.
H : :class:`qutip.qobj`
System Hamiltonian, time-dependent with period `T`.
T : float
The period of the time-dependence of the hamiltonian.
args : dictionary
Dictionary with variables required to evaluate H.
Returns
-------
output : :class:`qutip.qobj`
The wavefunction for the time :math:`t`.
"""
f_states_t = floquet_states_t(f_modes_0, f_energies, t, H, T, args)
return sum([f_states_t[i] * f_coeff[i]
for i in np.arange(len(f_energies))])
def floquet_state_decomposition(f_states, f_energies, psi):
"""
Decompose the wavefunction `psi` (typically an initial state) in terms of
the Floquet states, :math:`\psi = \sum_\\alpha c_\\alpha \psi_\\alpha(0)`.
Parameters
----------
f_states : list of :class:`qutip.qobj` (kets)
A list of Floquet modes.
f_energies : array
The Floquet energies.
psi : :class:`qutip.qobj`
The wavefunction to decompose in the Floquet state basis.
Returns
-------
output : array
The coefficients :math:`c_\\alpha` in the Floquet state decomposition.
"""
return [(f_states[i].dag() * psi).data[0, 0]
for i in np.arange(len(f_energies))]
def fsesolve(H, psi0, tlist, e_ops=[], T=None, args={}, Tsteps=100):
"""
Solve the Schrodinger equation using the Floquet formalism.
Parameters
----------
H : :class:`qutip.qobj.Qobj`
System Hamiltonian, time-dependent with period `T`.
psi0 : :class:`qutip.qobj`
Initial state vector (ket).
tlist : *list* / *array*
list of times for :math:`t`.
e_ops : list of :class:`qutip.qobj` / callback function
list of operators for which to evaluate expectation values. If this
list is empty, the state vectors for each time in `tlist` will be
returned instead of expectation values.
T : float
The period of the time-dependence of the hamiltonian.
args : dictionary
Dictionary with variables required to evaluate H.
Tsteps : integer
The number of time steps in one driving period for which to
precalculate the Floquet modes. `Tsteps` should be an even number.
Returns
-------
output : :class:`qutip.solver.Result`
An instance of the class :class:`qutip.solver.Result`, which
contains either an *array* of expectation values or an array of
state vectors, for the times specified by `tlist`.
"""
if not T:
        # assume that tlist spans exactly one period of the driving
T = tlist[-1]
# find the floquet modes for the time-dependent hamiltonian
f_modes_0, f_energies = floquet_modes(H, T, args)
    # pre-calculate the floquet modes table over one driving period; it is
    # used below to build the wavefunctions at the requested times
f_modes_table_t = floquet_modes_table(f_modes_0, f_energies,
np.linspace(0, T, Tsteps + 1),
H, T, args)
# setup Result for storing the results
output = Result()
output.times = tlist
output.solver = "fsesolve"
if isinstance(e_ops, FunctionType):
output.num_expect = 0
expt_callback = True
elif isinstance(e_ops, list):
output.num_expect = len(e_ops)
expt_callback = False
if output.num_expect == 0:
output.states = []
else:
output.expect = []
for op in e_ops:
if op.isherm:
output.expect.append(np.zeros(len(tlist)))
else:
output.expect.append(np.zeros(len(tlist), dtype=complex))
else:
raise TypeError("e_ops must be a list Qobj or a callback function")
psi0_fb = psi0.transform(f_modes_0)
for t_idx, t in enumerate(tlist):
f_modes_t = floquet_modes_t_lookup(f_modes_table_t, t, T)
f_states_t = floquet_states(f_modes_t, f_energies, t)
psi_t = psi0_fb.transform(f_states_t, True)
if expt_callback:
# use callback method
e_ops(t, psi_t)
else:
# calculate all the expectation values, or output psi if
            # no expectation value operators were defined
if output.num_expect == 0:
output.states.append(Qobj(psi_t))
else:
for e_idx, e in enumerate(e_ops):
output.expect[e_idx][t_idx] = expect(e, psi_t)
return output
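# Added usage sketch, not part of the original module: evolve a driven qubit
# with fsesolve() and record <sigma_z>(t). Parameter values are illustrative.
def _example_fsesolve_driven_qubit():
    from qutip.operators import sigmax, sigmaz
    from qutip.states import basis
    delta, eps0 = 0.2 * 2 * pi, 1.0 * 2 * pi
    A, omega = 0.5 * 2 * pi, 1.0 * 2 * pi
    H = [-delta / 2.0 * sigmax() - eps0 / 2.0 * sigmaz(),
         [A / 2.0 * sigmaz(), lambda t, args: np.sin(args['w'] * t)]]
    T = 2 * pi / omega
    tlist = np.linspace(0.0, 10 * T, 101)
    result = fsesolve(H, basis(2, 0), tlist, e_ops=[sigmaz()],
                      T=T, args={'w': omega}, Tsteps=100)
    # result.expect[0] holds <sigma_z> at each time in tlist
    return result.expect[0]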
def floquet_master_equation_rates(f_modes_0, f_energies, c_op, H, T,
args, J_cb, w_th, kmax=5,
f_modes_table_t=None):
"""
Calculate the rates and matrix elements for the Floquet-Markov master
equation.
Parameters
----------
f_modes_0 : list of :class:`qutip.qobj` (kets)
A list of initial Floquet modes.
f_energies : array
The Floquet energies.
c_op : :class:`qutip.qobj`
        The collapse operator describing the dissipation.
H : :class:`qutip.qobj`
System Hamiltonian, time-dependent with period `T`.
T : float
The period of the time-dependence of the hamiltonian.
args : dictionary
Dictionary with variables required to evaluate H.
J_cb : callback functions
A callback function that computes the noise power spectrum, as
a function of frequency, associated with the collapse operator `c_op`.
w_th : float
The temperature in units of frequency.
    kmax : int
The truncation of the number of sidebands (default 5).
f_modes_table_t : nested list of :class:`qutip.qobj` (kets)
A lookup-table of Floquet modes at times precalculated by
:func:`qutip.floquet.floquet_modes_table` (optional).
Returns
-------
output : list
A list (Delta, X, Gamma, A) containing the matrices Delta, X, Gamma
and A used in the construction of the Floquet-Markov master equation.
"""
N = len(f_energies)
M = 2 * kmax + 1
omega = (2 * pi) / T
Delta = np.zeros((N, N, M))
X = np.zeros((N, N, M), dtype=complex)
Gamma = np.zeros((N, N, M))
A = np.zeros((N, N))
nT = 100
dT = T / nT
tlist = np.arange(dT, T + dT / 2, dT)
if f_modes_table_t is None:
f_modes_table_t = floquet_modes_table(f_modes_0, f_energies,
np.linspace(0, T, nT + 1), H, T,
args)
for t in tlist:
# TODO: repeated invocations of floquet_modes_t is
# inefficient... make a and b outer loops and use the mesolve
# instead of the propagator.
# f_modes_t = floquet_modes_t(f_modes_0, f_energies, t, H, T, args)
f_modes_t = floquet_modes_t_lookup(f_modes_table_t, t, T)
for a in range(N):
for b in range(N):
k_idx = 0
for k in range(-kmax, kmax + 1, 1):
X[a, b, k_idx] += (dT / T) * exp(-1j * k * omega * t) * \
(f_modes_t[a].dag() * c_op * f_modes_t[b])[0, 0]
k_idx += 1
Heaviside = lambda x: ((np.sign(x) + 1) / 2.0)
for a in range(N):
for b in range(N):
k_idx = 0
for k in range(-kmax, kmax + 1, 1):
Delta[a, b, k_idx] = f_energies[a] - f_energies[b] + k * omega
Gamma[a, b, k_idx] = 2 * pi * Heaviside(Delta[a, b, k_idx]) * \
J_cb(Delta[a, b, k_idx]) * abs(X[a, b, k_idx]) ** 2
k_idx += 1
for a in range(N):
for b in range(N):
for k in range(-kmax, kmax + 1, 1):
k1_idx = k + kmax
k2_idx = -k + kmax
A[a, b] += Gamma[a, b, k1_idx] + \
n_thermal(abs(Delta[a, b, k1_idx]), w_th) * \
(Gamma[a, b, k1_idx] + Gamma[b, a, k2_idx])
return Delta, X, Gamma, A
def floquet_collapse_operators(A):
"""
Construct collapse operators corresponding to the Floquet-Markov
master-equation rate matrix `A`.
.. note::
Experimental.
"""
c_ops = []
N, M = np.shape(A)
#
# Here we really need a master equation on Bloch-Redfield form, or perhaps
# we can use the Lindblad form master equation with some rotating frame
# approximations? ...
#
for a in range(N):
for b in range(N):
if a != b and abs(A[a, b]) > 0.0:
# only relaxation terms included...
c_ops.append(sqrt(A[a, b]) * projection(N, a, b))
return c_ops
def floquet_master_equation_tensor(Alist, f_energies):
"""
Construct a tensor that represents the master equation in the floquet
basis (with constant Hamiltonian and collapse operators).
Simplest RWA approximation [Grifoni et al, Phys.Rep. 304 229 (1998)]
Parameters
----------
Alist : list
A list of Floquet-Markov master equation rate matrices.
f_energies : array
The Floquet energies.
Returns
-------
output : array
The Floquet-Markov master equation tensor `R`.
"""
if isinstance(Alist, list):
# Alist can be a list of rate matrices corresponding
# to different operators that couple to the environment
N, M = np.shape(Alist[0])
else:
# or a simple rate matrix, in which case we put it in a list
Alist = [Alist]
N, M = np.shape(Alist[0])
R = Qobj(scipy.sparse.csr_matrix((N * N, N * N)), [[N, N], [N, N]],
[N * N, N * N])
R.data = R.data.tolil()
for I in range(N * N):
a, b = vec2mat_index(N, I)
for J in range(N * N):
c, d = vec2mat_index(N, J)
R.data[I, J] = -1.0j * (f_energies[a] - f_energies[b]) * \
(a == c) * (b == d)
for A in Alist:
s1 = s2 = 0
for n in range(N):
s1 += A[a, n] * (n == c) * (n == d) - A[n, a] * \
(a == c) * (a == d)
s2 += (A[n, a] + A[n, b]) * (a == c) * (b == d)
dR = (a == b) * s1 - 0.5 * (1 - (a == b)) * s2
if dR != 0.0:
R.data[I, J] += dR
R.data = R.data.tocsr()
return R
def floquet_master_equation_steadystate(H, A):
"""
Returns the steadystate density matrix (in the floquet basis!) for the
Floquet-Markov master equation.
"""
c_ops = floquet_collapse_operators(A)
rho_ss = steadystate(H, c_ops)
return rho_ss
def floquet_basis_transform(f_modes, f_energies, rho0):
"""
Make a basis transform that takes rho0 from the floquet basis to the
computational basis.
"""
return rho0.transform(f_modes, True)
# -----------------------------------------------------------------------------
# Floquet-Markov master equation
#
#
def floquet_markov_mesolve(R, ekets, rho0, tlist, e_ops, f_modes_table=None,
options=None, floquet_basis=True):
"""
Solve the dynamics for the system using the Floquet-Markov master equation.
"""
if options is None:
opt = Options()
else:
opt = options
if opt.tidy:
R.tidyup()
#
# check initial state
#
if isket(rho0):
# Got a wave function as initial state: convert to density matrix.
rho0 = ket2dm(rho0)
#
# prepare output array
#
n_tsteps = len(tlist)
dt = tlist[1] - tlist[0]
output = Result()
output.solver = "fmmesolve"
output.times = tlist
if isinstance(e_ops, FunctionType):
n_expt_op = 0
expt_callback = True
elif isinstance(e_ops, list):
n_expt_op = len(e_ops)
expt_callback = False
if n_expt_op == 0:
output.states = []
else:
if not f_modes_table:
raise TypeError("The Floquet mode table has to be provided " +
"when requesting expectation values.")
output.expect = []
output.num_expect = n_expt_op
for op in e_ops:
if op.isherm:
output.expect.append(np.zeros(n_tsteps))
else:
output.expect.append(np.zeros(n_tsteps, dtype=complex))
else:
raise TypeError("Expectation parameter must be a list or a function")
#
# transform the initial density matrix to the eigenbasis: from
# computational basis to the floquet basis
#
if ekets is not None:
rho0 = rho0.transform(ekets)
#
# setup integrator
#
initial_vector = mat2vec(rho0.full())
r = scipy.integrate.ode(cy_ode_rhs)
r.set_f_params(R.data.data, R.data.indices, R.data.indptr)
r.set_integrator('zvode', method=opt.method, order=opt.order,
atol=opt.atol, rtol=opt.rtol, max_step=opt.max_step)
r.set_initial_value(initial_vector, tlist[0])
#
# start evolution
#
rho = Qobj(rho0)
t_idx = 0
for t in tlist:
if not r.successful():
break
rho.data = vec2mat(r.y)
if expt_callback:
# use callback method
if floquet_basis:
e_ops(t, Qobj(rho))
else:
f_modes_table_t, T = f_modes_table
f_modes_t = floquet_modes_t_lookup(f_modes_table_t, t, T)
e_ops(t, Qobj(rho).transform(f_modes_t, True))
else:
# calculate all the expectation values, or output rho if
# no operators
if n_expt_op == 0:
if floquet_basis:
output.states.append(Qobj(rho))
else:
f_modes_table_t, T = f_modes_table
f_modes_t = floquet_modes_t_lookup(f_modes_table_t, t, T)
output.states.append(Qobj(rho).transform(f_modes_t, True))
else:
f_modes_table_t, T = f_modes_table
f_modes_t = floquet_modes_t_lookup(f_modes_table_t, t, T)
for m in range(0, n_expt_op):
output.expect[m][t_idx] = \
expect(e_ops[m], rho.transform(f_modes_t, False))
r.integrate(r.t + dt)
t_idx += 1
return output
# -----------------------------------------------------------------------------
# Solve the Floquet-Markov master equation
#
#
def fmmesolve(H, rho0, tlist, c_ops, e_ops=[], spectra_cb=[], T=None,
args={}, options=Options(), floquet_basis=True, kmax=5):
"""
Solve the dynamics for the system using the Floquet-Markov master equation.
.. note::
This solver currently does not support multiple collapse operators.
Parameters
----------
H : :class:`qutip.qobj`
system Hamiltonian.
rho0 / psi0 : :class:`qutip.qobj`
initial density matrix or state vector (ket).
tlist : *list* / *array*
list of times for :math:`t`.
c_ops : list of :class:`qutip.qobj`
list of collapse operators.
e_ops : list of :class:`qutip.qobj` / callback function
list of operators for which to evaluate expectation values.
spectra_cb : list callback functions
List of callback functions that compute the noise power spectrum as
a function of frequency for the collapse operators in `c_ops`.
T : float
The period of the time-dependence of the hamiltonian. The default value
'None' indicates that the 'tlist' spans a single period of the driving.
args : *dictionary*
dictionary of parameters for time-dependent Hamiltonians and
collapse operators.
This dictionary should also contain an entry 'w_th', which is
the temperature of the environment (if finite) in the
energy/frequency units of the Hamiltonian. For example, if
        the Hamiltonian is written in units of 2pi GHz and the
temperature is given in K, use the following conversion
>>> temperature = 25e-3 # unit K
>>> h = 6.626e-34
>>> kB = 1.38e-23
>>> args['w_th'] = temperature * (kB / h) * 2 * pi * 1e-9
options : :class:`qutip.solver`
options for the ODE solver.
    kmax : int
The truncation of the number of sidebands (default 5).
Returns
-------
    output : :class:`qutip.solver.Result`
        An instance of the class :class:`qutip.solver.Result`, which contains
        either an *array* of expectation values or a list of states for the
        times specified by `tlist`.
"""
if T is None:
T = max(tlist)
if len(spectra_cb) == 0:
# add white noise callbacks if absent
spectra_cb = [lambda w: 1.0] * len(c_ops)
f_modes_0, f_energies = floquet_modes(H, T, args)
f_modes_table_t = floquet_modes_table(f_modes_0, f_energies,
np.linspace(0, T, 500 + 1),
H, T, args)
# get w_th from args if it exists
if 'w_th' in args:
w_th = args['w_th']
else:
w_th = 0
# TODO: loop over input c_ops and spectra_cb, calculate one R for each set
# calculate the rate-matrices for the floquet-markov master equation
Delta, X, Gamma, Amat = floquet_master_equation_rates(
f_modes_0, f_energies, c_ops[0], H, T, args, spectra_cb[0],
w_th, kmax, f_modes_table_t)
# the floquet-markov master equation tensor
R = floquet_master_equation_tensor(Amat, f_energies)
return floquet_markov_mesolve(R, f_modes_0, rho0, tlist, e_ops,
f_modes_table=(f_modes_table_t, T),
options=options,
floquet_basis=floquet_basis)
|
{
"content_hash": "a8dd0eff27c6f23bd2a1314333b55216",
"timestamp": "",
"source": "github",
"line_count": 957,
"max_line_length": 79,
"avg_line_length": 29.244514106583072,
"alnum_prop": 0.5739807767892235,
"repo_name": "zasdfgbnm/qutip",
"id": "e3b1a1df93414b5e455f875571ec8657f07caf75",
"size": "29803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qutip/floquet.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "FORTRAN",
"bytes": "259707"
},
{
"name": "Makefile",
"bytes": "3079"
},
{
"name": "Python",
"bytes": "1733190"
},
{
"name": "Shell",
"bytes": "2931"
}
],
"symlink_target": ""
}
|
'''
Copyright 2015 Planet Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
import numpy
from radiometric_normalization.utils import pixel_list_to_array
def filter_by_residuals_from_line(candidate_band, reference_band,
combined_alpha, threshold=1000,
line_gain=1.0, line_offset=0.0):
''' Filters pixels by their residuals from a line
:param array candidate_band: A 2D array representing the image data of the
candidate band
:param array reference_band: A 2D array representing the image data of the
reference image
:param array combined_alpha: A 2D array representing the alpha mask of the
valid pixels in both the candidate array and
reference array
:param float threshold: The distance from the line within which to include
pixels
:param float line_gain: The gradient of the line
:param float line_offset: The intercept of the line
:returns: A 2D array of boolean mask representing each valid pixel (True)
'''
valid_pixels = numpy.nonzero(combined_alpha)
filtered_pixels = filter_by_residuals_from_line_pixel_list(
candidate_band[valid_pixels], reference_band[valid_pixels], threshold,
line_gain, line_offset)
mask = pixel_list_to_array(
valid_pixels, filtered_pixels, candidate_band.shape)
no_passed_pixels = len(numpy.nonzero(mask)[0])
logging.info(
'Filtering: Valid data = {} out of {} ({}%)'.format(
no_passed_pixels,
candidate_band.size,
100.0 * no_passed_pixels / candidate_band.size))
return mask
def filter_by_residuals_from_line_pixel_list(candidate_data, reference_data,
threshold=1000, line_gain=1.0,
line_offset=0.0):
''' Calculates the residuals from a line and filters by residuals.
    :param list candidate_data: A list of valid candidate data
    :param list reference_data: A list of coincident valid reference data
    :param float threshold: The distance from the line within which to include
                            data points
:param float line_gain: The gradient of the line
:param float line_offset: The intercept of the line
:returns: A list of booleans the same length as candidate representing if
the data point is still active after filtering or not
'''
logging.info('Filtering: Filtering from line: y = '
'{} * x + {} @ {}'.format(line_gain, line_offset, threshold))
def _get_residual(data_1, data_2):
return numpy.absolute(line_gain * data_1 - data_2 + line_offset) / \
numpy.sqrt(1 + line_gain * line_gain)
residuals = _get_residual(candidate_data, reference_data)
return residuals < threshold
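# Added usage sketch, not part of the original module: with the default
# identity line, points close to y = x pass and a gross outlier is rejected.
# The values are made up for the example.
def _example_filter_by_residuals_from_line_pixel_list():
    candidate = numpy.array([100, 200, 300, 400])
    reference = numpy.array([105, 195, 300, 2000])
    # perpendicular distances are roughly 3.5, 3.5, 0 and 1131, so only the
    # last point exceeds the threshold of 50
    return filter_by_residuals_from_line_pixel_list(
        candidate, reference, threshold=50, line_gain=1.0, line_offset=0.0)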
def filter_by_histogram(candidate_band, reference_band,
combined_alpha, threshold=0.1,
number_of_valid_bins=None, rough_search=False,
number_of_total_bins_in_one_axis=10):
''' Filters pixels using a 2D histogram of common values.
This function is more memory intensive but keeps the 2D structure of the
pixels
    There are two mutually exclusive options:
- either a number_of_valid_bins is specified, which represents how many of
the most popular bins to select
- or a threshold is specified, which specifies a minimum population above
which a bin is selected
:param array candidate_band: A 2D array representing the image data of the
candidate band
:param array reference_band: A 2D array representing the image data of the
reference image
:param array combined_alpha: A 2D array representing the alpha mask of the
valid pixels in both the candidate array and
reference array
:param float threshold: A threshold on the population of a bin. Above this
number, a bin is selected. This number is a
fraction of the maximum bin (i.e. a value of 0.1
                            will mean that the bin will need to hold more than
                            10% of the population of the most populous bin)
:param int number_of_valid_bins: The total number of bins to select. The
bins are arranged in descending order of
population. This number specifies how
many of the top bins are selected (i.e.
a value of 3 will mean that the top three
bins are selected). If this is specified
then the threshold parameter is ignored
:param boolean rough_search: If this is true, the bins will specify a
maximum and minimum range within which to
select pixels (i.e. one superset bin will be
created from the bins that are selected).
This should speed up the computation but be
less exact
:param int number_of_total_bins_in_one_axis: This number controls the total
number of bins. This number
represents the total number of
bins in one axis. Both axes
have the same number (i.e. a
value of 10 will mean that
there are 100 bins in total)
:returns: A 2D array of boolean mask representing each valid pixel (True)
'''
valid_pixels = numpy.nonzero(combined_alpha)
candidate_data = candidate_band[valid_pixels]
reference_data = reference_band[valid_pixels]
filtered_pixels = filter_by_histogram_pixel_list(
candidate_data, reference_data, threshold, number_of_valid_bins,
rough_search, number_of_total_bins_in_one_axis)
mask = pixel_list_to_array(
valid_pixels, filtered_pixels, candidate_band.shape)
no_passed_pixels = len(numpy.nonzero(mask)[0])
logging.info(
'Filtering: Valid data = {} out of {} ({}%)'.format(
no_passed_pixels,
candidate_band.size,
100.0 * no_passed_pixels / candidate_band.size))
return mask
def filter_by_histogram_pixel_list(candidate_data, reference_data,
threshold=0.1, number_of_valid_bins=None,
rough_search=False,
number_of_total_bins_in_one_axis=10):
''' Filters pixels using a 2D histogram of common values.
This function is for lists of coincident candidate and reference pixel data
    There are two mutually exclusive options:
- either a number_of_valid_bins is specified, which represents how many of
the most popular bins to select
- or a threshold is specified, which specifies a minimum population above
which a bin is selected
    :param list candidate_data: A list of valid candidate data
    :param list reference_data: A list of coincident valid reference data
    :param float threshold: A threshold on the population of a bin. Above this
                            number, a bin is selected. This number is a
                            fraction of the maximum bin (i.e. a value of 0.1
                            will mean that the bin will need to hold more than
                            10% of the population of the most populous bin)
:param int number_of_valid_bins: The total number of bins to select. The
bins are arranged in descending order of
population. This number specifies how
many of the top bins are selected (i.e.
a value of 3 will mean that the top three
bins are selected). If this is specified
then the threshold parameter is ignored
:param boolean rough_search: If this is true, the bins will specify a
maximum and minimum range within which to
select pixels (i.e. one superset bin will be
created from the bins that are selected).
This should speed up the computation but be
less exact
:param int number_of_total_bins_in_one_axis: This number controls the total
number of bins. This number
represents the total number of
bins in one axis. Both axes
have the same number (i.e. a
value of 10 will mean that
there are 100 bins in total)
:returns: A list of booleans the same length as candidate representing if
the data point is still active after filtering or not
'''
logging.info('Filtering: Filtering by histogram.')
H, candidate_bins, reference_bins = numpy.histogram2d(
candidate_data, reference_data, bins=number_of_total_bins_in_one_axis)
def get_valid_range():
c_min = min([candidate_bins[v] for v in passed_bins[0]])
c_max = max([candidate_bins[v + 1] for v in passed_bins[0]])
r_min = min([reference_bins[v] for v in passed_bins[1]])
r_max = max([reference_bins[v + 1] for v in passed_bins[1]])
return c_min, c_max, r_min, r_max
def check_in_valid_range(c_data_point, r_data_point):
if c_data_point >= c_min and \
c_data_point <= c_max and \
r_data_point >= r_min and \
r_data_point <= r_max:
return True
return False
if number_of_valid_bins:
logging.info('Filtering: Filtering by number of histogram bins: '
'{}'.format(number_of_valid_bins))
passed_bins = numpy.unravel_index(
numpy.argsort(H.ravel())[-number_of_valid_bins:], H.shape)
else:
logging.info('Filtering: Filtering by threshold: {}'.format(threshold))
H_max = float(max(H.flatten()))
passed_bins = numpy.nonzero(H / H_max > threshold)
logging.info(
'{} bins out of {} passed'.format(
len(passed_bins[0]), len(H.flatten())))
if rough_search:
logging.debug('Filtering: Rough filtering only')
c_min, c_max, r_min, r_max = get_valid_range()
logging.debug(
'Valid range: Candidate = ({}, {}), Reference = ({}, {})'.format(
c_min, c_max, r_min, r_max))
return [check_in_valid_range(c, r) for c, r in
zip(candidate_data, reference_data)]
else:
logging.debug('Filtering: Exact filtering by bins')
candidate_bin_ids = numpy.digitize(candidate_data, candidate_bins) - 1
reference_bin_ids = numpy.digitize(reference_data, reference_bins) - 1
        # Use a set so membership tests are cheap and, under Python 3, the
        # zip iterator is not exhausted by the first lookup.
        passed_bin_pairs = set(zip(passed_bins[0], passed_bins[1]))
        return [(c, r) in passed_bin_pairs for c, r in
                zip(candidate_bin_ids, reference_bin_ids)]
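# Added usage sketch, not part of the original module: most pixel pairs
# cluster near (100, 100), so keeping only the two most populated histogram
# bins drops the outlier at (900, 100). The values are made up and the exact
# mask depends on the bin edges chosen by numpy.histogram2d.
def _example_filter_by_histogram_pixel_list():
    candidate = numpy.array([100, 101, 102, 103, 900])
    reference = numpy.array([100, 100, 101, 102, 100])
    return filter_by_histogram_pixel_list(
        candidate, reference, number_of_valid_bins=2)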
|
{
"content_hash": "e2a3e0188d659f46118aecca9ae346b7",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 79,
"avg_line_length": 49.068548387096776,
"alnum_prop": 0.5707946421234283,
"repo_name": "planetlabs/radiometric_normalization",
"id": "83ae97638ec625bdc652240365688ace0ab6d052",
"size": "12169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "radiometric_normalization/filtering.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "106911"
},
{
"name": "Shell",
"bytes": "955"
}
],
"symlink_target": ""
}
|
import numpy as np
np.maximum_sctype("S8")
np.maximum_sctype(object)
np.issctype(object)
np.issctype("S8")
np.obj2sctype(list)
np.obj2sctype(list, default=None)
np.obj2sctype(list, default=np.string_)
np.issubclass_(np.int32, int)
np.issubclass_(np.float64, float)
np.issubclass_(np.float64, (int, float))
np.issubsctype("int64", int)
np.issubsctype(np.array([1]), np.array([1]))
np.issubdtype("S1", np.string_)
np.issubdtype(np.float64, np.float32)
np.sctype2char("S1")
np.sctype2char(list)
np.find_common_type([], [np.int64, np.float32, complex])
np.find_common_type((), (np.int64, np.float32, complex))
np.find_common_type([np.int64, np.float32], [])
np.find_common_type([np.float32], [np.int64, np.float64])
np.cast[int]
np.cast["i8"]
np.cast[np.int64]
np.nbytes[int]
np.nbytes["i8"]
np.nbytes[np.int64]
np.ScalarType
np.ScalarType[0]
np.ScalarType[4]
np.ScalarType[9]
np.ScalarType[11]
np.typecodes["Character"]
np.typecodes["Complex"]
np.typecodes["All"]
|
{
"content_hash": "491756e71a928083b57b6ff5c6fbe303",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 57,
"avg_line_length": 20.70212765957447,
"alnum_prop": 0.7194244604316546,
"repo_name": "simongibbons/numpy",
"id": "5af0d171ca0482bb6e1ad73b9b3e7277b635aec9",
"size": "973",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "numpy/typing/tests/data/pass/numerictypes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5486229"
},
{
"name": "C++",
"bytes": "95911"
},
{
"name": "Cython",
"bytes": "147831"
},
{
"name": "D",
"bytes": "19"
},
{
"name": "Dockerfile",
"bytes": "5130"
},
{
"name": "Fortran",
"bytes": "8505"
},
{
"name": "JavaScript",
"bytes": "16928"
},
{
"name": "Makefile",
"bytes": "1697"
},
{
"name": "Python",
"bytes": "9920017"
},
{
"name": "Shell",
"bytes": "13540"
},
{
"name": "Smarty",
"bytes": "4071"
},
{
"name": "TeX",
"bytes": "896"
},
{
"name": "sed",
"bytes": "5741"
}
],
"symlink_target": ""
}
|
from beritest_tools import BaseBERITestCase
class test_raw_dli(BaseBERITestCase):
def test_reg_values(self):
'''Test dli instruction across all general-purpose registers'''
for i in range(32):
self.assertRegisterExpected(i, i, "Register load immediate failed")
|
{
"content_hash": "d58a1585af933ba3d5b7cec44d325d17",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 79,
"avg_line_length": 42,
"alnum_prop": 0.7074829931972789,
"repo_name": "8l/beri",
"id": "8e9c94bae00e06d098fc9275b52198c290bdf4fe",
"size": "1432",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cheritest/trunk/tests/framework/test_raw_dli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1629022"
},
{
"name": "Bluespec",
"bytes": "2336405"
},
{
"name": "C",
"bytes": "1058899"
},
{
"name": "C++",
"bytes": "1864"
},
{
"name": "Groff",
"bytes": "14381"
},
{
"name": "Haskell",
"bytes": "11711"
},
{
"name": "Lex",
"bytes": "2894"
},
{
"name": "Makefile",
"bytes": "242450"
},
{
"name": "Mathematica",
"bytes": "291"
},
{
"name": "Objective-C",
"bytes": "2387"
},
{
"name": "OpenEdge ABL",
"bytes": "568"
},
{
"name": "Perl",
"bytes": "19159"
},
{
"name": "Python",
"bytes": "1491002"
},
{
"name": "Shell",
"bytes": "91130"
},
{
"name": "SystemVerilog",
"bytes": "12058"
},
{
"name": "Tcl",
"bytes": "132818"
},
{
"name": "TeX",
"bytes": "4996"
},
{
"name": "Verilog",
"bytes": "125674"
},
{
"name": "Yacc",
"bytes": "5871"
}
],
"symlink_target": ""
}
|
import cv2
import numpy as np
# Face detection Haar cascade classifiers
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
# Start the video capture
# 0 selects the default (front) camera
cap = cv2.VideoCapture(0)
while True:
ret, img = cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY )
faces = face_cascade.detectMultiScale(gray)
    for (x, y, w, h) in faces:
        # Draw a rectangle around each detected face
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
# Rectangle around eye
eyes = eye_cascade.detectMultiScale(roi_gray)
for (ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color, (ex,ey), (ex+ew,ey+eh), (0,255,0), 2)
    cv2.imshow('img', img)
    k = cv2.waitKey(30) & 0xff
    # Press ESC to stop capturing
    if k == 27:
        break
# After the loop, release the camera and close the windows
cap.release()
cv2.destroyAllWindows()
|
{
"content_hash": "d514e70f8ad9916c0dc91bdde922319d",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 75,
"avg_line_length": 32,
"alnum_prop": 0.7002314814814815,
"repo_name": "Ansonmathew/Python-Cracks",
"id": "88ddc30de19aa10d6111214444b692d01a67e6dd",
"size": "864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "face.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1000"
}
],
"symlink_target": ""
}
|
"""
QUESTION:
You are playing the following Nim Game with your friend: There is a heap of stones on the table, each time one of you
take turns to remove 1 to 3 stones. The one who removes the last stone will be the winner. You will take the first turn
to remove the stones.
Both of you are very clever and have optimal strategies for the game. Write a function to determine whether you can win
the game given the number of stones in the heap.
For example, if there are 4 stones in the heap, then you will never win the game: no matter 1, 2, or 3 stones you
remove, the last stone will always be removed by your friend.
Hint:
If there are 5 stones in the heap, could you figure out a way to remove the stones such that you will always be the
winner?
ANSWER:
Look for the pattern ("quy luat" means "the rule"): any pile that is a
multiple of 4 is a losing position, because whatever 1-3 stones you remove,
your opponent can remove enough to make the pile a multiple of 4 again.
Hence you win exactly when n % 4 != 0.
"""
class Solution(object):
def canWinNim(self, n):
"""
:type n: int
:rtype: bool
"""
return n % 4 > 0
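# Added illustrative check, not part of the original solution: a brute-force
# dynamic program over the game confirms that the first player wins exactly
# when n is not a multiple of 4.
def _brute_force_can_win(n):
    # win[i] is True when the player about to move with i stones left can
    # force a win
    win = [False] * (n + 1)
    for i in range(1, n + 1):
        win[i] = any(not win[i - k] for k in (1, 2, 3) if i - k >= 0)
    return win[n]
# for n in range(1, 20):
#     assert _brute_force_can_win(n) == Solution().canWinNim(n)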
if __name__ == '__main__':
    print(Solution().canWinNim(10))
|
{
"content_hash": "ae531bcf810f42195de313320dd35690",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 119,
"avg_line_length": 32.12903225806452,
"alnum_prop": 0.7008032128514057,
"repo_name": "tktrungna/leetcode",
"id": "f7132ac51bb2b362f81e9375f1c545a508edbfd2",
"size": "996",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/nim-game.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "410956"
}
],
"symlink_target": ""
}
|
""" Creating controllers based on renderer
There are 2 types of rendering, stdout and urwid. Passing '-y' on the cli
selects headless (stdout) rendering rather than non-headless (urwid)
rendering.
Each controller will contain 2 modules, TUI (stdout) and GUI (urwid).
Both TUI() and GUI() should provide at least an entry method (render) and an
exit method (finish). This is a hard rule and is documented here so that
the controllers can stay consistent in their execution. All other functions
in their respective module should be prepended with double underscore '__'
as they should only be relevant to that module.
If both renderers share code place those functions inside a `common.py` module
and import relative to the render module, for example (from newcloud
controller),
from .common import check_bridge_exists
See any of the controllers for examples.
Usage:
# Render GUI version of clouds controller
from conjureup import controllers
controllers.use('clouds').render()
or
# Render TUI version of clouds controller
from conjureup.app_config import app
app.headless = True
c = controllers.use('clouds')
c.finish()
"""
from functools import lru_cache
from importlib import import_module
from conjureup import events, consts
from conjureup.app_config import app
import yaml
from conjureup import charm
from conjureup.utils import slurp
from conjureup.bundle import Bundle
from pathlib import Path
from itertools import chain
def setup_metadata_controller():
""" Load metadata controller based on spell_type
"""
if app.metadata.spell_type == consts.spell_types.SNAP:
return _setup_snap_metadata_controller()
# This is the typical default for now
return _setup_juju_metadata_controller()
def _setup_snap_metadata_controller():
""" Sets metadata for a snap spell
"""
spell_dir = Path(app.config['spell-dir'])
bundle_filename = spell_dir / 'bundle.yaml'
bundle_custom_filename = spell_dir / 'bundle-custom.yaml'
if bundle_filename.exists():
# Load bundle data early so we can merge any additional charm options
bundle_data = Bundle(yaml.load(bundle_filename.read_text()),
spell_type=app.metadata.spell_type)
else:
bundle_data = Bundle(spell_type=app.metadata.spell_type)
if bundle_custom_filename.exists():
bundle_custom = yaml.load(slurp(bundle_custom_filename))
bundle_data.apply(bundle_custom)
for name in app.selected_addons:
addon = app.addons[name]
bundle_data.apply(addon.bundle)
steps = list(chain(app.steps,
chain.from_iterable(app.addons[addon].steps
for addon in app.selected_addons)))
for step in steps:
if not (step.bundle_add or step.bundle_remove):
continue
if step.bundle_remove:
fragment = yaml.safe_load(step.bundle_remove.read_text())
bundle_data.subtract(fragment)
if step.bundle_add:
fragment = yaml.safe_load(step.bundle_add.read_text())
bundle_data.apply(fragment)
if app.conjurefile['bundle-remove']:
fragment = yaml.safe_load(app.conjurefile['bundle-remove'].read_text())
bundle_data.subtract(fragment)
if app.conjurefile['bundle-add']:
fragment = yaml.safe_load(app.conjurefile['bundle-add'].read_text())
bundle_data.apply(fragment)
app.current_bundle = bundle_data
def _setup_juju_metadata_controller():
""" Pulls in a local bundle or via charmstore api and sets up our
controller. You can also further customize the bundle by providing a local
bundle-custom.yaml that will be deep merged over whatever bundle is
referenced. """
spell_dir = Path(app.config['spell-dir'])
bundle_filename = spell_dir / 'bundle.yaml'
bundle_custom_filename = spell_dir / 'bundle-custom.yaml'
if bundle_filename.exists():
# Load bundle data early so we can merge any additional charm options
bundle_data = Bundle(yaml.load(bundle_filename.read_text()))
else:
bundle_name = app.metadata.bundle_name
if bundle_name is None:
raise Exception(
"Could not determine a bundle to download, please make sure "
"the spell contains a 'bundle-name' field."
)
bundle_channel = app.conjurefile['channel']
app.log.debug("Pulling bundle for {} from channel: {}".format(
bundle_name, bundle_channel))
bundle_data = Bundle(charm.get_bundle(bundle_name, bundle_channel))
if bundle_custom_filename.exists():
bundle_custom = yaml.load(slurp(bundle_custom_filename))
bundle_data.apply(bundle_custom)
for name in app.selected_addons:
addon = app.addons[name]
bundle_data.apply(addon.bundle)
steps = list(chain(app.steps,
chain.from_iterable(app.addons[addon].steps
for addon in app.selected_addons)))
for step in steps:
if not (step.bundle_add or step.bundle_remove):
continue
if step.bundle_remove:
fragment = yaml.safe_load(step.bundle_remove.read_text())
bundle_data.subtract(fragment)
if step.bundle_add:
fragment = yaml.safe_load(step.bundle_add.read_text())
bundle_data.apply(fragment)
if app.conjurefile['bundle-remove']:
fragment = yaml.safe_load(app.conjurefile['bundle-remove'].read_text())
bundle_data.subtract(fragment)
if app.conjurefile['bundle-add']:
fragment = yaml.safe_load(app.conjurefile['bundle-add'].read_text())
bundle_data.apply(fragment)
app.current_bundle = bundle_data
@lru_cache(maxsize=None)
def use(controller):
""" Loads view Controller
All controllers contain the following structure
conjure/controllers/<controller name>/{gui,tui}.py
Arguments:
controller: name of view controller to Load
"""
if events.Error.is_set() or events.Shutdown.is_set():
# once an error has been encountered or a shutdown issued
# we don't want to allow any new controllers to be rendered
return NoopController()
if app.metadata and hasattr(app.metadata, 'spell_type'):
spell_type = app.metadata.spell_type
else:
spell_type = consts.spell_types.JUJU
if app.headless:
pkg = ("conjureup.controllers.{}.{}.tui".format(
spell_type,
controller))
else:
pkg = ("conjureup.controllers.{}.{}.gui".format(
spell_type, controller))
module = import_module(pkg)
if '_controller_class' in dir(module):
return module._controller_class()
else:
return module
class NoopController:
def render(self, *args, **kwargs):
pass
|
{
"content_hash": "10ee0652e9253b8aaf728d302b6b252c",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 79,
"avg_line_length": 35.005102040816325,
"alnum_prop": 0.6640431423990671,
"repo_name": "conjure-up/conjure-up",
"id": "1b321a6238d98ecf2b2d244f1c8527cb0fad4126",
"size": "6861",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "conjureup/controllers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2598"
},
{
"name": "Python",
"bytes": "418929"
},
{
"name": "Shell",
"bytes": "1672"
}
],
"symlink_target": ""
}
|
"""Reinforcement learning models and parameters."""
import collections
import functools
import operator
# Dependency imports
import gym
from tensor2tensor.layers import common_hparams
from tensor2tensor.utils import registry
import tensorflow as tf
@registry.register_hparams
def ppo_base_v1():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.learning_rate = 1e-4
hparams.add_hparam("init_mean_factor", 0.1)
hparams.add_hparam("init_logstd", 0.1)
hparams.add_hparam("policy_layers", (100, 100))
hparams.add_hparam("value_layers", (100, 100))
hparams.add_hparam("num_agents", 30)
hparams.add_hparam("clipping_coef", 0.2)
hparams.add_hparam("gae_gamma", 0.99)
hparams.add_hparam("gae_lambda", 0.95)
hparams.add_hparam("entropy_loss_coef", 0.01)
hparams.add_hparam("value_loss_coef", 1)
hparams.add_hparam("optimization_epochs", 15)
hparams.add_hparam("epoch_length", 200)
hparams.add_hparam("epochs_num", 2000)
hparams.add_hparam("eval_every_epochs", 10)
hparams.add_hparam("num_eval_agents", 3)
hparams.add_hparam("video_during_eval", True)
return hparams
@registry.register_hparams
def continuous_action_base():
hparams = ppo_base_v1()
hparams.add_hparam("network", feed_forward_gaussian_fun)
return hparams
@registry.register_hparams
def discrete_action_base():
hparams = ppo_base_v1()
hparams.add_hparam("network", feed_forward_categorical_fun)
return hparams
# Neural networks for actor-critic algorithms
NetworkOutput = collections.namedtuple(
"NetworkOutput", "policy, value, action_postprocessing")
def feed_forward_gaussian_fun(action_space, config, observations):
"""Feed-forward Gaussian."""
if not isinstance(action_space, gym.spaces.box.Box):
raise ValueError("Expecting continuous action space.")
mean_weights_initializer = tf.contrib.layers.variance_scaling_initializer(
factor=config.init_mean_factor)
logstd_initializer = tf.random_normal_initializer(config.init_logstd, 1e-10)
flat_observations = tf.reshape(observations, [
tf.shape(observations)[0], tf.shape(observations)[1],
functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])
with tf.variable_scope("policy"):
x = flat_observations
for size in config.policy_layers:
x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
mean = tf.contrib.layers.fully_connected(
x, action_space.shape[0], tf.tanh,
weights_initializer=mean_weights_initializer)
logstd = tf.get_variable(
"logstd", mean.shape[2:], tf.float32, logstd_initializer)
logstd = tf.tile(
logstd[None, None],
[tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))
with tf.variable_scope("value"):
x = flat_observations
for size in config.value_layers:
x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
value = tf.contrib.layers.fully_connected(x, 1, None)[..., 0]
mean = tf.check_numerics(mean, "mean")
logstd = tf.check_numerics(logstd, "logstd")
value = tf.check_numerics(value, "value")
policy = tf.contrib.distributions.MultivariateNormalDiag(mean,
tf.exp(logstd))
return NetworkOutput(policy, value, lambda a: tf.clip_by_value(a, -2., 2))
def feed_forward_categorical_fun(action_space, config, observations):
"""Feed-forward categorical."""
if not isinstance(action_space, gym.spaces.Discrete):
raise ValueError("Expecting discrete action space.")
flat_observations = tf.reshape(observations, [
tf.shape(observations)[0], tf.shape(observations)[1],
functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])
with tf.variable_scope("policy"):
x = flat_observations
for size in config.policy_layers:
x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
logits = tf.contrib.layers.fully_connected(x, action_space.n,
activation_fn=None)
with tf.variable_scope("value"):
x = flat_observations
for size in config.value_layers:
x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
value = tf.contrib.layers.fully_connected(x, 1, None)[..., 0]
policy = tf.contrib.distributions.Categorical(logits=logits)
return NetworkOutput(policy, value, lambda a: a)
def feed_forward_cnn_small_categorical_fun(action_space, config, observations):
"""Small cnn network with categorical output."""
del config
obs_shape = observations.shape.as_list()
x = tf.reshape(observations, [-1] + obs_shape[2:])
with tf.variable_scope("policy"):
x = tf.to_float(x) / 255.0
x = tf.contrib.layers.conv2d(x, 32, [5, 5], [2, 2],
activation_fn=tf.nn.relu, padding="SAME")
x = tf.contrib.layers.conv2d(x, 32, [5, 5], [2, 2],
activation_fn=tf.nn.relu, padding="SAME")
flat_x = tf.reshape(
x, [tf.shape(observations)[0], tf.shape(observations)[1],
functools.reduce(operator.mul, x.shape.as_list()[1:], 1)])
x = tf.contrib.layers.fully_connected(flat_x, 128, tf.nn.relu)
logits = tf.contrib.layers.fully_connected(x, action_space.n,
activation_fn=None)
value = tf.contrib.layers.fully_connected(x, 1, activation_fn=None)[..., 0]
policy = tf.contrib.distributions.Categorical(logits=logits)
return NetworkOutput(policy, value, lambda a: a)
|
{
"content_hash": "1c1412453af7c15bc626be0c7ec6146c",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 79,
"avg_line_length": 37.374149659863946,
"alnum_prop": 0.6747360757189661,
"repo_name": "rsepassi/tensor2tensor",
"id": "7433026b0c696596e50c51fde0ea1adf31132470",
"size": "6100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensor2tensor/models/research/rl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "34646"
},
{
"name": "JavaScript",
"bytes": "78396"
},
{
"name": "Jupyter Notebook",
"bytes": "2328225"
},
{
"name": "Python",
"bytes": "1702690"
},
{
"name": "Shell",
"bytes": "1260"
}
],
"symlink_target": ""
}
|
from __future__ import division
import contextlib
from distutils import version
import functools
from oslo_log import log as logging
from oslo_utils import fnmatch
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
from cinder.zonemanager import utils as zm_utils
LOG = logging.getLogger(__name__)
BACKEND_QOS_CONSUMERS = frozenset(['back-end', 'both'])
QOS_MAX_IOPS = 'maxIOPS'
QOS_MAX_BWS = 'maxBWS'
def dump_provider_location(location_dict):
sorted_keys = sorted(location_dict.keys())
return '|'.join('%(k)s^%(v)s' % {'k': k, 'v': location_dict[k]}
for k in sorted_keys)
def build_provider_location(system, lun_type, lun_id, version):
"""Builds provider_location for volume or snapshot.
:param system: Unity serial number
:param lun_id: LUN ID in Unity
:param lun_type: 'lun'
:param version: driver version
"""
location_dict = {'system': system,
'type': lun_type,
'id': six.text_type(lun_id),
'version': version}
return dump_provider_location(location_dict)
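# Added usage sketch, not part of the original module: the serialized
# provider_location is a '|'-separated list of 'key^value' pairs with the
# keys in sorted order. The serial number below is a made-up placeholder.
def _example_build_provider_location():
    return build_provider_location(system='APM00000000001', lun_type='lun',
                                   lun_id='sv_1', version='1.0')
    # -> 'id^sv_1|system^APM00000000001|type^lun|version^1.0'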
def extract_provider_location(provider_location, key):
"""Extracts value of the specified field from provider_location string.
:param provider_location: provider_location string
:param key: field name of the value that to be extracted
:return: value of the specified field if it exists, otherwise,
None is returned
"""
if provider_location:
for kvp in provider_location.split('|'):
fields = kvp.split('^')
if len(fields) == 2 and fields[0] == key:
return fields[1]
else:
LOG.warning('"%(key)s" is not found in provider '
'location "%(location)s."',
{'key': key, 'location': provider_location})
else:
LOG.warning('Empty provider location received.')
def byte_to_gib(byte):
return byte / units.Gi
def byte_to_mib(byte):
return byte / units.Mi
def gib_to_mib(gib):
return gib * units.Ki
def validate_pool_names(conf_pools, array_pools):
if not conf_pools:
LOG.debug('No storage pools are specified. This host will manage '
'all the pools on the Unity system.')
return array_pools
conf_pools = set(map(lambda i: i.strip(), conf_pools))
array_pools = set(map(lambda i: i.strip(), array_pools))
existed = conf_pools & array_pools
if not existed:
msg = (_('No storage pools to be managed exist. Please check '
'your configuration. The available storage pools on the '
'system are %s.') % array_pools)
raise exception.VolumeBackendAPIException(data=msg)
return existed
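# Illustrative sketch (made-up pool names): only pools present both in the
# configuration and on the array are managed, so this returns {'pool_a'}
# after stripping whitespace from the configured names.
# >>> validate_pool_names(['pool_a ', 'pool_x'], ['pool_a', 'pool_b'])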
def extract_iscsi_uids(connector):
if 'initiator' not in connector:
msg = _("Host %s doesn't have iSCSI initiator.") % connector['host']
raise exception.VolumeBackendAPIException(data=msg)
return [connector['initiator']]
def extract_fc_uids(connector):
if 'wwnns' not in connector or 'wwpns' not in connector:
msg = _("Host %s doesn't have FC initiators.") % connector['host']
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
wwnns = connector['wwnns']
wwpns = connector['wwpns']
wwns = [(node + port).upper() for node, port in zip(wwnns, wwpns)]
def _to_wwn(wwn):
        # Format the WWN to include colons.
        # For example, convert 1122200000051E55E100 to
        # 11:22:20:00:00:05:1E:55:E1:00
return ':'.join(wwn[i:i + 2] for i in range(0, len(wwn), 2))
return list(map(_to_wwn, wwns))
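# Illustrative sketch (made-up WWNs): each node WWN is paired with the
# corresponding port WWN, concatenated, upper-cased and colon-formatted.
# >>> extract_fc_uids({'host': 'host_1',
# ...                  'wwnns': ['1122200000051e55'],
# ...                  'wwpns': ['e100aabbccddeeff']})
# ['11:22:20:00:00:05:1E:55:E1:00:AA:BB:CC:DD:EE:FF']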
def convert_ip_to_portal(ip):
is_ipv6_without_brackets = ':' in ip and ip[-1] != ']'
if is_ipv6_without_brackets:
return '[%s]:3260' % ip
return '%s:3260' % ip
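# Illustrative sketch (assumed addresses): IPv4 and already-bracketed IPv6
# addresses just get the default iSCSI port appended, while a bare IPv6
# address is wrapped in brackets first.
# >>> convert_ip_to_portal('10.0.0.1')
# '10.0.0.1:3260'
# >>> convert_ip_to_portal('fd00::1')
# '[fd00::1]:3260'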
def convert_to_itor_tgt_map(zone_mapping):
"""Function to process data from lookup service.
:param zone_mapping: mapping is the data from the zone lookup service
with below format
{
<San name>: {
'initiator_port_wwn_list':
('200000051e55a100', '200000051e55a121'..)
'target_port_wwn_list':
('100000051e55a100', '100000051e55a121'..)
}
}
"""
target_wwns = []
itor_tgt_map = {}
for san_name in zone_mapping:
one_map = zone_mapping[san_name]
for target in one_map['target_port_wwn_list']:
if target not in target_wwns:
target_wwns.append(target)
for initiator in one_map['initiator_port_wwn_list']:
itor_tgt_map[initiator] = one_map['target_port_wwn_list']
LOG.debug("target_wwns: %(tgt_wwns)s\n init_targ_map: %(itor_tgt_map)s",
{'tgt_wwns': target_wwns,
'itor_tgt_map': itor_tgt_map})
return target_wwns, itor_tgt_map
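# Illustrative sketch with made-up WWNs (not taken from a real lookup
# service response):
# >>> mapping = {'san_1': {
# ...     'initiator_port_wwn_list': ['200000051e55a100'],
# ...     'target_port_wwn_list': ['100000051e55a100']}}
# >>> convert_to_itor_tgt_map(mapping)
# (['100000051e55a100'], {'200000051e55a100': ['100000051e55a100']})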
def get_pool_name(volume):
return vol_utils.extract_host(volume.host, 'pool')
def get_extra_spec(volume, spec_key):
spec_value = None
type_id = volume.volume_type_id
if type_id is not None:
extra_specs = volume_types.get_volume_type_extra_specs(type_id)
if spec_key in extra_specs:
spec_value = extra_specs[spec_key]
return spec_value
def ignore_exception(func, *args, **kwargs):
try:
func(*args, **kwargs)
except Exception as ex:
LOG.warning('Error occurred but ignored. Function: %(func_name)s, '
'args: %(args)s, kwargs: %(kwargs)s, '
'exception: %(ex)s.',
{'func_name': func, 'args': args,
'kwargs': kwargs, 'ex': ex})
@contextlib.contextmanager
def assure_cleanup(enter_func, exit_func, use_enter_return):
"""Assures the resource is cleaned up. Used as a context.
:param enter_func: the function to execute when entering the context.
:param exit_func: the function to execute when leaving the context.
:param use_enter_return: the flag indicates whether to pass the return
value of enter_func in to the exit_func.
"""
enter_return = None
try:
if isinstance(enter_func, functools.partial):
enter_func_name = enter_func.func.__name__
else:
enter_func_name = enter_func.__name__
LOG.debug(('Entering context. Function: %(func_name)s, '
'use_enter_return: %(use)s.'),
{'func_name': enter_func_name,
'use': use_enter_return})
enter_return = enter_func()
yield enter_return
finally:
if isinstance(exit_func, functools.partial):
exit_func_name = exit_func.func.__name__
else:
exit_func_name = exit_func.__name__
LOG.debug(('Exiting context. Function: %(func_name)s, '
'use_enter_return: %(use)s.'),
{'func_name': exit_func_name,
'use': use_enter_return})
if enter_return is not None:
if use_enter_return:
ignore_exception(exit_func, enter_return)
else:
ignore_exception(exit_func)
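# Illustrative sketch (create_resource, delete_resource and use_resource are
# hypothetical helpers, not part of this module): the resource acquired on
# entry is always released on exit, even if the body raises, and exceptions
# from the cleanup itself are logged and swallowed by ignore_exception.
# >>> with assure_cleanup(create_resource, delete_resource,
# ...                     use_enter_return=True) as resource:
# ...     use_resource(resource)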
def create_lookup_service():
return zm_utils.create_lookup_service()
def get_backend_qos_specs(volume):
type_id = volume.volume_type_id
if type_id is None:
return None
qos_specs = volume_types.get_volume_type_qos_specs(type_id)
if qos_specs is None:
return None
qos_specs = qos_specs['qos_specs']
if qos_specs is None:
return None
consumer = qos_specs['consumer']
# Front end QoS specs are handled by nova. We ignore them here.
if consumer not in BACKEND_QOS_CONSUMERS:
return None
max_iops = qos_specs['specs'].get(QOS_MAX_IOPS)
max_bws = qos_specs['specs'].get(QOS_MAX_BWS)
if max_iops is None and max_bws is None:
return None
return {
'id': qos_specs['id'],
QOS_MAX_IOPS: max_iops,
QOS_MAX_BWS: max_bws,
}
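# Illustrative sketch of the returned structure when the volume type carries
# a back-end QoS spec with only maxIOPS set (values are assumed):
# {'id': '<qos_specs id>', 'maxIOPS': '5000', 'maxBWS': None}
# Front-end specs and types without QoS yield None instead.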
def remove_empty(option, value_list):
if value_list is not None:
value_list = list(filter(None, map(str.strip, value_list)))
if not value_list:
raise exception.InvalidConfigurationValue(option=option,
value=value_list)
return value_list
def match_any(full, patterns):
matched = list(
filter(lambda x: any(fnmatch.fnmatchcase(x, p) for p in patterns),
full))
unmatched = list(
filter(lambda x: not any(fnmatch.fnmatchcase(x, p) for p in patterns),
full))
unmatched_patterns = list(
filter(lambda p: not any(fnmatch.fnmatchcase(x, p) for x in full),
patterns))
return matched, unmatched, unmatched_patterns
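# Illustrative sketch (assumed pool names and glob patterns):
# >>> match_any(['pool_a', 'pool_b', 'nas_1'], ['pool_*', 'flash_*'])
# (['pool_a', 'pool_b'], ['nas_1'], ['flash_*'])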
def is_before_4_1(ver):
return version.LooseVersion(ver) < version.LooseVersion('4.1')
|
{
"content_hash": "f76193a3042e01a7fe91b6eec1a6f6e3",
"timestamp": "",
"source": "github",
"line_count": 282,
"max_line_length": 78,
"avg_line_length": 32.0709219858156,
"alnum_prop": 0.6000663423264042,
"repo_name": "j-griffith/cinder",
"id": "8d22d01129f18c2bf27d210b611e9ee18d353b90",
"size": "9667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/dell_emc/unity/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "621"
},
{
"name": "Python",
"bytes": "20155959"
},
{
"name": "Shell",
"bytes": "16354"
}
],
"symlink_target": ""
}
|