code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities related to loading TFDS datasets."""
import logging
from typing import Any, Dict, Iterator, Optional, Tuple, Sequence
from acme import types
from flax import jax_utils
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
def _episode_to_transition(step: Dict[str, Any]) -> types.Transition:
  """Converts a batched episode's step dict into a `types.Transition`.

  Each field holds all steps of one episode stacked along the leading axis;
  consecutive steps are paired up into (s, a, r, d, s') transitions.
  """
  observations = step['observation']
  terminal_flags = step['is_terminal']
  return types.Transition(
      observation=observations[:-1],
      action=step['action'][:-1],
      reward=step['reward'][:-1],
      # Discount is 0 exactly when the next step is terminal.
      discount=1.0 - tf.cast(terminal_flags[1:], dtype=tf.float32),
      # If next step is terminal, then the observation may be arbitrary.
      next_observation=observations[1:],
  )
def _episode_steps_to_transition(episode) -> tf.data.Dataset:
  """Transforms an Episode into a dataset of Transitions."""
  steps = episode['steps']
  # Batch the whole episode into one element. This would only fail for an
  # episode with more than 1e9 steps, which is quite unlikely.
  single_element = tf.data.experimental.get_single_element(
      steps.batch(1000000000))
  transitions = _episode_to_transition(single_element)
  return tf.data.Dataset.from_tensor_slices(transitions)
def get_tfds_dataset(dataset_name: str, num_episodes: Optional[int] = None):
  """Loads the 'train' split of a TFDS dataset as a flat Transition dataset.

  Args:
    dataset_name: name understood by `tfds.load`.
    num_episodes: if truthy, only the first `num_episodes` episodes are used.
  """
  episodes = tfds.load(dataset_name)['train']
  if num_episodes:
    episodes = episodes.take(num_episodes)
  return episodes.flat_map(_episode_steps_to_transition)
# In order to avoid excessive copying on TPU one needs to make the last
# dimension a multiple of this number.
_BEST_DIVISOR = 128
def _pad(x: jnp.ndarray) -> jnp.ndarray:
if len(x.shape) != 2:
return x
# Find a more scientific way to find this threshold (30). Depending on various
# conditions for low enough sizes the excessive copying is not triggered.
if x.shape[-1] % _BEST_DIVISOR != 0 and x.shape[-1] > 30:
n = _BEST_DIVISOR - (x.shape[-1] % _BEST_DIVISOR)
x = np.pad(x, [(0, 0)] * (x.ndim - 1) + [(0, n)], 'constant')
return x
# Undo the padding.
def _unpad(x: jnp.ndarray, shape: Sequence[int]) -> jnp.ndarray:
if len(shape) == 2 and x.shape[-1] != shape[-1]:
return x[..., :shape[-1]]
return x
_PMAP_AXIS_NAME = 'data'
class JaxInMemoryRandomSampleIterator(Iterator[Any]):
  """In memory random sample iterator implemented in JAX.
  Loads the whole dataset in memory and performs random sampling with
  replacement of batches of `batch_size`.
  This class provides much faster sampling functionality compared to using
  an iterator on tf.data.Dataset.
  """
  def __init__(self,
               dataset: tf.data.Dataset,
               key: jnp.ndarray,
               batch_size: int,
               shard_dataset_across_devices: bool = False):
    """Creates an iterator.
    Args:
      dataset: underlying tf Dataset
      key: a key to be used for random number generation
      batch_size: batch size
      shard_dataset_across_devices: whether to use all available devices
        for storing the underlying dataset. The upside is a larger
        dataset capacity that fits into memory. Downsides are:
        - execution of pmapped functions is usually slower than jitted
        - few last elements in the dataset might be dropped (if not multiple)
        - sampling is not 100% uniform, since each core will be doing sampling
          only within its data chunk
        The number of available devices must divide the batch_size evenly.
    """
    # Read the whole dataset. We use artificially large batch_size to make sure
    # we capture the whole dataset.
    data = next(dataset.batch(1000000000).as_numpy_iterator())
    # The leading dimension of any leaf is the number of elements loaded.
    self._dataset_size = jax.tree_flatten(
        jax.tree_map(lambda x: x.shape[0], data))[0][0]
    # NOTE(review): relies on flax's private device-ordering helper so the
    # sharded layout matches what pmap expects.
    device = jax_utils._pmap_device_order()
    if not shard_dataset_across_devices:
      device = device[:1]
    should_pmap = len(device) > 1
    assert batch_size % len(device) == 0
    # Drop the last few elements so each device holds an equal-sized chunk.
    self._dataset_size = self._dataset_size - self._dataset_size % len(device)
    # len(device) needs to divide self._dataset_size evenly.
    assert self._dataset_size % len(device) == 0
    logging.info('Trying to load %s elements to %s', self._dataset_size, device)
    logging.info('Dataset %s %s',
                 ('before padding' if should_pmap else ''),
                 jax.tree_map(lambda x: x.shape, data))
    if should_pmap:
      # Remember the unpadded shapes so samples can be unpadded after gather.
      shapes = jax.tree_map(lambda x: x.shape, data)
      # Padding to a multiple of 128 is needed to avoid excessive copying on TPU
      data = jax.tree_map(_pad, data)
      logging.info('Dataset after padding %s',
                   jax.tree_map(lambda x: x.shape, data))
      def split_and_put(x: jnp.ndarray) -> jnp.ndarray:
        # One equal chunk of the dataset per device.
        return jax.device_put_sharded(
            np.split(x[:self._dataset_size], len(device)), devices=device)
      self._jax_dataset = jax.tree_map(split_and_put, data)
    else:
      self._jax_dataset = jax.tree_map(jax.device_put, data)
    # One independent PRNG key per device when pmapping.
    self._key = (jnp.stack(jax.random.split(key, len(device)))
                 if should_pmap else key)
    def sample_per_shard(data: Any,
                         key: jnp.ndarray) -> Tuple[Any, jnp.ndarray]:
      # Sampling with replacement within this device's chunk only.
      key1, key2 = jax.random.split(key)
      indices = jax.random.randint(
          key1, (batch_size // len(device),),
          minval=0,
          maxval=self._dataset_size // len(device))
      data_sample = jax.tree_map(lambda d: jnp.take(d, indices, axis=0), data)
      return data_sample, key2
    if should_pmap:
      def sample(data, key):
        data_sample, key = sample_per_shard(data, key)
        # Gathering data on TPUs is much more efficient that doing so on a host
        # since it avoids Host - Device communications.
        data_sample = jax.lax.all_gather(
            data_sample, axis_name=_PMAP_AXIS_NAME, axis=0, tiled=True)
        data_sample = jax.tree_multimap(_unpad, data_sample, shapes)
        return data_sample, key
      pmapped_sample = jax.pmap(sample, axis_name=_PMAP_AXIS_NAME)
      def sample_and_postprocess(key: jnp.ndarray) -> Tuple[Any, jnp.ndarray]:
        data, key = pmapped_sample(self._jax_dataset, key)
        # All pmapped devices return the same data, so we just take the one from
        # the first device.
        return jax.tree_map(lambda x: x[0], data), key
      self._sample = sample_and_postprocess
    else:
      self._sample = jax.jit(
          lambda key: sample_per_shard(self._jax_dataset, key))
  def __next__(self) -> Any:
    # Each call draws a fresh batch and advances the stored PRNG key.
    data, self._key = self._sample(self._key)
    return data
  @property
  def dataset_size(self) -> int:
    """An integer of the dataset cardinality."""
    return self._dataset_size
| deepmind/acme | acme/datasets/tfds.py | Python | apache-2.0 | 7,201 |
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2004 Robert Kaye
# Copyright (C) 2006-2009, 2011-2013, 2017 Lukáš Lalinský
# Copyright (C) 2007-2011, 2015, 2018-2020 Philipp Wolfer
# Copyright (C) 2008 Gary van der Merwe
# Copyright (C) 2008-2009 Nikolai Prokoschenko
# Copyright (C) 2009 Carlin Mangar
# Copyright (C) 2009 David Hilton
# Copyright (C) 2011-2014 Michael Wiencek
# Copyright (C) 2012 Erik Wasser
# Copyright (C) 2012 Johannes Weißl
# Copyright (C) 2012 noobie
# Copyright (C) 2012-2014 Wieland Hoffmann
# Copyright (C) 2013 Calvin Walton
# Copyright (C) 2013-2014 Ionuț Ciocîrlan
# Copyright (C) 2013-2014, 2017 Sophist-UK
# Copyright (C) 2013-2014, 2017-2019 Laurent Monin
# Copyright (C) 2016 Rahul Raturi
# Copyright (C) 2016 Ville Skyttä
# Copyright (C) 2016-2018 Sambhav Kothari
# Copyright (C) 2017-2018 Antonio Larrosa
# Copyright (C) 2019 Joel Lintunen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from collections import defaultdict
import fnmatch
from functools import partial
import os
import os.path
import re
import shutil
import unicodedata
from PyQt5 import QtCore
from picard import (
PICARD_APP_NAME,
config,
log,
)
from picard.const import QUERY_LIMIT
from picard.const.sys import (
IS_MACOS,
IS_WIN,
)
from picard.metadata import (
Metadata,
SimMatchTrack,
)
from picard.plugin import (
PluginFunctions,
PluginPriority,
)
from picard.util import (
decode_filename,
emptydir,
find_best_match,
format_time,
pathcmp,
thread,
tracknum_from_filename,
)
from picard.util.filenaming import make_short_filename
from picard.util.preservedtags import PreservedTags
from picard.util.scripttofilename import script_to_filename_with_metadata
from picard.util.tags import PRESERVED_TAGS
from picard.ui.item import Item
class File(QtCore.QObject, Item):
metadata_images_changed = QtCore.pyqtSignal()
NAME = None
UNDEFINED = -1
PENDING = 0
NORMAL = 1
CHANGED = 2
ERROR = 3
REMOVED = 4
LOOKUP_METADATA = 1
LOOKUP_ACOUSTID = 2
comparison_weights = {
"title": 13,
"artist": 4,
"album": 5,
"length": 10,
"totaltracks": 4,
"releasetype": 14,
"releasecountry": 2,
"format": 2,
"isvideo": 2,
}
class PreserveTimesStatError(Exception):
pass
class PreserveTimesUtimeError(Exception):
pass
# in order to significantly speed up performance, the number of pending
# files is cached, set @state.setter
num_pending_files = 0
    def __init__(self, filename):
        """Create a File for *filename*; starts in the PENDING state."""
        super().__init__()
        self.filename = filename
        self.base_filename = os.path.basename(filename)
        # Assign through the `state` property so the pending-files counter
        # and the stats signal are updated.
        self._state = File.UNDEFINED
        self.state = File.PENDING
        self.error = None
        # Metadata as read from the file on disk vs. the (possibly edited)
        # working copy shown in the UI.
        self.orig_metadata = Metadata()
        self.metadata = Metadata()
        self.similarity = 1.0
        self.parent = None
        self.lookup_task = None
        self.item = None
        self.acoustid_fingerprint = None
        self.acoustid_length = 0
def __repr__(self):
return '<%s %r>' % (type(self).__name__, self.base_filename)
@property
def new_metadata(self):
return self.metadata
def load(self, callback):
thread.run_task(
partial(self._load_check, self.filename),
partial(self._loading_finished, callback),
priority=1)
def _load_check(self, filename):
# Check that file has not been removed since thread was queued
# Don't load if we are stopping.
if self.state != File.PENDING:
log.debug("File not loaded because it was removed: %r", self.filename)
return None
if self.tagger.stopping:
log.debug("File not loaded because %s is stopping: %r", PICARD_APP_NAME, self.filename)
return None
return self._load(filename)
def _load(self, filename):
"""Load metadata from the file."""
raise NotImplementedError
def _loading_finished(self, callback, result=None, error=None):
if self.state != File.PENDING or self.tagger.stopping:
return
if error is not None:
self.error = str(error)
self.state = self.ERROR
from picard.formats import supported_extensions
file_name, file_extension = os.path.splitext(self.base_filename)
if file_extension not in supported_extensions():
self.remove()
log.error('Unsupported media file %r wrongly loaded. Removing ...', self)
return
else:
self.error = None
self.state = self.NORMAL
self._copy_loaded_metadata(result)
# use cached fingerprint from file metadata
if not config.setting["ignore_existing_acoustid_fingerprints"]:
fingerprints = self.metadata.getall('acoustid_fingerprint')
if fingerprints:
self.set_acoustid_fingerprint(fingerprints[0])
run_file_post_load_processors(self)
self.update()
callback(self)
def _copy_loaded_metadata(self, metadata):
filename, _ = os.path.splitext(self.base_filename)
metadata['~length'] = format_time(metadata.length)
if 'tracknumber' not in metadata:
tracknumber = tracknum_from_filename(self.base_filename)
if tracknumber != -1:
tracknumber = str(tracknumber)
metadata['tracknumber'] = tracknumber
if 'title' not in metadata:
stripped_filename = filename.lstrip('0')
tnlen = len(tracknumber)
if stripped_filename[:tnlen] == tracknumber:
metadata['title'] = stripped_filename[tnlen:].lstrip()
if 'title' not in metadata:
metadata['title'] = filename
self.orig_metadata = metadata
self.metadata.copy(metadata)
    def copy_metadata(self, metadata, preserve_deleted=True):
        """Replace this file's working metadata with *metadata*.

        Tags in the user's preserved-tags list (and the built-in
        PRESERVED_TAGS) keep their on-disk values; the existing AcoustID is
        carried over unless *metadata* explicitly deleted it.
        """
        acoustid = self.metadata["acoustid_id"]
        saved_metadata = {}
        preserved_tags = PreservedTags()
        # Collect original values of tags that must survive the copy.
        for tag, values in self.orig_metadata.rawitems():
            if tag in preserved_tags or tag in PRESERVED_TAGS:
                saved_metadata[tag] = values
        deleted_tags = self.metadata.deleted_tags
        self.metadata.copy(metadata)
        if preserve_deleted:
            # Re-mark tags that were already marked for deletion on this file.
            for tag in deleted_tags:
                del self.metadata[tag]
        for tag, values in saved_metadata.items():
            self.metadata[tag] = values
        if acoustid and "acoustid_id" not in metadata.deleted_tags:
            self.metadata["acoustid_id"] = acoustid
        self.metadata_images_changed.emit()
def keep_original_images(self):
self.metadata.images = self.orig_metadata.images.copy()
self.update()
self.metadata_images_changed.emit()
def has_error(self):
return self.state == File.ERROR
def save(self):
self.set_pending()
metadata = Metadata()
metadata.copy(self.metadata)
thread.run_task(
partial(self._save_and_rename, self.filename, metadata),
self._saving_finished,
priority=2,
thread_pool=self.tagger.save_thread_pool)
    def _preserve_times(self, filename, func):
        """Save filename times before calling func, and set them again.

        Returns (atime_ns, mtime_ns). Raises PreserveTimesStatError when the
        original timestamps cannot be read (func is then NOT called), or
        PreserveTimesUtimeError when they cannot be restored afterwards.
        """
        try:
            # https://docs.python.org/3/library/os.html#os.utime
            # Since Python 3.3, ns parameter is available
            # The best way to preserve exact times is to use the st_atime_ns and st_mtime_ns
            # fields from the os.stat() result object with the ns parameter to utime.
            st = os.stat(filename)
        except OSError as why:
            errmsg = "Couldn't read timestamps from %r: %s" % (filename, why)
            raise self.PreserveTimesStatError(errmsg) from None
        # If we can't read the original times we never get here, so func is
        # only invoked once the timestamps are safely captured.
        func()
        try:
            os.utime(filename, ns=(st.st_atime_ns, st.st_mtime_ns))
        except OSError as why:
            errmsg = "Couldn't preserve timestamps for %r: %s" % (filename, why)
            raise self.PreserveTimesUtimeError(errmsg) from None
        return (st.st_atime_ns, st.st_mtime_ns)
    def _save_and_rename(self, old_filename, metadata):
        """Save the metadata, then rename/move the file per the settings.

        Runs on a worker thread. Returns the new filename, or None when the
        save was skipped (file removed or application stopping).
        """
        # Check that file has not been removed since thread was queued
        # Also don't save if we are stopping.
        if self.state == File.REMOVED:
            log.debug("File not saved because it was removed: %r", self.filename)
            return None
        if self.tagger.stopping:
            log.debug("File not saved because %s is stopping: %r", PICARD_APP_NAME, self.filename)
            return None
        new_filename = old_filename
        if not config.setting["dont_write_tags"]:
            save = partial(self._save, old_filename, metadata)
            if config.setting["preserve_timestamps"]:
                # Only a failure to restore times is tolerated; a failure to
                # read them (PreserveTimesStatError) propagates and aborts.
                try:
                    self._preserve_times(old_filename, save)
                except self.PreserveTimesUtimeError as why:
                    log.warning(why)
            else:
                save()
        # Rename files
        if config.setting["rename_files"] or config.setting["move_files"]:
            new_filename = self._rename(old_filename, metadata)
        # Move extra files (images, playlists, etc.)
        if config.setting["move_files"] and config.setting["move_additional_files"]:
            self._move_additional_files(old_filename, new_filename)
        # Delete empty directories
        if config.setting["delete_empty_dirs"]:
            dirname = os.path.dirname(old_filename)
            try:
                emptydir.rm_empty_dir(dirname)
                # Walk up the tree removing now-empty parents as well.
                head, tail = os.path.split(dirname)
                if not tail:
                    head, tail = os.path.split(head)
                while head and tail:
                    emptydir.rm_empty_dir(head)
                    head, tail = os.path.split(head)
            except OSError as why:
                log.warning("Error removing directory: %s", why)
            except emptydir.SkipRemoveDir as why:
                log.debug("Not removing empty directory: %s", why)
        # Save cover art images
        if config.setting["save_images_to_files"]:
            self._save_images(os.path.dirname(new_filename), metadata)
        return new_filename
    def _saving_finished(self, result=None, error=None):
        """Main-thread callback after `_save_and_rename` completes.

        Updates the stored filename, merges saved metadata back into
        `orig_metadata`, and re-registers the file under its new path.
        """
        # Handle file removed before save
        # Result is None if save was skipped
        if ((self.state == File.REMOVED or self.tagger.stopping)
                and result is None):
            return
        old_filename = new_filename = self.filename
        if error is not None:
            self.error = str(error)
            self.state = File.ERROR
        else:
            self.filename = new_filename = result
            self.base_filename = os.path.basename(new_filename)
            length = self.orig_metadata.length
            # Keep technical properties which are not written as tags.
            temp_info = {}
            for info in ('~bitrate', '~sample_rate', '~channels',
                         '~bits_per_sample', '~format'):
                temp_info[info] = self.orig_metadata[info]
            # Data is copied from New to Original because New may be
            # a subclass to handle id3v23
            if config.setting["clear_existing_tags"]:
                self.orig_metadata.copy(self.new_metadata)
            else:
                self.orig_metadata.update(self.new_metadata)
            # After saving deleted tags should no longer be marked deleted
            self.new_metadata.clear_deleted()
            self.orig_metadata.clear_deleted()
            self.orig_metadata.length = length
            self.orig_metadata['~length'] = format_time(length)
            for k, v in temp_info.items():
                self.orig_metadata[k] = v
            self.error = None
            self.clear_pending()
            self._add_path_to_metadata(self.orig_metadata)
            self.metadata_images_changed.emit()
            # run post save hook
            run_file_post_save_processors(self)
        # Force update to ensure file status icon changes immediately after save
        self.update()
        if self.state != File.REMOVED:
            # Re-key the tagger's file registry under the new path.
            del self.tagger.files[old_filename]
            self.tagger.files[new_filename] = self
        if self.tagger.stopping:
            log.debug("Save of %r completed before stopping Picard", self.filename)
def _save(self, filename, metadata):
"""Save the metadata."""
raise NotImplementedError
def _script_to_filename(self, naming_format, file_metadata, file_extension, settings=None):
if settings is None:
settings = config.setting
metadata = Metadata()
if settings["clear_existing_tags"]:
metadata.copy(file_metadata)
else:
metadata.copy(self.orig_metadata)
metadata.update(file_metadata)
(filename, new_metadata) = script_to_filename_with_metadata(
naming_format, metadata, file=self, settings=settings)
# NOTE: the script_to_filename strips the extension away
ext = new_metadata.get('~extension', file_extension)
return filename + '.' + ext.lstrip('.')
def _fixed_splitext(self, filename):
# In case the filename is blank and only has the extension
# the real extension is in new_filename and ext is blank
new_filename, ext = os.path.splitext(filename)
if ext == '' and new_filename.lower() in self.EXTENSIONS:
ext = new_filename
new_filename = ''
return new_filename, ext
def _format_filename(self, new_dirname, new_filename, metadata, settings):
old_filename = new_filename
new_filename, ext = self._fixed_splitext(new_filename)
ext = ext.lower()
new_filename = new_filename + ext
# expand the naming format
naming_format = settings['file_naming_format']
if naming_format:
new_filename = self._script_to_filename(naming_format, metadata, ext, settings)
if not settings['rename_files']:
new_filename = os.path.join(os.path.dirname(new_filename), old_filename)
if not settings['move_files']:
new_filename = os.path.basename(new_filename)
win_compat = IS_WIN or settings['windows_compatibility']
new_filename = make_short_filename(new_dirname, new_filename,
win_compat)
# TODO: move following logic under util.filenaming
# (and reconsider its necessity)
# win32 compatibility fixes
if win_compat:
new_filename = new_filename.replace('./', '_/').replace('.\\', '_\\')
# replace . at the beginning of file and directory names
new_filename = new_filename.replace('/.', '/_').replace('\\.', '\\_')
if new_filename.startswith('.'):
new_filename = '_' + new_filename[1:]
# Fix for precomposed characters on OSX
if IS_MACOS:
new_filename = unicodedata.normalize("NFD", new_filename)
return new_filename
def make_filename(self, filename, metadata, settings=None):
"""Constructs file name based on metadata and file naming formats."""
if settings is None:
settings = config.setting
if settings["move_files"]:
new_dirname = settings["move_files_to"]
if not os.path.isabs(new_dirname):
new_dirname = os.path.normpath(os.path.join(os.path.dirname(filename), new_dirname))
else:
new_dirname = os.path.dirname(filename)
new_filename = os.path.basename(filename)
if settings["rename_files"] or settings["move_files"]:
new_filename = self._format_filename(new_dirname, new_filename, metadata, settings)
new_path = os.path.join(new_dirname, new_filename)
try:
return os.path.realpath(new_path)
except FileNotFoundError:
# os.path.realpath can fail if cwd doesn't exist
return new_path
def _rename(self, old_filename, metadata):
new_filename, ext = os.path.splitext(
self.make_filename(old_filename, metadata))
if old_filename == new_filename + ext:
return old_filename
new_dirname = os.path.dirname(new_filename)
if not os.path.isdir(new_dirname):
os.makedirs(new_dirname)
tmp_filename = new_filename
i = 1
while (not pathcmp(old_filename, new_filename + ext)
and os.path.exists(new_filename + ext)):
new_filename = "%s (%d)" % (tmp_filename, i)
i += 1
new_filename = new_filename + ext
log.debug("Moving file %r => %r", old_filename, new_filename)
shutil.move(old_filename, new_filename)
return new_filename
def _save_images(self, dirname, metadata):
"""Save the cover images to disk."""
if not metadata.images:
return
counters = defaultdict(lambda: 0)
images = []
if config.setting["caa_save_single_front_image"]:
images = [metadata.images.get_front_image()]
if not images:
images = metadata.images
for image in images:
image.save(dirname, metadata, counters)
def _move_additional_files(self, old_filename, new_filename):
"""Move extra files, like images, playlists..."""
new_path = os.path.dirname(new_filename)
old_path = os.path.dirname(old_filename)
if new_path == old_path:
# skip, same directory, nothing to move
return
patterns = config.setting["move_additional_files_pattern"]
pattern_regexes = set()
for pattern in patterns.split():
pattern = pattern.strip()
if not pattern:
continue
pattern_regex = re.compile(fnmatch.translate(pattern), re.IGNORECASE)
match_hidden = pattern.startswith('.')
pattern_regexes.add((pattern_regex, match_hidden))
if not pattern_regexes:
return
moves = set()
try:
# TODO: use with statement with python 3.6+
for entry in os.scandir(old_path):
is_hidden = entry.name.startswith('.')
for pattern_regex, match_hidden in pattern_regexes:
if is_hidden and not match_hidden:
continue
if pattern_regex.match(entry.name):
new_file_path = os.path.join(new_path, entry.name)
moves.add((entry.path, new_file_path))
break # we are done with this file
except OSError as why:
log.error("Failed to scan %r: %s", old_path, why)
return
for old_file_path, new_file_path in moves:
# FIXME we shouldn't do this from a thread!
if self.tagger.files.get(decode_filename(old_file_path)):
log.debug("File loaded in the tagger, not moving %r", old_file_path)
continue
log.debug("Moving %r to %r", old_file_path, new_file_path)
try:
shutil.move(old_file_path, new_file_path)
except OSError as why:
log.error("Failed to move %r to %r: %s", old_file_path,
new_file_path, why)
def remove(self, from_parent=True):
if from_parent and self.parent:
log.debug("Removing %r from %r", self, self.parent)
self.parent.remove_file(self)
self.tagger.acoustidmanager.remove(self)
self.state = File.REMOVED
def move(self, parent):
if parent != self.parent:
log.debug("Moving %r from %r to %r", self, self.parent, parent)
self.clear_lookup_task()
self.tagger._acoustid.stop_analyze(self)
if self.parent:
self.clear_pending()
self.parent.remove_file(self)
self.parent = parent
self.parent.add_file(self)
self.acoustid_update()
def _move(self, parent):
if parent != self.parent:
log.debug("Moving %r from %r to %r", self, self.parent, parent)
if self.parent:
self.parent.remove_file(self)
self.parent = parent
self.acoustid_update()
def set_acoustid_fingerprint(self, fingerprint, length=None):
if not fingerprint:
self.acoustid_fingerprint = None
self.acoustid_length = 0
self.tagger.acoustidmanager.remove(self)
elif fingerprint != self.acoustid_fingerprint:
self.acoustid_fingerprint = fingerprint
self.acoustid_length = length or self.metadata.length // 1000
self.tagger.acoustidmanager.add(self, None)
self.acoustid_update()
def acoustid_update(self):
recording_id = None
if self.parent and hasattr(self.parent, 'orig_metadata'):
recording_id = self.parent.orig_metadata['musicbrainz_recordingid']
if not recording_id:
recording_id = self.metadata['musicbrainz_recordingid']
self.tagger.acoustidmanager.update(self, recording_id)
self.update_item()
@classmethod
def supports_tag(cls, name):
"""Returns whether tag ``name`` can be saved to the file."""
return True
def is_saved(self):
return self.similarity == 1.0 and self.state == File.NORMAL
    def update(self, signal=True):
        """Recompute similarity and CHANGED/NORMAL state by diffing the
        working metadata against the on-disk metadata.

        Args:
            signal: when True, also refresh the associated UI item.
        """
        new_metadata = self.new_metadata
        names = set(new_metadata.keys())
        names.update(self.orig_metadata.keys())
        clear_existing_tags = config.setting["clear_existing_tags"]
        ignored_tags = config.setting["compare_ignore_tags"]
        for name in names:
            # Internal (~) tags, unsupported tags and ignored tags do not
            # count as changes.
            if (not name.startswith('~') and self.supports_tag(name)
                    and name not in ignored_tags):
                new_values = new_metadata.getall(name)
                if not (new_values or clear_existing_tags
                        or name in new_metadata.deleted_tags):
                    continue
                orig_values = self.orig_metadata.getall(name)
                if orig_values != new_values:
                    # First difference found: compute similarity once and stop.
                    self.similarity = self.orig_metadata.compare(new_metadata, ignored_tags)
                    if self.state == File.NORMAL:
                        self.state = File.CHANGED
                    break
        else:
            # for/else: no tag difference was found; images may still differ.
            if (self.metadata.images
                    and self.orig_metadata.images != self.metadata.images):
                self.state = File.CHANGED
            else:
                self.similarity = 1.0
                if self.state == File.CHANGED:
                    self.state = File.NORMAL
        if signal:
            log.debug("Updating file %r", self)
            self.update_item()
def can_save(self):
"""Return if this object can be saved."""
return True
def can_remove(self):
"""Return if this object can be removed."""
return True
def can_edit_tags(self):
"""Return if this object supports tag editing."""
return True
def can_analyze(self):
"""Return if this object can be fingerprinted."""
return True
def can_autotag(self):
return True
def can_refresh(self):
return False
def can_view_info(self):
return True
def _info(self, metadata, file):
if hasattr(file.info, 'length'):
metadata.length = int(file.info.length * 1000)
if hasattr(file.info, 'bitrate') and file.info.bitrate:
metadata['~bitrate'] = file.info.bitrate / 1000.0
if hasattr(file.info, 'sample_rate') and file.info.sample_rate:
metadata['~sample_rate'] = file.info.sample_rate
if hasattr(file.info, 'channels') and file.info.channels:
metadata['~channels'] = file.info.channels
if hasattr(file.info, 'bits_per_sample') and file.info.bits_per_sample:
metadata['~bits_per_sample'] = file.info.bits_per_sample
if self.NAME:
metadata['~format'] = self.NAME
else:
metadata['~format'] = self.__class__.__name__.replace('File', '')
self._add_path_to_metadata(metadata)
def _add_path_to_metadata(self, metadata):
metadata['~dirname'] = os.path.dirname(self.filename)
filename, extension = os.path.splitext(os.path.basename(self.filename))
metadata['~filename'] = filename
metadata['~extension'] = extension.lower()[1:]
    @property
    def state(self):
        """Current state of the File object"""
        return self._state
    @state.setter
    def state(self, state):
        # Maintains the class-wide File.num_pending_files counter (see the
        # note above its definition) and notifies the stats UI on any
        # transition into or out of PENDING.
        if state == self._state:
            return
        if state == File.PENDING:
            File.num_pending_files += 1
            self.tagger.tagger_stats_changed.emit()
        elif self._state == File.PENDING:
            File.num_pending_files -= 1
            self.tagger.tagger_stats_changed.emit()
        self._state = state
def column(self, column):
m = self.metadata
if column == "title" and not m["title"]:
return self.base_filename
return m[column]
def _lookup_finished(self, lookuptype, document, http, error):
self.lookup_task = None
if self.state == File.REMOVED:
return
if error:
log.error("Network error encountered during the lookup for %s. Error code: %s",
self.filename, error)
try:
tracks = document['recordings']
except (KeyError, TypeError):
tracks = None
def statusbar(message):
self.tagger.window.set_statusbar_message(
message,
{'filename': self.filename},
timeout=3000
)
if tracks:
if lookuptype == File.LOOKUP_ACOUSTID:
threshold = 0
else:
threshold = config.setting['file_lookup_threshold']
trackmatch = self._match_to_track(tracks, threshold=threshold)
if trackmatch is None:
statusbar(N_("No matching tracks above the threshold for file '%(filename)s'"))
else:
statusbar(N_("File '%(filename)s' identified!"))
(track_id, release_group_id, release_id, acoustid, node) = trackmatch
if lookuptype == File.LOOKUP_ACOUSTID:
self.metadata['acoustid_id'] = acoustid
self.tagger.acoustidmanager.add(self, track_id)
if release_group_id is not None:
releasegroup = self.tagger.get_release_group_by_id(release_group_id)
releasegroup.loaded_albums.add(release_id)
self.tagger.move_file_to_track(self, release_id, track_id)
else:
self.tagger.move_file_to_nat(self, track_id, node=node)
else:
statusbar(N_("No matching tracks for file '%(filename)s'"))
self.clear_pending()
    def _match_to_track(self, tracks, threshold=0):
        """Pick the best-matching track from *tracks* by metadata similarity.

        Returns None when no candidate reaches *threshold*, otherwise a tuple
        (track_id, release_group_id, release_id, acoustid, node) where the
        last four entries may be None.
        """
        # multiple matches -- calculate similarities to each of them
        def candidates():
            for track in tracks:
                yield self.metadata.compare_to_track(track, self.comparison_weights)
        no_match = SimMatchTrack(similarity=-1, releasegroup=None, release=None, track=None)
        best_match = find_best_match(candidates, no_match)
        if best_match.similarity < threshold:
            return None
        else:
            track_id = best_match.result.track['id']
            release_group_id, release_id, node = None, None, None
            acoustid = best_match.result.track.get('acoustid', None)
            if best_match.result.release:
                release_group_id = best_match.result.releasegroup['id']
                release_id = best_match.result.release['id']
            elif 'title' in best_match.result.track:
                # A standalone (non-album) recording: keep the raw node so the
                # caller can create a NAT entry from it.
                node = best_match.result.track
            return (track_id, release_group_id, release_id, acoustid, node)
def lookup_metadata(self):
"""Try to identify the file using the existing metadata."""
if self.lookup_task:
return
self.tagger.window.set_statusbar_message(
N_("Looking up the metadata for file %(filename)s ..."),
{'filename': self.filename}
)
self.clear_lookup_task()
metadata = self.metadata
self.set_pending()
self.lookup_task = self.tagger.mb_api.find_tracks(
partial(self._lookup_finished, File.LOOKUP_METADATA),
track=metadata['title'],
artist=metadata['artist'],
release=metadata['album'],
tnum=metadata['tracknumber'],
tracks=metadata['totaltracks'],
qdur=str(metadata.length // 2000),
isrc=metadata['isrc'],
limit=QUERY_LIMIT)
def clear_lookup_task(self):
if self.lookup_task:
self.tagger.webservice.remove_task(self.lookup_task)
self.lookup_task = None
def set_pending(self):
if self.state != File.REMOVED:
self.state = File.PENDING
self.update_item()
def clear_pending(self):
if self.state == File.PENDING:
self.state = File.NORMAL if self.similarity == 1.0 else File.CHANGED
self.update_item()
def update_item(self):
if self.item:
self.item.update()
def iterfiles(self, save=False):
yield self
@property
def tracknumber(self):
"""The track number as an int."""
try:
return int(self.metadata["tracknumber"])
except BaseException:
return 0
@property
def discnumber(self):
"""The disc number as an int."""
try:
return int(self.metadata["discnumber"])
except BaseException:
return 0
_file_post_load_processors = PluginFunctions(label='file_post_load_processors')
_file_post_addition_to_track_processors = PluginFunctions(label='file_post_addition_to_track_processors')
_file_post_removal_from_track_processors = PluginFunctions(label='file_post_removal_from_track_processors')
_file_post_save_processors = PluginFunctions(label='file_post_save_processors')
def register_file_post_load_processor(function, priority=PluginPriority.NORMAL):
"""Registers a file-loaded processor.
Args:
function: function to call after file has been loaded, it will be passed the file object
priority: optional, PluginPriority.NORMAL by default
Returns:
None
"""
_file_post_load_processors.register(function.__module__, function, priority)
def register_file_post_addition_to_track_processor(function, priority=PluginPriority.NORMAL):
"""Registers a file-added-to-track processor.
Args:
function: function to call after file addition, it will be passed the track and file objects
priority: optional, PluginPriority.NORMAL by default
Returns:
None
"""
_file_post_addition_to_track_processors.register(function.__module__, function, priority)
def register_file_post_removal_from_track_processor(function, priority=PluginPriority.NORMAL):
    """Registers a file-removed-from-track processor.

    Args:
        function: function to call after file removal;
            it will be passed the track and file objects
        priority: optional, PluginPriority.NORMAL by default

    Returns:
        None
    """
    _file_post_removal_from_track_processors.register(function.__module__, function, priority)
def register_file_post_save_processor(function, priority=PluginPriority.NORMAL):
    """Registers a file-saved processor.

    Args:
        function: function to call after save; it will be passed the file object
        priority: optional, PluginPriority.NORMAL by default

    Returns:
        None
    """
    _file_post_save_processors.register(function.__module__, function, priority)
def run_file_post_load_processors(file_object):
    """Run all registered post-load processors on *file_object*."""
    _file_post_load_processors.run(file_object)
def run_file_post_addition_to_track_processors(track_object, file_object):
    """Run all registered post-addition processors with the track and file."""
    _file_post_addition_to_track_processors.run(track_object, file_object)
def run_file_post_removal_from_track_processors(track_object, file_object):
    """Run all registered post-removal processors with the track and file."""
    _file_post_removal_from_track_processors.run(track_object, file_object)
def run_file_post_save_processors(file_object):
    """Run all registered post-save processors on *file_object*."""
    _file_post_save_processors.run(file_object)
| Sophist-UK/Sophist_picard | picard/file.py | Python | gpl-2.0 | 33,726 |
'''
Modulo Movimiento Nanometros
@author: P1R0
import ObjSerial, sys;
ObjSer = ObjSerial.ObjSerial(0,9600)
ObjSer.cts = True
ObjSer.dtr = True
ObjSer.bytesize = 8
'''
SxN = 59.71  # Motor calibration constant (microsteps per nanometer; see Calcula)
# Function to initialize the monochromator
def init(ObjSer,A):
    """Send the monochromator initialization command sequence.

    ObjSer: serial object used to talk to the motor controller.
    A: pass 1 when the stage was left out of range, to move it back
       (+3925 microsteps) after homing; any other value skips that move.
    Returns 0.
    """
    # Each command is terminated with CRLF and the controller's echo is
    # consumed right after it is sent (same order as the original
    # hand-unrolled sequence).
    startup_sequence = [
        "A", "0A", "A", "0A", "0R",
        "0U1", "0V1", "0T400", "0K1",
        "0Y1", "0Y0", "0K0",
        "0V1", "0T1000", "0F-",
        "0V1", "0T400", "0K1",
        "0V1", "0T4000", "0K0", "0M99999",
        "0K1", "0V1", "0T400",
        "0M-3925",  # at the zero position
    ]
    ObjSer.flushOutput()
    for command in startup_sequence:
        ObjSer.write(unicode("%s\r\n" % command))
        echo(ObjSer)
    # If the stage was out of range, the caller passes A == 1.
    if A == 1:
        ObjSer.write(unicode("0M3925\r\n"))
        echo(ObjSer)
    return 0
# Approximate the calibration error by linear interpolation.
def Error(x):
    """Return the interpolated calibration error (nm) at wavelength x.

    X holds the calibration wavelengths and Y the measured errors at those
    points; the error at x is linearly interpolated between the two
    surrounding calibration points.  For x <= 0 the first calibration
    error (0) is returned.  Values beyond the last calibration point
    (1492) raise IndexError, as in the original table walk.
    """
    Y = [0,
         0.010373807,
         -0.05124284,
         -0.227092782,
         -0.572418858,
         -1.150211522,
         -2.019461229,
         -3.247663205,
         -4.904050745,
         -7.062119076,
         -9.803353877,
         -13.21724083,
         -17.39877039,
         -22.45717585,
         -28.51818573,
         -35.71928571,
         -44.22644716,
         -54.22539859,
         -65.94810183,
         -79.66102345,
         -95.70661095,  # bugfix: was +95.70661095, breaking the monotone error curve
         -114.4980595,
         -136.5895354,
         -162.693691,
         -193.8151306,
         -231.3914014,
         -277.6754313,
         -336.5191712,
         -415.6610186,
         -536.5034235,
         -763.8268297,
         -804.7677106]
    X = [0,
         50.002,
         99.999,
         149.999,
         199.997,
         249.997,
         300.007,
         349.993,
         400.003,
         449.997,
         499.994,
         550.005,
         600.002,
         649.993,
         700.003,
         749.995,
         800.004,
         849.995,
         900.004,
         949.999,
         1000.006,
         1049.997,
         1100.004,
         1150.001,
         1200.005,
         1250.002,
         1300,
         1349.999,
         1399.998,
         1449.998,  # bugfix: was 449.998, which broke the ascending order of X
         1490,
         1492]
    i = 0
    y = Y[0]  # bugfix: y was previously undefined when x <= X[0]
    while x > X[i]:
        x0 = X[i]
        y0 = Y[i]
        x1 = X[i + 1]
        y1 = Y[i + 1]
        i = i + 1
        r = y1 - y0
        d = r / (x1 - x0)
        y = y0 + (d * (x - x0))
    return y
# Compute the corrected microstep target and move the motor.
def Calcula(ObjSer,Nm,LastPos):
    """Convert Nm nanometers to an absolute microstep position, move the
    motor by the difference from LastPos, and return the new position.

    ObjSer: serial connection to the motor controller.
    Nm: target wavelength in nanometers.
    LastPos: previous absolute position in microsteps.
    """
    Er=Error(Nm);  # calibration error at this wavelength
    NmyEr = Nm - Er;  # nanometers corrected by the calibration error
    uS = NmyEr * SxN;  # convert nanometers to microsteps
    dif = uS - int(uS);
    # Round to the nearest whole microstep.
    # NOTE(review): int() truncates toward zero, so this rounding is only
    # correct for non-negative uS -- confirm the expected range.
    if dif > 0.5:
        uS = int(uS) + 1;
    else:
        uS = int(uS);
    Mover = uS - LastPos;  # relative move from the last position
    print "La diferencia a mover es: %d" % Mover;
    Mueve(ObjSer,Mover);
    LastPos = uS;
    return LastPos
# Read back and display the serial controller's echo.
def echo(ObjSer):
    """Read one response line from the serial object and print it."""
    response = ObjSer.readline()
    print(response)
# Move the motor by the given number of microsteps.
def Mueve(ObjSer, Mover):
    """Issue the move sequence for *Mover* microsteps.

    The bulk of the travel is done in full steps; the last 3 microsteps
    are issued separately for a softer approach.
    """
    # Full steps from the requested microsteps (5 microsteps per full step,
    # minus the 3 reserved for the soft landing).
    # NOTE: '/' is Python 2 integer division when Mover is an int.
    MoverFS = ((Mover-3) / 5)
    sequence = [
        "0U0", "0V1", "0T1000",
        "0M%d" % MoverFS,
        "0U1", "0V1", "0T400",
        "0M3",  # final 3 microsteps for a smoother approach
    ]
    ObjSer.flushOutput()
    for command in sequence:
        ObjSer.write(unicode("%s\r\n" % command))
        echo(ObjSer)
'''
if __name__ == "__main__":
N = 0;
LastPos = 0;
init(0);
while 1:
while type(N)!= float:
try:
N = raw_input("Ingresa Nanometros o quit para cerrar:");
if N == "quit":
ObjSer.close();
sys.exit(0);
N = float(N);
except (ValueError, TypeError):
print "error, el valor debe ObjSer entero o flotante";
LastPos = Calcula(N,LastPos);
print "los microspasos totales son: %d" % LastPos;
N=0
''' | P1R/freeMonoCrom | MM.py | Python | gpl-2.0 | 5,424 |
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#PY25 compatible for GAE.
#
"""Encoding related utilities."""
import re
import sys ##PY25
# Byte-value (0-255) -> escaped-text lookup tables used by CEscape below.
# Lookup table for utf8
_cescape_utf8_to_str = [chr(i) for i in range(0, 256)]
_cescape_utf8_to_str[9] = r'\t'  # optional escape
_cescape_utf8_to_str[10] = r'\n'  # optional escape
_cescape_utf8_to_str[13] = r'\r'  # optional escape
_cescape_utf8_to_str[39] = r"\'"  # optional escape
_cescape_utf8_to_str[34] = r'\"'  # necessary escape
_cescape_utf8_to_str[92] = r'\\'  # necessary escape
# Lookup table for non-utf8, with necessary escapes at (o >= 127 or o < 32)
_cescape_byte_to_str = ([r'\%03o' % i for i in range(0, 32)] +
                        [chr(i) for i in range(32, 127)] +
                        [r'\%03o' % i for i in range(127, 256)])
_cescape_byte_to_str[9] = r'\t'  # optional escape
_cescape_byte_to_str[10] = r'\n'  # optional escape
_cescape_byte_to_str[13] = r'\r'  # optional escape
_cescape_byte_to_str[39] = r"\'"  # optional escape
_cescape_byte_to_str[34] = r'\"'  # necessary escape
_cescape_byte_to_str[92] = r'\\'  # necessary escape
def CEscape(text, as_utf8):
  """Escape a bytes string for use in an ascii protocol buffer.

  Unprintable characters are rendered as 3-digit octal escapes rather
  than hex escapes, because the C++ unescaper accepts hex escapes of any
  length and would mis-read two-digit forms followed by hex digits.

  Args:
    text: A byte string to be escaped
    as_utf8: Specifies if result should be returned in UTF-8 encoding

  Returns:
    Escaped string
  """
  # On PY3 iterating bytes already yields ints; str needs ord().
  if isinstance(text, str):
    to_ord = ord
  else:
    to_ord = lambda b: b
  table = _cescape_utf8_to_str if as_utf8 else _cescape_byte_to_str
  return ''.join(table[to_ord(c)] for c in text)
# Matches a hex escape with exactly one hex digit so CUnescape can pad it.
_CUNESCAPE_HEX = re.compile(r'(\\+)x([0-9a-fA-F])(?![0-9a-fA-F])')
# Re-encodes high-bit bytes as octal escapes on the non-PY2 path of CUnescape.
_cescape_highbit_to_str = ([chr(i) for i in range(0, 127)] +
                           [r'\%03o' % i for i in range(127, 256)])
def CUnescape(text):
  """Unescape a text string with C-style escape sequences to UTF-8 bytes."""
  def ReplaceHex(m):
    # Only replace the match if the number of leading back slashes is odd. i.e.
    # the slash itself is not escaped.
    if len(m.group(1)) & 1:
      return m.group(1) + 'x0' + m.group(2)
    return m.group(0)
  # This is required because the 'string_escape' encoding doesn't
  # allow single-digit hex escapes (like '\xf').
  result = _CUNESCAPE_HEX.sub(ReplaceHex, text)
  # The ##PY25 / ##!PY25 markers below appear to be preprocessor hints for
  # generating a Python 2.5 compatible copy -- do not edit them.
  if sys.version_info[0] < 3:  ##PY25
##!PY25  if str is bytes:  # PY2
    return result.decode('string_escape')
  result = ''.join(_cescape_highbit_to_str[ord(c)] for c in result)
  return (result.encode('ascii')  # Make it bytes to allow decode.
          .decode('unicode_escape')
          # Make it bytes again to return the proper type.
          .encode('raw_unicode_escape'))
| cherrishes/weilai | xingxing/protobuf/python/lib/Python3.4/google/protobuf/text_encoding.py | Python | apache-2.0 | 4,685 |
"""SCons.Tool.sgicc
Tool-specific initialization for MIPSPro cc on SGI.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sgicc.py 3842 2008/12/20 22:59:52 scons"
import cc
def generate(env):
    """Add Builders and construction variables for SGI MIPSPro cc to an
    Environment.  (Docstring previously said "gcc" -- this module is the
    SGI cc tool.)"""
    cc.generate(env)
    # The MIPSPro C++ compiler driver is 'CC'.
    env['CXX'] = 'CC'
    # Shared objects use the plain .o suffix on this platform.
    env['SHOBJSUFFIX'] = '.o'
    env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
def exists(env):
    """Return a truthy value when the 'cc' compiler can be detected."""
    detected = env.Detect('cc')
    return detected
| makinacorpus/mapnik2 | scons/scons-local-1.2.0/SCons/Tool/sgicc.py | Python | lgpl-2.1 | 1,749 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2016 Simone Donadello
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
PRG_NAME, PRG_VERSION = "EC - Experiment Control", "0.6.2"
GREEN = "#228B22"
RED = "#B22222"
BLUE = "#0000CD"
| simondona/exp-control-bec-tn | gui/constants.py | Python | gpl-3.0 | 835 |
"""The Netio switch component."""
from collections import namedtuple
from datetime import timedelta
import logging
from pynetio import Netio
import voluptuous as vol
from homeassistant import util
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
STATE_ON,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
# Extra state attributes exposed by NetioSwitch.state_attributes.
ATTR_START_DATE = "start_date"
ATTR_TOTAL_CONSUMPTION_KWH = "total_energy_kwh"
CONF_OUTLETS = "outlets"
DEFAULT_PORT = 1234
DEFAULT_USERNAME = "admin"
# One record per Netio host: the pynetio connection plus its switch entities.
Device = namedtuple("device", ["netio", "entities"])
DEVICES = {}
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
# NOTE(review): REQ_CONF is not referenced anywhere in this module.
REQ_CONF = [CONF_HOST, CONF_OUTLETS]
# Push endpoint the Netio device posts its readings to (see NetioApiView).
URL_API_NETIO_EP = "/api/netio/{host}"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Required(CONF_PORT, default=DEFAULT_PORT): cv.port,
        vol.Required(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
        vol.Required(CONF_PASSWORD): cv.string,
        vol.Optional(CONF_OUTLETS): {cv.string: cv.string},
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Netio platform."""
    host = config.get(CONF_HOST)
    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    port = config.get(CONF_PORT)
    if not DEVICES:
        # Register the HTTP push endpoint only once, for the first device.
        hass.http.register_view(NetioApiView)
    dev = Netio(host, port, username, password)
    DEVICES[host] = Device(dev, [])
    # Throttle the update for all Netio switches of one Netio
    dev.update = util.Throttle(MIN_TIME_BETWEEN_SCANS)(dev.update)
    # One switch entity per configured outlet (key -> friendly name).
    for key in config[CONF_OUTLETS]:
        switch = NetioSwitch(DEVICES[host].netio, key, config[CONF_OUTLETS][key])
        DEVICES[host].entities.append(switch)
    add_entities(DEVICES[host].entities)
    # Close device connections when Home Assistant shuts down.
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, dispose)
    return True
def dispose(event):
    """Close connections to all known Netio devices.

    *event* is the Home Assistant stop event; it is unused.
    """
    # Iterate values directly -- the host keys are not needed.
    for device in DEVICES.values():
        device.netio.stop()
class NetioApiView(HomeAssistantView):
    """WSGI handler class receiving push updates from a Netio device."""
    url = URL_API_NETIO_EP
    name = "api:netio"
    @callback
    def get(self, request, host):
        """Request handler: parse the device's query-string report and
        push the new state to every entity of that host."""
        data = request.query
        states, consumptions, cumulated_consumptions, start_dates = [], [], [], []
        # The device reports four outlets, named output1..output4.
        for i in range(1, 5):
            out = "output%d" % i
            states.append(data.get("%s_state" % out) == STATE_ON)
            consumptions.append(float(data.get("%s_consumption" % out, 0)))
            # Divided by 1000 -- presumably Wh to kWh; confirm with device docs.
            cumulated_consumptions.append(
                float(data.get("%s_cumulatedConsumption" % out, 0)) / 1000
            )
            start_dates.append(data.get("%s_consumptionStart" % out, ""))
        _LOGGER.debug(
            "%s: %s, %s, %s since %s",
            host,
            states,
            consumptions,
            cumulated_consumptions,
            start_dates,
        )
        # Store the parsed values on the shared pynetio object ...
        ndev = DEVICES[host].netio
        ndev.consumptions = consumptions
        ndev.cumulated_consumptions = cumulated_consumptions
        ndev.states = states
        ndev.start_dates = start_dates
        # ... and notify every entity of this host about the new state.
        for dev in DEVICES[host].entities:
            dev.async_write_ha_state()
        return self.json(True)
class NetioSwitch(SwitchEntity):
    """Provide a Netio linked switch (one outlet of a Netio device)."""
    def __init__(self, netio, outlet, name):
        """Initialize the Netio switch.

        netio: shared pynetio connection for the host.
        outlet: 1-based outlet number (as string or int).
        name: friendly name from the configuration.
        """
        self._name = name
        self.outlet = outlet
        self.netio = netio
    @property
    def name(self):
        """Return the device's name."""
        return self._name
    @property
    def available(self):
        """Return true if entity is available."""
        # NOTE(review): the presence of a 'telnet' attribute appears to
        # signal an unfinished connection -- confirm against pynetio.
        return not hasattr(self, "telnet")
    def turn_on(self, **kwargs):
        """Turn switch on."""
        self._set(True)
    def turn_off(self, **kwargs):
        """Turn switch off."""
        self._set(False)
    def _set(self, value):
        # Build the 4-character port command: only this outlet's slot is
        # set to '1'/'0'; the others stay 'u' (presumably "unchanged").
        val = list("uuuu")
        val[int(self.outlet) - 1] = "1" if value else "0"
        self.netio.get("port list %s" % "".join(val))
        # Update the locally cached state immediately.
        self.netio.states[int(self.outlet) - 1] = value
        self.schedule_update_ha_state()
    @property
    def is_on(self):
        """Return the switch's status."""
        return self.netio.states[int(self.outlet) - 1]
    def update(self):
        """Update the state."""
        # Throttled in setup_platform, so at most one poll per scan window.
        self.netio.update()
    @property
    def state_attributes(self):
        """Return optional state attributes."""
        return {
            ATTR_TOTAL_CONSUMPTION_KWH: self.cumulated_consumption_kwh,
            ATTR_START_DATE: self.start_date.split("|")[0],
        }
    @property
    def current_power_w(self):
        """Return actual power."""
        return self.netio.consumptions[int(self.outlet) - 1]
    @property
    def cumulated_consumption_kwh(self):
        """Return the total energy consumption since start_date."""
        return self.netio.cumulated_consumptions[int(self.outlet) - 1]
    @property
    def start_date(self):
        """Point in time when the energy accumulation started."""
        return self.netio.start_dates[int(self.outlet) - 1]
| nkgilley/home-assistant | homeassistant/components/netio/switch.py | Python | apache-2.0 | 5,406 |
"""
Django Generic Settings
"""
from GenSettingDict import GenSettingDict
# Shared settings container for the package.
# (It was previously re-assigned a second time at the bottom of the module,
# pointlessly discarding this instance; the duplicate has been removed.)
DEFAULTSETTINGS = GenSettingDict()

__title__ = 'Django Generic Settings'
__version__ = '0.2.0'
__author__ = 'Paul Gueltekin'
__license__ = 'LGPL 2'
__copyright__ = 'Copyright 2015 Paul Gueltekin'

# Version synonym
VERSION = __version__

# Header encoding (see RFC5987)
HTTP_HEADER_ENCODING = 'iso-8859-1'

# Default datetime input and output formats
ISO_8601 = 'iso-8601'
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import json
import pytest
from telegram import LabeledPrice
@pytest.fixture(scope='class')
def labeled_price():
    """Class-scoped fixture building a LabeledPrice from the test constants."""
    return LabeledPrice(TestLabeledPrice.label, TestLabeledPrice.amount)
class TestLabeledPrice:
    """Tests for telegram.LabeledPrice."""
    label = 'label'
    amount = 100
    def test_expected_values(self, labeled_price):
        """The constructor stores label and amount unchanged."""
        assert labeled_price.label == self.label
        assert labeled_price.amount == self.amount
    def test_to_json(self, labeled_price):
        """to_json() produces parseable JSON."""
        json.loads(labeled_price.to_json())
    def test_to_dict(self, labeled_price):
        """to_dict() returns a dict exposing label and amount."""
        labeledprice_dict = labeled_price.to_dict()
        assert isinstance(labeledprice_dict, dict)
        assert labeledprice_dict['label'] == labeled_price.label
        assert labeledprice_dict['amount'] == labeled_price.amount
| rogerscristo/BotFWD | env/lib/python3.6/site-packages/pytests/test_labeledprice.py | Python | mit | 1,631 |
#!/usr/bin/env python
# Taken and modified from:
# http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
import atexit
import os
import signal
import sys
import time
# Prefer the platform's null device; fall back to the POSIX path on
# interpreters where os.devnull is missing.
if hasattr(os, "devnull"):
    DEVNULL = os.devnull
else:
    DEVNULL = "/dev/null"
class Daemon(object):
    """
    A generic daemon class.

    Usage: subclass the Daemon class and override the _run() method.
    NOTE: Python 2 only (uses file() and 'except E, e' syntax).
    """
    def __init__(self, serviceName, pidfile, stdin=DEVNULL, stdout=DEVNULL, stderr=DEVNULL):
        # serviceName is also used for the /var/lock/subsys marker file.
        super(Daemon, self).__init__()
        self._serviceName = serviceName
        self._stdin = stdin
        self._stdout = stdout
        self._stderr = stderr
        self._pidfile = pidfile
    def _daemonize(self):
        """
        Do the UNIX double-fork magic, see Stevens' "Advanced
        Programming in the UNIX Environment" for details (ISBN 0201563177)
        http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        """
        try:
            pid = os.fork()
            if pid > 0:
                # exit first parent
                sys.exit(0)
        except OSError, e:
            sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)
        # decouple from parent environment
        os.chdir("/")
        os.setsid()
        os.umask(0)
        # do second fork
        try:
            pid = os.fork()
            if pid > 0:
                # exit from second parent
                sys.exit(0)
        except OSError, e:
            sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
            sys.exit(1)
        # redirect standard file descriptors (stderr unbuffered: bufsize 0)
        sys.stdout.flush()
        sys.stderr.flush()
        si = file(self._stdin, 'r')
        so = file(self._stdout, 'a+')
        se = file(self._stderr, 'a+', 0)
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())
        # write pidfile and subsys file
        pid = str(os.getpid())
        file(self._pidfile,'w+').write("%s\n" % pid)
        if os.path.exists('/var/lock/subsys'):
            fh = open(os.path.join('/var/lock/subsys', self._serviceName), 'w')
            fh.close()
    def _delpid(self):
        # Remove the pidfile and subsys marker, then let the subclass clean up.
        if os.path.exists(self._pidfile):
            os.remove(self._pidfile)
        subsysPath = os.path.join('/var/lock/subsys', self._serviceName)
        if os.path.exists(subsysPath):
            os.remove(subsysPath)
        self._cleanup()
    def start(self, daemonize=True):
        """
        Start the daemon
        """
        # Check for a pidfile to see if the daemon already runs
        try:
            pf = file(self._pidfile,'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None
        if pid:
            message = "pidfile %s already exist. Daemon already running?\n"
            sys.stderr.write(message % self._pidfile)
            sys.exit(1)
        # Start the daemon
        if daemonize:
            self._daemonize()
        # Cleanup handling: remove pid/subsys files on SIGTERM and at exit.
        def termHandler(signum, frame):
            self._delpid()
        signal.signal(signal.SIGTERM, termHandler)
        atexit.register(self._delpid)
        # Run the daemon
        self._run()
    def stop(self):
        """
        Stop the daemon
        """
        # Get the pid from the pidfile
        try:
            pf = file(self._pidfile,'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None
        if not pid:
            message = "pidfile %s does not exist. Daemon not running?\n"
            sys.stderr.write(message % self._pidfile)
            return # not an error in a restart
        # Try killing the daemon process: keep signalling until the kill
        # raises "No such process", then remove the stale pidfile.
        # NOTE(review): matching strerror text is locale-dependent;
        # comparing errno against ESRCH would be more robust.
        try:
            while 1:
                os.kill(pid, signal.SIGTERM)
                time.sleep(0.1)
        except OSError, err:
            err = str(err)
            if err.find("No such process") > 0:
                if os.path.exists(self._pidfile):
                    os.remove(self._pidfile)
            else:
                print str(err)
                sys.exit(1)
    def foreground(self):
        # Run without forking; useful for debugging and supervisors.
        self.start(daemonize=False)
    def restart(self, daemonize=True):
        """
        Restart the daemon
        """
        self.stop()
        self.start(daemonize)
    def _run(self):
        """
        You should override this method when you subclass Daemon. It will be
        called after the process has been daemonized by start() or restart().
        """
        raise NotImplementedError('You must implement the method in your class.')
    def _cleanup(self):
        """
        You should override this method when you subclass Daemon. It will be
        called when the daemon exits.
        """
        raise NotImplementedError('You must implement the method in your class.')
| Nvizible/shotgunEvents | src/daemonizer.py | Python | mit | 5,101 |
"""Implementation of scheduler reports API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import fnmatch
import logging
import time
import kazoo.exceptions
from treadmill import context
from treadmill import exc
from treadmill import reports
from treadmill import zknamespace as z
from treadmill import logcontext as lc
from treadmill import scheduler as tm_sched
from treadmill.scheduler import loader
from treadmill.scheduler import zkbackend
_LOGGER = logging.getLogger(__name__)
# How long a cached read-only scheduler stays fresh.
_CACHE_TIMEOUT = 180  # 3 mins
# Epoch timestamp of the last read-only scheduler (re)load.
_LAST_CACHE_UPDATE = 0
# Cached read-only scheduler (name keeps the historical "SHEDULER" misspelling).
_RO_SHEDULER_INSTANCE = None
def get_readonly_scheduler():
    """Return the cached read-only scheduler, rebuilding it when stale."""
    # C0103(invalid-name): invalid variable name
    # W0603(global-statement): using the global statement
    # pylint: disable=C0103,W0603
    global _RO_SHEDULER_INSTANCE, _LAST_CACHE_UPDATE
    stale = time.time() - _LAST_CACHE_UPDATE > _CACHE_TIMEOUT
    if stale or not _RO_SHEDULER_INSTANCE:
        tm_sched.DIMENSION_COUNT = 3
        backend = zkbackend.ZkReadonlyBackend(context.GLOBAL.zk.conn)
        ro_scheduler = loader.Loader(backend, context.GLOBAL.cell)
        ro_scheduler.load_model()
        _RO_SHEDULER_INSTANCE = ro_scheduler
        _LAST_CACHE_UPDATE = time.time()
    return _RO_SHEDULER_INSTANCE
def mk_explainapi():
    """API factory function returning _ExplainAPI class."""
    class _ExplainAPI:
        """API object implementing the scheduler explain functionality."""
        def __init__(self):
            # Delegate straight to the module-level _explain helper.
            self.get = _explain
    return _ExplainAPI
class API:
    """Scheduler reports API."""
    def __init__(self):
        def get(report_type, match=None, partition=None):
            """Fetch report from ZooKeeper and return it as a DataFrame.

            Raises KeyError when no report of this type is stored.
            """
            # Keep the try block minimal: only the ZooKeeper read raises
            # NoNodeError; errors from deserialization or filtering should
            # propagate as-is instead of being masked as a missing report.
            try:
                data, _meta = context.GLOBAL.zk.conn.get(
                    z.path.state_report(report_type)
                )
            except kazoo.exceptions.NoNodeError:
                raise KeyError(report_type)
            df = reports.deserialize_dataframe(data)
            if match:
                df = _match_by_name(df, report_type, match)
            if partition:
                df = _match_by_partition(df, partition)
            return df
        self.get = get
        self.explain = mk_explainapi()()
def _match_by_name(dataframe, report_type, match):
"""Interpret match with report type and return resulting DataFrame.
"""
pk_match = {
'allocations': 'name',
'apps': 'instance',
'servers': 'name'
}
match = fnmatch.translate(match)
subidx = dataframe[pk_match[report_type]].str.match(match)
return dataframe.loc[subidx].reset_index(drop=True)
def _match_by_partition(dataframe, partition):
"""Filter out dataframes that don't match partition.
"""
partition = fnmatch.translate(partition)
subidx = dataframe['partition'].str.match(partition)
return dataframe.loc[subidx].reset_index(drop=True)
def _explain(inst_id):
    """Explain application placement."""
    with lc.LogContext(_LOGGER, inst_id):
        started = time.time()
        ro_scheduler = get_readonly_scheduler()
        _LOGGER.info(
            'ro_scheduler was ready in %s secs', time.time() - started
        )
        try:
            instance = ro_scheduler.cell.apps[inst_id]
        except KeyError:
            raise exc.NotFoundError(inst_id)
        # A placed instance needs no explanation -- report where it is.
        if instance.server:
            raise exc.FoundError(
                'instance {} is already placed on {}'.format(
                    inst_id, instance.server
                )
            )
        return reports.explain_placement(
            ro_scheduler.cell, instance, 'servers'
        )
| Morgan-Stanley/treadmill | lib/python/treadmill/api/scheduler.py | Python | apache-2.0 | 3,769 |
"""
Union-Find:
1. Given a set of numbers, Do Random Joins as tuples or sets;
2. Given a set of Unions, Find Connections;
Quick Weighted Union:
- Array arr contains all numbers as indexes, the size of arr is noted;
- Sub Module Get_Root finds the root of the current key. if cur_key root is not set to this root, sets it.
- A merge checks for size of each arr, adds the root of Smaller Tree to Larger Tree. Size of Tree is determined either by:
- Height
- Number of Nodes
- Rank
"""
def find_connection(num1, num2, arr):
    '''Return True when num1 and num2 share a (set) root in arr.'''
    # Compute each root once instead of calling get_root three times.
    root1 = get_root(num1, arr)
    if root1 is None:
        return False
    return root1 == get_root(num2, arr)
def get_root(num, arr):
    '''
    Follow parent links in arr from *num* until a self-referencing root
    is reached; return that root, or None when the entry is unset.
    '''
    if arr[num] is None:
        return None
    while num != arr[num]:
        num = arr[num]
    return num
def set_root(num, root, arr):
    '''
    Point node *num* directly at *root* in arr (O(1)).
    '''
    arr[num] = root
def set_all_root(num1_root, num2_root, arr):
'''
~lgN
Set root of all numbers pointing to the the num @num1_root to the num @ num2_root in arr
'''
res = arr
ln = len(res)/2
stack = [res[:ln], res[ln:]]
while stack:
#lgN
res = stack.pop()
if len(res) != 1:
ln = len(res)/2
stack.insert(0, res[ln:])
stack.insert(0, res[:ln])
elif res[0] == num1_root:
#O(1)
set_root(res[0], num2_root, arr)
def get_size(key, arr):
    '''
    Count the parent-links followed from *key* to its root in arr
    (i.e. the depth of the node, used as a tree-size proxy).

    Returns:
        the hop count
    '''
    hops = 0
    while key != arr[key]:
        key = arr[key]
        hops += 1
    return hops
def get_root_set_size(num, arr, size_arr):
    '''
    Same as get_root, but additionally records the node count along the
    path (including the start node) for the root key in size_arr.
    '''
    if arr[num] is None:
        return None
    steps = 1
    while num != arr[num]:
        num = arr[num]
        steps += 1
    # Inlined set_size(): remember the count for the root key.
    size_arr[num] = steps
    return num
def set_size(root_key, count, size_arr):
    '''
    Record *count* nodes for the tree rooted at *root_key*.

    root_key: root key of the tree
    count: number of nodes in the tree
    size_arr: dict mapping roots to their tree's size
    '''
    size_arr[root_key] = count
def find_greatest_element_component(num, arr):
    '''
    Return the largest value seen on the path from *num* up to its root
    (intended as find(i): the largest element of i's connected component).
    '''
    largest = num
    while num != arr[num]:
        num = arr[num]
        largest = max(largest, num)
    return largest
def find_lowest_element_component(num, arr):
    '''
    Mirror of find_greatest_element_component: return the smallest value
    seen on the path from *num* up to its root.
    '''
    smallest = num
    while num != arr[num]:
        num = arr[num]
        smallest = min(smallest, num)
    return smallest
def qwc(num1, num2, arr, size_arr):
    '''
    Weighted Quick Union with Path Compression
    Given the size_arr sized array and two weighted numbers num1 and num2:
    1. Find their roots.
    2. If same, return
    3. If different:
        - Find size of each root's tree;
        - Cmp for the smaller tree;
        - Point the self reflecting root of smaller tree parent node to larger tree parent node;
    4. return merged tree;
    Params:
        arr: Array of keys in question pointing to its immediate root
        size_arr: hashmap/dict of roots and their tree's corresponding size count
    Returns:
        the merged arr, or None when num1 and num2 were already connected.
    '''
    #Find if there alrdy exists a connection no need for union
    if find_connection(num1, num2, arr): return None #O(N)
    #get the root key of the given current key #~lgN - ~(N)
    num1_root = get_root(num1, arr)
    num2_root = get_root(num2, arr)
    #set the current key's root if not set
    #O(1)
    # Path compression: link each queried node straight to its root.
    set_root(num1, num1_root, arr)
    set_root(num2, num2_root, arr)
    #Get the size of the current root keys tree - O(1)
    num1_tree_size = size_arr[num1_root]
    num2_tree_size = size_arr[num2_root]
    #set the smaller tree's root to bigger one
    if ( num2_tree_size > num1_tree_size ):
        set_root(num1_root, num2_root, arr)
        set_all_root(num1_root, num2_root, arr)
        # Merged tree's size is the sum of both trees' sizes.
        set_size(num2_root, size_arr[num2_root]+size_arr[num1_root], size_arr) #size_arr[num2_root] += size_arr[num1_root]
        #set_size( num1_root, size_arr[num2_root], size_arr ) #size_arr[num1_root] = size_arr[num2_root]
    else:
        set_root(num2_root, num1_root, arr)
        set_all_root(num2_root, num1_root, arr)
        set_size(num1_root, size_arr[num1_root]+size_arr[num2_root], size_arr)
        #set_size(num2_root, size_arr[num1_root], size_arr)
    return arr
if __name__ == "__main__":
#for debugging purposes only
from random import randint, choice
seed = 25
numbers = {randint(0, seed) for _ in xrange(seed)}
arr = [i for i in xrange(max(numbers))]
size_arr = {i: get_size(i, arr)+1 for i in arr}
assert find_connection(1, 2, arr) == False
assert get_root(1, arr) == 1
assert get_root(2, arr) == 2
assert get_root(0, arr) == 0
print qwc(1,2, arr, size_arr)
print qwc(3,5, arr, size_arr)
print qwc(5,2, arr, size_arr)
assert get_root(2, arr) == 3
print qwc(10,19, arr, size_arr)
print qwc(6,2, arr, size_arr)
print "--"*10
print qwc(11,20, arr, size_arr)
print qwc(20,21, arr, size_arr)
print qwc(16,21, arr, size_arr)
print qwc(14,23, arr, size_arr)
print qwc(21,23, arr, size_arr)
print "--"*10
print qwc(23,2, arr, size_arr)
print "--"*10
| codecakes/algorithms | algorithms/week1/union_find.py | Python | mit | 6,494 |
from __future__ import absolute_import
import logging
from opentracing.ext import tags
from tornado.stack_context import wrap as keep_stack_context
from opentracing_instrumentation import utils
from ..request_context import get_current_span, span_in_stack_context
from ._patcher import Patcher
try:
    from boto3.resources.action import ServiceAction
    from boto3.s3 import inject as s3_functions
    from botocore import xform_name
    from botocore.client import BaseClient
    from botocore.exceptions import ClientError
    from s3transfer.futures import BoundedExecutor
except ImportError:
    # boto3/botocore not installed: the patcher below stays inapplicable
    # (Boto3Patcher.applicable checks for '_service_action_call' in globals()).
    pass
else:
    # Keep references to the original callables so _reset_patches can restore them.
    _service_action_call = ServiceAction.__call__
    _client_make_api_call = BaseClient._make_api_call
    _Executor = BoundedExecutor.EXECUTOR_CLS
logger = logging.getLogger(__name__)
class Boto3Patcher(Patcher):
    """Patcher that wraps boto3/botocore entry points with OpenTracing spans.

    Three call paths are instrumented:
      * ``ServiceAction.__call__`` (boto3 resource API),
      * ``BaseClient._make_api_call`` (boto3 client API),
      * the high-level S3 helper functions in ``boto3.s3.inject``.
    The s3transfer executor is also replaced so submitted tasks keep the
    Tornado stack context (and therefore the current span).
    """

    # Only applicable when the guarded boto3 imports above succeeded.
    applicable = '_service_action_call' in globals()

    S3_FUNCTIONS_TO_INSTRUMENT = (
        'copy',
        'download_file',
        'download_fileobj',
        'upload_file',
        'upload_fileobj',
    )

    def __init__(self):
        super(Boto3Patcher, self).__init__()
        # func_name -> original boto3.s3.inject function, kept for _reset_patches
        self.s3_original_funcs = {}

    def _install_patches(self):
        """Monkey-patch boto3/botocore/s3transfer with instrumented wrappers."""
        ServiceAction.__call__ = self._get_service_action_call_wrapper()
        BaseClient._make_api_call = self._get_client_make_api_call_wrapper()
        BoundedExecutor.EXECUTOR_CLS = self._get_instrumented_executor_cls()

        for func_name in self.S3_FUNCTIONS_TO_INSTRUMENT:
            func = getattr(s3_functions, func_name, None)
            if func:
                self.s3_original_funcs[func_name] = func
                func_wrapper = self._get_s3_call_wrapper(func)
                setattr(s3_functions, func_name, func_wrapper)
            else:
                # Fix: use the module-level logger (was logging.warning, which
                # logs via the root logger and bypasses this module's logger).
                logger.warning('S3 function %s not found', func_name)

    def _reset_patches(self):
        """Restore all original callables saved at import/install time."""
        ServiceAction.__call__ = _service_action_call
        BaseClient._make_api_call = _client_make_api_call
        BoundedExecutor.EXECUTOR_CLS = _Executor

        for func_name, original_func in self.s3_original_funcs.items():
            setattr(s3_functions, func_name, original_func)

    @staticmethod
    def set_request_id_tag(span, response):
        """Tag *span* with aws.request_id extracted from *response*, if present."""
        metadata = response.get('ResponseMetadata')

        # there is no ResponseMetadata for
        # boto3:dynamodb:describe_table
        if metadata:
            request_id = metadata.get('RequestId')

            # when using boto3.client('s3')
            # instead of boto3.resource('s3'),
            # there is no RequestId for
            # boto3:s3:CreateBucket
            if request_id:
                span.set_tag('aws.request_id', request_id)

    def _get_service_action_call_wrapper(self):
        def service_action_call_wrapper(service, parent, *args, **kwargs):
            """Wraps ServiceAction.__call__"""
            service_name = parent.meta.service_name
            operation_name = xform_name(
                service._action_model.request.operation
            )
            return self.perform_call(
                _service_action_call, 'resource',
                service_name, operation_name,
                service, parent, *args, **kwargs
            )
        return service_action_call_wrapper

    def _get_client_make_api_call_wrapper(self):
        def make_api_call_wrapper(client, operation_name, api_params):
            """Wraps BaseClient._make_api_call"""
            service_name = client._service_model.service_name
            formatted_operation_name = xform_name(operation_name)
            return self.perform_call(
                _client_make_api_call, 'client',
                service_name, formatted_operation_name,
                client, operation_name, api_params
            )
        return make_api_call_wrapper

    def _get_s3_call_wrapper(self, original_func):
        operation_name = original_func.__name__

        def s3_call_wrapper(*args, **kwargs):
            """Wraps __call__ of S3 client methods"""
            return self.perform_call(
                original_func, 'client', 's3', operation_name, *args, **kwargs
            )
        return s3_call_wrapper

    def perform_call(self, original_func, kind, service_name, operation_name,
                     *args, **kwargs):
        """Run *original_func* inside a client span named boto3:<kind>:<service>:<op>.

        The span is pushed onto the stack context so nested calls become
        children; a ClientError is re-raised after tagging the request id.
        """
        span = utils.start_child_span(
            operation_name='boto3:{}:{}:{}'.format(
                kind, service_name, operation_name
            ),
            parent=get_current_span()
        )

        span.set_tag(tags.SPAN_KIND, tags.SPAN_KIND_RPC_CLIENT)
        span.set_tag(tags.COMPONENT, 'boto3')
        span.set_tag('boto3.service_name', service_name)

        with span, span_in_stack_context(span):
            try:
                response = original_func(*args, **kwargs)
            except ClientError as error:
                self.set_request_id_tag(span, error.response)
                raise
            else:
                if isinstance(response, dict):
                    self.set_request_id_tag(span, response)

        return response

    def _get_instrumented_executor_cls(self):
        class InstrumentedExecutor(_Executor):
            def submit(self, task, *args, **kwargs):
                # wrap the task so it runs in the submitting stack context
                return super(InstrumentedExecutor, self).submit(
                    keep_stack_context(task), *args, **kwargs
                )
        return InstrumentedExecutor
Boto3Patcher.configure_hook_module(globals())
| uber-common/opentracing-python-instrumentation | opentracing_instrumentation/client_hooks/boto3.py | Python | mit | 5,428 |
import scipy.io
from sklearn import svm
from sklearn import cross_validation
from sklearn.metrics import accuracy_score
from skfeature.function.sparse_learning_based import RFS
from skfeature.utility.sparse_learning import construct_label_matrix, feature_ranking
def main():
    """Evaluate RFS feature selection on COIL20 with a linear SVM over 10 folds.

    Loads the dataset from ../data/COIL20.mat, selects the top 100 features
    per fold using the RFS feature-weight matrix, and prints per-fold and
    average classification accuracy.

    NOTE(review): Python 2 code (print statements); sklearn.cross_validation
    is the pre-0.18 API, replaced by sklearn.model_selection in modern releases.
    """
    # load data
    mat = scipy.io.loadmat('../data/COIL20.mat')
    X = mat['X']    # data
    X = X.astype(float)
    y = mat['Y']    # label
    y = y[:, 0]
    # one-hot style label matrix required by RFS
    Y = construct_label_matrix(y)
    n_samples, n_features = X.shape

    # split data into 10 folds
    ss = cross_validation.KFold(n_samples, n_folds=10, shuffle=True)

    # perform evaluation on classification task
    num_fea = 100    # number of selected features
    clf = svm.LinearSVC()    # linear SVM

    correct = 0
    for train, test in ss:
        # obtain the feature weight matrix (fit on training split only)
        Weight = RFS.rfs(X[train, :], Y[train, :], gamma=0.1)

        # sort the feature scores in an ascending order according to the feature scores
        idx = feature_ranking(Weight)

        # obtain the dataset on the selected features
        selected_features = X[:, idx[0:num_fea]]

        # train a classification model with the selected features on the training dataset
        clf.fit(selected_features[train], y[train])

        # predict the class labels of test data
        y_predict = clf.predict(selected_features[test])

        # obtain the classification accuracy on the test data
        acc = accuracy_score(y[test], y_predict)
        print acc

        correct = correct + acc

    # output the average classification accuracy over all 10 folds
    print 'Accuracy:', float(correct)/10
if __name__ == '__main__':
main() | jundongl/scikit-feast | skfeature/example/test_RFS.py | Python | gpl-2.0 | 1,688 |
#
# Copyright 2001 - 2006 Ludek Smid [http://www.ospace.net/]
#
# This file is part of IGE - Outer Space.
#
# IGE - Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# IGE - Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IGE - Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from ige.IObject import IObject
from ige.IDataHolder import IDataHolder
from Const import *
import Rules
from IGalaxy import IGalaxy
import time, tempfile, os
import ige
from ige import log
from GalaxyGenerator import GenerateGalaxy
from ige import GameException
try:
    import ClientVersion
except ImportError:
    # fake it: stand-in object with the same attributes the server reads,
    # used when the client version module is not shipped with the server
    class ClientVersionClass:
        pass
    ClientVersion = ClientVersionClass()
    ClientVersion.version = (0, 0, 0, "?")
    ClientVersion.build = 0
    ClientVersion.revision = 0
    ClientVersion.versionString = "Version not specified"
class IUniverse(IObject):
    """Root game object of an Outer Space universe.

    Holds references to all galaxies and players, implements the per-turn
    processing phases (INIT/PROD/ACTION/BATTLE/FINAL/FINAL2) and universe-wide
    administration (galaxy creation/restart, messaging, consistency checks).
    NOTE: Python 2 code throughout (has_key, cmp, print-era idioms).
    """
    typeID = T_UNIVERSE
    # forum name -> numeric value (presumably the access level required to
    # post, here 112 for all forums) -- TODO confirm against messaging code
    forums = {"NEWS": 112, "QA": 112, "IDEAS": 112, "PUBLIC": 112, "ISSUES": 112}
    def init(self, obj):
        """Initialize default attributes of a freshly created universe object."""
        IObject.init(self, obj)
        # basic identification and ownership
        obj.name = "Outer Space"
        obj.turn = 0
        obj.owner = OID_ADMIN
        obj.galaxies = []
        obj.players = []
        # auto loading of galaxies
        obj.galX = 0.0
        obj.galY = 0.0
        obj.galXStep = 100.0
        obj.galYStep = 100.0
        obj.galFilename = ''
        obj.galID = ''
    def getIntroInfo(self, tran, obj):
        """Return login/handshake info: caller id, turn, time, client version."""
        result = IDataHolder()
        result.cid = tran.cid
        result.turn = obj.turn
        result.serverTime = time.time()
        result.lastClientVersion = ClientVersion.version
        result.lastClientRevision = ClientVersion.revision
        result.rulesetName = Rules.rulesetName
        return result
    getIntroInfo.public = 1
    getIntroInfo.accLevel = AL_NONE
    def multiGetInfo(self, tran, obj, objIDs):
        """Batched getInfo: falls back to getPublicInfo per object on security errors."""
        result = []
        messages = []
        # perform getInfo or getPublicInfo command for each objID
        for objID in objIDs:
            try:
                tmpObj, tmpMsgs = tran.gameMngr.execute(tran.session.sid, 'getInfo', objID)
            except ige.SecurityException:
                tmpObj, tmpMsgs = tran.gameMngr.execute(tran.session.sid, 'getPublicInfo', objID)
            except ige.NoSuchObjectException:
                # vanished object: skip silently
                tmpObj = None
            if tmpObj:
                result.append(tmpObj)
                messages.extend(tmpMsgs)
        # restore messages
        for msgID, data in messages:
            tran.session.messages[msgID] = data
        return result
    multiGetInfo.public = 1
    multiGetInfo.accLevel = AL_NONE
    def multiGetMsgs(self, tran, obj, mailboxes):
        """Batched getMsgs over (objID, lastID) mailbox pairs; returns (objID, data) list."""
        result = []
        messages = []
        # perform getMsgs
        for objID, lastID in mailboxes:
            data, tmpMsgs = tran.gameMngr.execute(tran.session.sid, 'getMsgs', objID, lastID)
            result.append((objID, data))
            messages.extend(tmpMsgs)
        # restore messages
        for msgID, data in messages:
            tran.session.messages[msgID] = data
        return result
    multiGetMsgs.public = 1
    multiGetMsgs.accLevel = AL_NONE
    def createGalaxy(self, tran, obj):
        """Create an empty galaxy object, register it in obj.galaxies, return its oid."""
        galaxy = self.new(T_GALAXY)
        galaxy.compOf = obj.oid
        oid = tran.db.create(galaxy)
        obj.galaxies.append(oid)
        return oid
    createGalaxy.public = 1
    createGalaxy.accLevel = AL_ADMIN
    def createAsteroid(self, tran, obj, x, y, targetID, speed, hp):
        """Create an asteroid at (x, y) heading to targetID; returns its oid."""
        asteroid = self.new(T_ASTEROID)
        tran.db.create(asteroid)
        self.cmd(asteroid).create(tran, asteroid, x, y, targetID, speed, hp)
        return asteroid.oid
    createAsteroid.public = 1
    createAsteroid.accLevel = AL_ADMIN
    def processINITPhase(self, tran, obj, data):
        """Turn INIT phase: normalize diplomacy pacts, then return ids to process.

        Activates all pacts that are switched on, then iteratively deactivates
        pacts whose relation interval or pact preconditions are not satisfied,
        until a fixed point is reached.
        """
        try:
            ## find active/inactive pacts
            # set all active/on pacts to active
            for playerID in obj.players:
                #@log.debug("Processing player", playerID)
                player = tran.db[playerID]
                for partyID in player.diplomacyRels:
                    #@log.debug("Processing party", partyID)
                    dipl = player.diplomacyRels[partyID]
                    # iterate over a copy of keys (Python 2 .keys() returns a
                    # list) because invalid pacts are deleted while iterating
                    for pactID in dipl.pacts.keys():
                        if pactID not in Rules.pactDescrs:
                            # this is invalid pactID
                            log.debug(playerID, "Deleting invalid pact with", partyID, "pact", pactID)
                            del dipl.pacts[pactID]
                            continue
                        if dipl.pacts[pactID][0] > PACT_OFF:
                            dipl.pacts[pactID][0] = PACT_ACTIVE
            # inactivate all pact that does not satisfy conditions
            changed = 1
            defaultPact = [PACT_OFF]
            # fixed-point iteration: deactivating one pact may invalidate others
            while changed:
                changed = 0
                log.debug("Inactivate pacts iteration starting...")
                for playerID in obj.players:
                    #@log.debug("Processing player", playerID)
                    player = tran.db[playerID]
                    # all parties of a player
                    for partyID in player.diplomacyRels:
                        #@log.debug("Processing party", partyID)
                        party = tran.db[partyID]
                        partyDipl = party.diplomacyRels.get(playerID, None)
                        if not partyDipl:
                            continue
                        dipl = player.diplomacyRels[partyID]
                        # correct relations
                        dipl.relation = min(dipl.relation, partyDipl.relation)
                        # all pacts with party
                        for pactID in dipl.pacts:
                            # check validity interval
                            pactSpec = Rules.pactDescrs[pactID]
                            if (dipl.relation < pactSpec.validityInterval[0] or \
                                dipl.relation > pactSpec.validityInterval[1]) and \
                                dipl.pacts[pactID][0] == PACT_ACTIVE:
                                #@log.debug("Inactivating pact (validity interval)", playerID, pactID)
                                dipl.pacts[pactID][0] = PACT_INACTIVE
                                changed = 1
                            # check conditions for the pact if pact is active
                            if dipl.pacts[pactID][0] == PACT_ACTIVE:
                                for condPactID in dipl.pacts[pactID][1:]:
                                    #@log.debug("Checking", playerID, pactID, "against", partyID, condPactID)
                                    if partyDipl and partyDipl.pacts.get(condPactID, defaultPact)[0] != PACT_ACTIVE:
                                        dipl.pacts[pactID][0] = PACT_INACTIVE
                                        changed = 1
        except Exception:
            # NOTE(review): broad catch that only logs a generic warning --
            # the actual exception/traceback is lost; consider log.exception
            log.warning("Cannot process diplomacy initialization")
        # TODO - send notifications if pacts are changed
        # remove old messages
        self.cmd(obj).deleteOldMsgs(tran, obj)
        return obj.players[:] + [OID_NATURE]
    processINITPhase.public = 1
    processINITPhase.accLevel = AL_ADMIN
    def processPRODPhase(self, tran, obj, data):
        # universe itself has no production; phase is handled by child objects
        raise NotImplementedError()
    processPRODPhase.public = 1
    processPRODPhase.accLevel = AL_ADMIN
    def processACTIONPhase(self, tran, obj, data):
        # universe itself has no actions; phase is handled by child objects
        raise NotImplementedError()
    processACTIONPhase.public = 1
    processACTIONPhase.accLevel = AL_ADMIN
    def processBATTLEPhase(self, tran, obj, data):
        # universe itself has no battles; phase is handled by child objects
        raise NotImplementedError()
    processBATTLEPhase.public = 1
    processBATTLEPhase.accLevel = AL_ADMIN
    def processFINALPhase(self, tran, obj, data):
        """Turn FINAL phase: nothing to do here, just forward to players + nature."""
        return obj.players[:] + [OID_NATURE]
    processFINALPhase.public = 1
    processFINALPhase.accLevel = AL_ADMIN
    def processFINAL2Phase(self, tran, obj, data):
        """Turn FINAL2 phase: distribute stats, run imperator voting, clean mailboxes.

        Voting runs every Rules.voteForImpPeriod turns (with an announcement
        two days earlier); a candidate needs Rules.ratioNeededForImp of all
        cast slot-weighted votes to become imperator, otherwise the strongest
        nominee becomes a mere leader. Also auto-restarts galaxies with at
        most one active player (in "normal" server mode).
        """
        # distribute stats to contacts
        for playerID in obj.players:
            player = tran.db[playerID]
            for partyID in player.diplomacyRels:
                dipl = player.diplomacyRels[partyID]
                if dipl.contactType > CONTACT_NONE and tran.db.has_key(partyID):
                    dipl.stats = tran.db[partyID].stats
                else:
                    dipl.stats = None
        # imperator voting
        turn = tran.db[OID_UNIVERSE].turn
        # announcement two game-days before the actual vote
        if (turn + 2 * Rules.turnsPerDay) % Rules.voteForImpPeriod == 0:
            for galaxyID in obj.galaxies:
                galaxy = tran.db[galaxyID]
                if not galaxy.timeEnabled:
                    # skip this galaxy
                    continue
                message = {
                    "sender": "GNC",
                    "senderID": galaxyID,
                    "forum": "NEWS",
                    "data": (galaxyID, MSG_GNC_VOTING_COMING, galaxyID, turn, None),
                    "topic": "EVENT",
                }
                self.cmd(galaxy).sendMsg(tran, galaxy, message)
        if turn % Rules.voteForImpPeriod == 0:
            # voting
            # process each galaxy
            for galaxyID in obj.galaxies:
                log.debug("Voting for galaxy", galaxyID)
                galaxy = tran.db[galaxyID]
                if not galaxy.timeEnabled:
                    # skip this galaxy
                    continue
                # compute votes
                activePlayerCount = 0
                piratePlayer = False
                selfName = None
                # NOTE(review): `sum` shadows the builtin within this scope
                sum = 0
                votes = {}      # candidate name -> slot-weighted votes
                votesID = {}    # candidate oid  -> slot-weighted votes
                voters = {}     # candidate name -> list of voter names
                for playerID in obj.players:
                    player = tran.db[playerID]
                    if galaxyID not in player.galaxies:
                        log.debug("Skipping player", playerID, " - not in this galaxy")
                        continue
                    if player.type == T_PIRPLAYER:
                        log.debug("Skipping player", playerID, " - he/she is a pirate")
                        piratePlayer = True
                        activePlayerCount += 1
                        continue
                    if player.type != T_PLAYER:
                        log.debug("Skipping player", playerID, " - it's not a regular player")
                        # skip non-regular players
                        continue
                    selfName = player.name
                    # add to sum
                    log.debug(playerID, "votes for", player.voteFor, "with votes", player.stats.slots)
                    activePlayerCount += 1
                    sum += player.stats.slots
                    if player.voteFor == OID_NONE:
                        voteFor = None
                    else:
                        tmpPlayer = tran.db.get(player.voteFor, None)
                        if not tmpPlayer or tmpPlayer.type != T_PLAYER:
                            # reset vote
                            player.voteFor = OID_NONE
                            voteFor = None
                        else:
                            voteFor = tmpPlayer.name
                    # count votes
                    votes[voteFor] = votes.get(voteFor, 0) + player.stats.slots
                    votesID[player.voteFor] = votesID.get(player.voteFor, 0) + player.stats.slots
                    if voteFor in voters:
                        voters[voteFor].append(player.name)
                    else:
                        voters[voteFor] = [player.name]
                # check winner
                # Python 2 idiom: keys() returns a list; sort nominees by
                # descending vote count via cmp
                nominated = votesID.keys()
                nominated.sort(lambda a, b: cmp(votesID[b], votesID[a]))
                winnerID = OID_NONE
                # remove OID_NONE from the list
                if OID_NONE in nominated:
                    nominated.remove(OID_NONE)
                # check winner
                if nominated and float(votesID[nominated[0]]) / sum >= Rules.ratioNeededForImp:
                    # we have the imperator!
                    imperator = tran.db[nominated[0]]
                    # 2 imperator, 3+ winner
                    imperator.imperator = max(2, imperator.imperator + 1)
                    # dethrone the previous imperator, if different
                    if galaxy.imperator != OID_NONE and galaxy.imperator != imperator.oid:
                        tran.db[galaxy.imperator].imperator = 0
                    galaxy.imperator = imperator.oid
                    # send message
                    message = {
                        "sender": "GNC",
                        "senderID": galaxyID,
                        "forum": "NEWS",
                        "data": (galaxyID, MSG_GNC_VOTING_IMPERATOR, galaxyID, turn, (imperator.name, (votes,voters))),
                        "topic": "EVENT",
                    }
                    self.cmd(galaxy).sendMsg(tran, galaxy, message)
                elif len(nominated) >= 1:
                    # we have the leader!
                    leader = tran.db[nominated[0]]
                    leader.imperator = 1
                    if galaxy.imperator != OID_NONE and galaxy.imperator != leader.oid:
                        tran.db[galaxy.imperator].imperator = 0
                    galaxy.imperator = leader.oid
                    # send message
                    message = {
                        "sender": "GNC",
                        "senderID": galaxyID,
                        "forum": "NEWS",
                        "data": (galaxyID, MSG_GNC_VOTING_LEADER, galaxyID, turn, (leader.name, (votes,voters))),
                        "topic": "EVENT",
                    }
                    self.cmd(galaxy).sendMsg(tran, galaxy, message)
                else:
                    # nobody wins
                    galaxy.imperator = OID_NONE
                    message = {
                        "sender": "GNC",
                        "senderID": galaxyID,
                        "forum": "NEWS",
                        "data": (galaxyID, MSG_GNC_VOTING_NOWINNER, galaxyID, turn, ((votes,voters),)),
                        "topic": "EVENT",
                    }
                    self.cmd(galaxy).sendMsg(tran, galaxy, message)
                # check one player win conditions, but only in normal mode (not development)
                if activePlayerCount <= 1 and tran.gameMngr.config.server.mode == "normal":
                    log.message("AUTO RESTARTING GALAXY", galaxyID)
                    if activePlayerCount == 0:
                        self.restartGalaxy2(tran, obj, galaxyID, ["The galaxy was ended with no active players."])
                    elif piratePlayer: #if the pirate is still alive, then he must be the victor.
                        self.restartGalaxy2(tran, obj, galaxyID, ["The galaxy was automatically ended with the Pirate as victor!"])
                    elif selfName: #if there is only one player, selfName must be themselves if it isn't null
                        self.restartGalaxy2(tran, obj, galaxyID, ["The galaxy was automatically ended with commander %s as the only remaining player." % selfName])
        # collect mailboxes
        used = [self.cmd(obj).getMailboxName(tran, obj)]
        for galaxyID in obj.galaxies:
            tmpObj = tran.db[galaxyID]
            used.append(self.cmd(tmpObj).getMailboxName(tran, tmpObj))
        for playerID in obj.players:
            tmpObj = tran.db[playerID]
            used.append(self.cmd(tmpObj).getMailboxName(tran, tmpObj))
        # trash unused mailboxes
        tran.gameMngr.msgMngr.trashUnusedMailboxes(used)
        return obj.galaxies
    processFINAL2Phase.public = 1
    processFINAL2Phase.accLevel = AL_ADMIN
    def update(self, tran, obj):
        """Consistency check: prune dangling/invalid player refs, ensure NATURE exists."""
        # check existence of all galaxies
        log.debug('Game turn is',obj.turn)
        # galaxy check is disabled (dead code guarded by `if 0`)
        if 0:
            for galaxyID in obj.galaxies:
                if not tran.db.has_key(galaxyID):
                    log.debug("CONSISTENCY - galaxy %d from universe %d does not exists" % (galaxyID, obj.oid))
                elif tran.db[galaxyID].type != T_GALAXY:
                    log.debug("CONSISTENCY - galaxy %d from universe %d is not a T_GALAXY" % (galaxyID, obj.oid))
        # check existence of all players
        # iterate over a copy because invalid entries are removed in-place
        for playerID in obj.players[:]:
            if not tran.db.has_key(playerID):
                log.debug("CONSISTENCY - player %d from universe %d does not exists" % (playerID, obj.oid))
                log.debug("Removing reference to player", playerID)
                obj.players.remove(playerID)
            elif tran.db[playerID].type not in PLAYER_TYPES:
                log.debug("CONSISTENCY - player %d from universe %d is not a %s, it's %d" % (playerID, obj.oid, str(PLAYER_TYPES), tran.db[playerID].type))
                log.debug("Removing reference to player", playerID)
                obj.players.remove(playerID)
        # create NATURE if needed
        if not tran.db.has_key(OID_NATURE):
            # create "nature player"
            player = self.new(T_NATURE)
            tran.gameMngr.registerPlayer(player.login, player, OID_NATURE)
    update.public = 0
    def getReferences(self, tran, obj):
        """Return all object ids referenced by the universe (for GC/serialization)."""
        return obj.players[:] + obj.galaxies[:] + [OID_NATURE]
    getReferences.public = 0
    def getPublicInfo(self, tran, obj):
        """Return the publicly visible subset of universe attributes."""
        result = IDataHolder()
        result.oid = obj.oid
        result.type = obj.type
        result.name = obj.name
        result.turn = obj.turn
        return result
    getPublicInfo.public = 1
    getPublicInfo.accLevel = AL_NONE
    ## messaging
    def canGetMsgs(self, tran, obj, oid):
        # anybody may read universe messages
        return 1
    canGetMsgs.public = 0
    def canSendMsg(self, tran, obj, oid, forum):
        """Return 1 if posting to *forum* is allowed (all known forums are open)."""
        if forum == "QA":
            return 1
        elif forum == "PUBLIC":
            return 1
        elif forum == "IDEAS":
            return 1
        elif forum == "ISSUES":
            return 1
        elif forum == "NEWS":
            return 1
        return 0
    canSendMsg.public = 0
    def restartGalaxy(self, tran, obj, galaxyID, imperatorMessage): #client-initiated restart
        """Restart a galaxy on request of its (3x elected) imperator.

        Deletes the galaxy, generates a fresh 'Circle42P' galaxy at the same
        position with the same name, and announces the restart in NEWS.
        Raises GameException when the caller is not entitled to restart.
        """
        log.debug("Restarting Galaxy", galaxyID)
        galaxy = tran.db[galaxyID]
        # NOTE(review): compares imperator to literal 0, elsewhere OID_NONE is
        # used for "no imperator" -- TODO confirm these are the same value
        if galaxy.imperator == 0 or galaxy.imperator != tran.cid:
            raise GameException('Only galaxy imperator can restart galaxy')
        imperator = tran.db[tran.cid]
        if imperator.imperator < 3:
            raise GameException('Only imperator elected three times and more can restart galaxy')
        log.debug("Sending message", imperatorMessage)
        message = {
            "sender": imperator.name,
            "senderID": tran.cid,
            "forum": "NEWS",
            "data": (galaxyID, MSG_GNC_GALAXY_RESTARTED, galaxyID, tran.db[OID_UNIVERSE].turn, (imperator.name, galaxy.name, imperatorMessage)),
            "topic": "EVENT",
        }
        self.cmd(obj).sendMsg(tran, obj, message)
        fh, galaxyFileName = tempfile.mkstemp(text = True)
        log.debug("Generating new galaxy to temporary file", galaxyFileName)
        strGalaxyID = 'Circle42P'
        GenerateGalaxy(strGalaxyID, os.fdopen(fh, "w+b"))
        # preserve position and name of the old galaxy
        oldX = galaxy.x
        oldY = galaxy.y
        oldName = galaxy.name
        log.debug("Deleting galaxy", galaxyID)
        self.cmd(galaxy).delete(tran, galaxy)
        log.debug("Creating new galaxy")
        newGalaxyID = self.createGalaxy(tran, obj)
        log.debug("Created new galaxy", newGalaxyID)
        newGalaxy = tran.db[newGalaxyID]
        log.debug("Loading new ", newGalaxyID)
        self.cmd(newGalaxy).loadFromXML(tran, newGalaxy, galaxyFileName, strGalaxyID, oldX, oldY, oldName)
        log.debug("Setup Enviroment", newGalaxyID)
        self.cmd(newGalaxy).setupEnvironment(tran, newGalaxy)
        log.debug("Sending Announcement Message", newGalaxyID)
        #self.cmd(newGalaxy).announceGalaxy(tran,newGalaxy)
        log.debug("Removing temp file", galaxyFileName)
        os.remove(galaxyFileName)
        # TODO: find you what's this code about
        #message = {
        #    "sender": 'Galaxy %s' % oldName,
        #    "senderID": obj.oid,
        #    "forum": "NEWS",
        #    "data": (obj.oid, MSG_GNC_GALAXY_GENERATOR, obj.oid, tran.db[OID_UNIVERSE].turn, (oldName, newGalaxy.description)),
        #    "topic": "EVENT",
        #}
        log.debug("Galaxy Restarting END")
    restartGalaxy.public = 1
    restartGalaxy.accLevel = AL_NONE
    def restartGalaxy2(self, tran, obj, galaxyID, imperatorMessage): #server-initiated restart
        """Server-initiated galaxy restart (no permission checks).

        Same regeneration procedure as restartGalaxy; called automatically
        from processFINAL2Phase when at most one active player remains.
        NOTE(review): near-duplicate of restartGalaxy -- candidate for a
        shared private helper.
        """
        log.debug("Restarting Galaxy", galaxyID)
        galaxy = tran.db[galaxyID]
        log.debug("Sending message", imperatorMessage)
        message = {
            "sender": "Galaxy %s" % galaxy.name,
            "senderID": tran.cid,
            "forum": "NEWS",
            "data": (galaxyID, MSG_GNC_GALAXY_AUTO_RESTARTED, galaxyID, tran.db[OID_UNIVERSE].turn, (galaxy.name, imperatorMessage)),
            "topic": "EVENT",
        }
        self.cmd(obj).sendMsg(tran, obj, message)
        fh, galaxyFileName = tempfile.mkstemp(text = True)
        log.debug("Generating new galaxy to temporary file", galaxyFileName)
        strGalaxyID = 'Circle42P'
        GenerateGalaxy(strGalaxyID, os.fdopen(fh, "w+b"))
        # preserve position and name of the old galaxy
        oldX = galaxy.x
        oldY = galaxy.y
        oldName = galaxy.name
        log.debug("Deleting galaxy", galaxyID)
        self.cmd(galaxy).delete(tran, galaxy)
        log.debug("Creating new galaxy")
        newGalaxyID = self.createGalaxy(tran, obj)
        log.debug("Created new galaxy", newGalaxyID)
        newGalaxy = tran.db[newGalaxyID]
        log.debug("Loading new ", newGalaxyID)
        self.cmd(newGalaxy).loadFromXML(tran, newGalaxy, galaxyFileName, strGalaxyID, oldX, oldY, oldName)
        log.debug("Setup Enviroment", newGalaxyID)
        self.cmd(newGalaxy).setupEnvironment(tran, newGalaxy)
        log.debug("Sending Announcement Message", newGalaxyID)
        #self.cmd(newGalaxy).announceGalaxy(tran,newGalaxy)
        log.debug("Removing temp file", galaxyFileName)
        os.remove(galaxyFileName)
        # TODO: find you what's this code about
        #message = {
        #    "sender": 'Galaxy'+galaxyName,
        #    "senderID": obj.oid,
        #    "forum": "NEWS",
        #    "data": (obj.oid, MSG_GNC_GALAXY_GENERATOR, obj.oid, tran.db[OID_UNIVERSE].turn, (galaxyName, newGalaxy.description)),
        #    "topic": "EVENT",
        #}
        log.debug("Galaxy Restarting END")
    restartGalaxy2.public = 1
    restartGalaxy2.accLevel = AL_ADMIN
    def createNewGalaxy(self, tran, obj, x, y, galaxyName):
        """Generate and load a brand new 'Circle42P' galaxy at (x, y) named *galaxyName*."""
        log.message("Adding new galaxy '%s' to (%d, %d)" % (galaxyName, x, y))
        fh, galaxyFileName = tempfile.mkstemp(text = True)
        log.debug("Generating new galaxy to temporary file", galaxyFileName)
        strGalaxyID = 'Circle42P'
        GenerateGalaxy(strGalaxyID, os.fdopen(fh, "w+b"))
        log.debug("Creating new galaxy")
        newGalaxyID = self.createGalaxy(tran, obj)
        log.debug("Created new galaxy", newGalaxyID)
        newGalaxy = tran.db[newGalaxyID]
        log.debug("Loading new ", newGalaxyID)
        self.cmd(newGalaxy).loadFromXML(tran, newGalaxy, galaxyFileName, strGalaxyID, x, y, galaxyName)
        log.debug("Setup Enviroment", newGalaxyID)
        self.cmd(newGalaxy).setupEnvironment(tran, newGalaxy)
        log.debug("Sending Announcement Message", newGalaxyID)
        #self.cmd(newGalaxy).announceGalaxy(tran,newGalaxy)
        log.debug("Removing temp file", galaxyFileName)
        os.remove(galaxyFileName)
        # TODO: find you what's this code about
        #message = {
        #    "sender": 'Galaxy %s' % galaxyName,
        #    "senderID": obj.oid,
        #    "forum": "NEWS",
        #    "data": (obj.oid, MSG_GNC_GALAXY_GENERATOR, obj.oid, tran.db[OID_UNIVERSE].turn, (galaxyName, newGalaxy.description)),
        #    "topic": "EVENT",
        #}
        log.debug("Galaxy Restarting END")
    createNewGalaxy.public = 1
    createNewGalaxy.accLevel = AL_ADMIN
    def deleteGalaxy(self, tran, galaxyID):
        """Delete the galaxy *galaxyID* via its own delete command.

        NOTE(review): signature lacks the usual *obj* parameter present on
        every other command method here -- TODO confirm callers.
        """
        galaxy = tran.db[galaxyID]
        log.debug("Deleting galaxy", galaxyID)
        self.cmd(galaxy).delete(tran, galaxy)
    deleteGalaxy.public = 1
    deleteGalaxy.accLevel = AL_ADMIN
| OuterDeepSpace/OuterDeepSpace | libs/server/ige/ospace/IUniverse.py | Python | gpl-2.0 | 20,112 |
#!/usr/bin/env python
################################################################################
##
## substructure_generate_fulltable.py
## Author: Satoshi Takahama (satoshi.takahama@epfl.ch)
## Nov. 2014
##
## -----------------------------------------------------------------------------
##
## This file is part of APRL-SSP
##
## APRL-SSP is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## APRL-SSP is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with APRL-SSP. If not, see <http://www.gnu.org/licenses/>.
##
################################################################################
import os
import re
import pybel
import openbabel
import pandas as pd
from collections import OrderedDict
from argparse import ArgumentParser, RawTextHelpFormatter
from util import searchgroups
## import igraph ## didn't need
###_* --- Define command-line arguments
# Module-level ArgumentParser; consumed by the __main__ block below.
parser = ArgumentParser(description='''
============================================================
Find adjacent atoms and bond order for each atom.
Example usage:
$ python substructure_adjacent_atoms.py -i apinenemech.csv -o apinenemech_adjacent_atoms.csv
''',formatter_class=RawTextHelpFormatter)
###_ . Arguments
# -i: input CSV with (compound, SMILES) columns; -o: output CSV path
parser.add_argument('-i','--inputfile',type=str,
                    help='file of SMILES strings (label, SMILES); csv format')
parser.add_argument('-o','--outputfile',type=str,default='output',
                    help='output file name')
if __name__=='__main__':
    # Builds an atom-adjacency edge list (with bond orders) for every compound
    # in the input CSV and writes it to the output CSV.
    ###_* --- Parse arguments
    args = parser.parse_args()
    ## for debugging
    ## args = parser.parse_args('-i examples/example_main.csv -o output.csv'.split())
    ###_* --- Read SMILES file
    # index by compound label, keep only the SMILES column
    inp = pd.read_csv(args.inputfile).drop_duplicates().set_index('compound')[['SMILES']]
    ## http://openbabel.org/docs/dev/UseTheLibrary/Python_PybelAPI.html
    ## http://openbabel.org/docs/dev/UseTheLibrary/PythonExamples.html
    ## http://openbabel.org/dev-api/classOpenBabel_1_1OBAtom.shtml
    ## pyatom.idx == obatom.GetIdx()
    ## pyatom.idx != obatom.GetIndex()
    # rows: (compound, atom1 idx, atom2 idx, atom1 type, atom2 type, bond order);
    # both directions of each bond are emitted (one row per neighbor per atom)
    edgelist = []
    for compound in inp.index:
        # NOTE(review): .ix is long deprecated/removed in modern pandas;
        # .loc would be the replacement -- left as-is for this codebase
        mol = pybel.readstring('smi', inp.SMILES.ix[compound])
        mol.addh()    # make implicit hydrogens explicit so they appear as atoms
        for pyatom in mol.atoms:
            obatom = pyatom.OBAtom
            idx1 = obatom.GetIdx()
            atype1 = obatom.GetType()
            for neighbor in openbabel.OBAtomAtomIter(obatom):
                idx2 = neighbor.GetIdx()
                atype2 = neighbor.GetType()
                bond = obatom.GetBond(neighbor)
                bondorder = bond.GetBondOrder()
                edgelist.append((compound, idx1, idx2, atype1, atype2, bondorder))
    edgeframe = pd.DataFrame(edgelist, columns=['compound', 'atom1', 'atom2', 'atom1_type', 'atom2_type', 'bondorder'])
    edgeframe.to_csv(args.outputfile, index=False)
| stakahama/aprl-ssp | substructure_adjacent_atoms.py | Python | gpl-3.0 | 3,237 |
#!/usr/bin/env python
import os
import subprocess
from staticbuilder import StaticBuilder
def test():
"""
Test harness for static builder.
Checks SB both as imported object and command line utility.
Test paths must be set up correctly:
(TODO: create setup function to automate directory/file creation)
"""
command_line_test = True
object_test = True
# TEST COMMAND LINE
if command_line_test:
print "Testing SB from the command line"
# Test bad local path.
print "Testing bad local path"
cmd = "python staticbuilder.py \
~/projects/staticbuilder/sb_test_bucket/no_file.txt"
ret = subprocess.call(cmd, shell=True)
assert ret == 2
# Test invalid location.
print "Testing invalid location"
cmd = "python staticbuilder.py \
-p invalid_location \
~/projects/staticbuilder/sb_test_bucket"
ret = subprocess.call(cmd, shell=True)
assert ret == 2
# Test that an absolute file path in works.
print "Testing single in path, absolute."
cmd = "python staticbuilder.py \
/Users/scottyoung/projects/staticbuilder/sb_test_bucket/testfile0.txt"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test that a relative file path in works.
print "Testing single in path, relative."
cmd = "python staticbuilder.py \
sb_test_bucket/testdir1/testfile1.txt"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test that out path works.
print "Testing out path."
cmd = "python staticbuilder.py \
sb_test_bucket/testdir1/testfile1.txt sb_test_bucket"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test that two in-paths work.
print "Testing two in paths."
cmd = "python staticbuilder.py \
sb_test_bucket/testfile2in1.txt \
sb_test_bucket/testfile2in2.txt \
sb_test_bucket"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test that three in-paths work - no more after this!.
print "Testing three in paths."
cmd = "python staticbuilder.py \
sb_test_bucket/testdir1/testfile3in1.txt \
sb_test_bucket/testdir1/testfile3in2.txt \
sb_test_bucket/testfile3in3.txt \
sb_test_bucket/testdir1/"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test for a single directory in
print "Testing single directory in - no recursion"
cmd = "python staticbuilder.py \
sb_test_bucket"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test for a single sub directory recursive
print "Testing single directory in - with recursion"
cmd = "python staticbuilder.py -r \
sb_test_bucket/testdir1"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test a directory with out path - not recursive
print "Testing directory - no recursion"
cmd = "python staticbuilder.py \
sb_test_bucket/testdir1/testdir2 \
sb_test_bucket/testdir1/testdir2"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test a directory with out path - recursive
print "Testing directory - with recursion"
cmd = "python staticbuilder.py -r \
sb_test_bucket/testdir1/testdir2 \
sb_test_bucket/testdir1/testdir2"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test deletion of a file
print "Testing deletion of a file"
cmd = "python staticbuilder.py -f \
-d sb_test_bucket/testfile0.txt"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test deletion of a directory
print "Testing deletion of a file"
cmd = "python staticbuilder.py -f -r \
-d sb_test_bucket/testdir1"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test no arguments - should upload cwd
print "Testing no arguments - no recursion"
os.chdir("sb_test_bucket")
cmd = "python ../staticbuilder.py"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test no arguments with recursion
print "Testing no arguments - with recursion"
cmd = "python ../staticbuilder.py -R"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test list bad bucket name
print "Testing option -l buckets with bad bucket name"
os.chdir("..")
cmd = "python staticbuilder.py -l no_bucket"
ret = subprocess.call(cmd, shell=True)
assert ret == 2
# Test that SB can list all buckets
print "Testing option -l buckets (list buckets)"
cmd = "python staticbuilder.py -l buckets"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test that SB can list all keys in a bucket
print "Testing option -l sb_test_bucket (list all keys in bucket)"
cmd = "python staticbuilder.py -l sb_test_bucket"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test that SB can list filtered keys
print "Testing option -l sb_test_bucket/testdir1 (list all keys in directory)"
cmd = "python staticbuilder.py -l sb_test_bucket/testdir1"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test rename with too few arguments errors
print "Testing option -n with 0 args"
cmd = "python staticbuilder.py -n new_name.txt"
ret = subprocess.call(cmd, shell=True)
assert ret == 2
# Test rename with too many arguments errors
print "Testing option -n with 3 args"
cmd = "python staticbuilder.py -N new_name.text file1.txt file2.txt path/out "
ret = subprocess.call(cmd, shell=True)
assert ret == 2
# Test rename
print "Testing option -n (rename)"
cmd = "python staticbuilder.py --name new_name.txt sb_test_bucket/testfile0.txt"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test metadata
print "Testing option -m (metadata)"
cmd = "python staticbuilder.py -m kick:ass sb_test_bucket/metadata.txt"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
# Test acl
print "Testing option -a (acl)"
cmd = "python staticbuilder.py -a public-read sb_test_bucket/public.txt"
ret = subprocess.call(cmd, shell=True)
assert ret == 0
print "Complete SB test from command line."
##########################################
if object_test:
# TEST OBJECT
print "Testing SB as an object"
options = None
sb = StaticBuilder(options)
# Test bad local path.
print "Testing bad local path"
try:
sb.upload("~/projects/staticbuilder/st_test_bucket/file0.txt")
except SystemExit, e:
print e.code
assert e.code == 2
except Exception, e:
print "Unexpected Exception with bad local path."
else:
print "No exception raised with bad local path"
#sb = StaticBuilder(options)
# Test that an absolute file path in works.
print "Testing single in path, absolute."
sb.upload("/Users/scottyoung/projects/staticbuilder/sb_test_bucket/testfile0.txt")
# Test that a relative file path in works.
print "Testing single in path, relative."
sb.upload("sb_test_bucket/testdir1/testfile1.txt")
# Test that out path works.
print "Testing out path."
sb.upload("sb_test_bucket/testdir1/testfile1.txt", "sb_test_bucket")
# Test that two in-paths work.
print "Testing two in paths."
paths_in = ["sb_test_bucket/testfile2in1.txt", "sb_test_bucket/testfile2in2.txt"]
sb.upload(paths_in, "sb_test_bucket")
# Test that three in-paths work - no more after this!.
print "Testing three in paths."
paths_in = ["sb_test_bucket/testdir1/testfile3in1.txt", "sb_test_bucket/testdir1/testfile3in2.txt",
"sb_test_bucket/testfile3in3.txt"]
sb.upload(paths_in, "sb_test_bucket/testdir1/")
# Test for a single directory in
print "Testing single directory in - no recursion"
sb.upload("sb_test_bucket")
# Test for a single sub directory recursive
print "Testing single directory in - with recursion"
sb.upload("sb_test_bucket/testdir1", recursive=True)
# Test a directory with out_path - not recursive
print "Testing directory - no recursion"
sb.upload("sb_test_bucket/testdir1/testdir2", "sb_test_bucket/testdir1/testdir2")
# Test a directory with out_path - recursive
print "Testing directory - with recursion"
sb.upload("sb_test_bucket/testdir1/testdir2", "sb_test_bucket/testdir1/testdir2", recursive=True)
# Test deletion of a file
print "Testing deletion of a file"
sb.delete("sb_test_bucket/testfile0.txt", force=True)
# Test deletion of a directory
print "Testing deletion of a file"
sb.delete("sb_test_bucket/testdir1", force=True, recursive=True)
# Test no arguments - should upload cwd
print "Testing no arguments - no recursion"
os.chdir("sb_test_bucket")
sb.upload()
# Test no arguments with recursion
print "Testing no arguments - with recursion"
sb.upload(recursive=True)
# Test list bad bucket name
print "Testing option -l buckets (list buckets)"
os.chdir("..")
# Test that SB can list all buckets
print "Testing listBuckets"
sb.listBuckets()
# Test that SB can list all keys in a bucket
print "Testing option -l sb_test_bucket (list all keys in bucket)"
sb.listKeys("sb_test_bucket")
# Test that SB can list filtered keys
print "Testing option -l sb_test_bucket/testdir1 (list all keys in directory)"
sb.listKeys("sb_test_bucket/testdir1")
# Test rename with too few arguments errors
print "Testing name with too few args"
try:
sb.upload(name="new_name.txt")
except SystemExit, e:
assert e.code == 2
except Exception, e:
print "Unexpected Exception with 1 arg for name."
else:
print "No exception raised with 1 arg for name"
# Test rename with too many arguments errors
print "Testing name with too many args"
paths_in = ["sb_test_bucket/testfile2in1.txt", "sb_test_bucket/testfile2in2.txt"]
try:
sb.upload(paths_in=paths_in, name="new_name.txt")
except SystemExit, e:
assert e.code == 2
except Exception, e:
print "Unexpected Exception with 3 args for name."
else:
print "No exception raised with 3 args for name"
# Test rename
print "Testing option name"
sb.upload("sb_test_bucket/testfile0.txt", name="new_name.txt")
# Test metadata
print "Testing option -m (metadata)"
meta = {'kick':'ass'}
sb.upload("sb_test_bucket/metadata.txt", metadata=meta)
# Test acl
print "Testing option -a (acl)"
sb.set_acl("sb_test_bucket/public.txt", "public-read")
print "Complete SB object test."
# Allow this test module to be executed directly from the command line.
if __name__ == "__main__":
    test()
| coding4kicks/staticbuilder | test/staticbuilder_test.py | Python | mit | 11,842 |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import abc
import six
# SPEC: Any plotting lib must implement these functions
@six.add_metaclass(abc.ABCMeta)
class PlottingBase(object):
    """Abstract specification of the plotting backend interface.

    Every plotting library used must provide a concrete subclass
    implementing all of the abstract methods below. Inheriting from
    ``object`` explicitly keeps this a new-style class under Python 2.
    """

    ######################################
    # #### Debatable methods

    @abc.abstractmethod
    def show(self):
        """show the plot like Matplotlib"""
        return

    # #### Debatable methods
    ######################################

    @abc.abstractmethod
    def plot_rect(self, r, edgecolor='k'):
        """plot 2-d rectangles"""
        return

    @abc.abstractmethod
    def plot_abs_states(self, AA, s):
        """plot abstract state as 2-d rectangles"""
        return

    @abc.abstractmethod
    def plot_trace_list(self, trace_list, x_vs_y=None):
        """plot all the traces in the trace list"""
        return

    @abc.abstractmethod
    def plot_pwa_traces(self, txs):
        """plot pwa traces in the trace list"""
        return

    # TODO: exists in matplotlib... but does it exist in other libs?
    @abc.abstractmethod
    def plot(self, *args):
        """generic library plot calls"""
        return
| zutshi/S3CAMR | src/plot/plotting_abstract.py | Python | bsd-2-clause | 1,336 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from rest_framework import serializers
from maintenance.models import RecreateSlave
from api.maintenance_base import MaintennanceBaseApi
class RecreateSlaveSerializer(serializers.ModelSerializer):
    """Serializes RecreateSlave maintenance records for the REST API."""

    class Meta:
        model = RecreateSlave
        # Read-only snapshot of the maintenance task's progress and retry state.
        fields = (
            'id',
            'current_step',
            'status',
            'can_do_retry',
            'task',
            'created_at',
            'host',
        )
class RecreateSlaveAPI(MaintennanceBaseApi):
    """
    RecreateSlave maintenance task API.
    """

    model = RecreateSlave
    serializer_class = RecreateSlaveSerializer
    # Query-string filters supported by the list endpoint.
    filter_fields = (
        'status',
        'can_do_retry',
        'task',
        'host',
    )
| globocom/database-as-a-service | dbaas/api/recreate_slave.py | Python | bsd-3-clause | 770 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-10 09:25
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: creates the GlossaryEntry model.

    GlossaryEntry uses a string primary key and stores an optional header,
    description, and a Postgres array of example strings.
    """

    dependencies = [
        ('stein', '0027_auto_20171010_1049'),
    ]

    operations = [
        migrations.CreateModel(
            name='GlossaryEntry',
            fields=[
                ('id', models.CharField(max_length=100, primary_key=True, serialize=False, verbose_name='id')),
                ('header', models.CharField(max_length=200, null=True, verbose_name='header')),
                ('description', models.TextField(null=True, verbose_name='description')),
                ('examples', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), null=True, size=None, verbose_name='examples')),
            ],
            options={
                'verbose_name': 'Glossary Entry',
                'verbose_name_plural': 'Glossary Entries',
            },
        ),
    ]
| GeoMatDigital/django-geomat | geomat/stein/migrations/0028_glossaryentry.py | Python | bsd-3-clause | 1,003 |
import os
import subprocess
import tempfile
from conans.util.env_reader import get_env
from conans.util.files import load, mkdir, rmdir, save
from conans.util.log import logger
from conans.util.sha import sha256
# Marker file stored inside a cache folder; its contents point to the short
# path that replaces that folder (see path_shortener / rm_conandir).
CONAN_LINK = ".conan_link"
# Debug-only file stored in the short path folder, recording the original
# long path it redirects from.
CONAN_REAL_PATH = "real_path.txt"
def conan_expand_user(path):
    """Wrapper around ``os.path.expanduser`` that works around a broken HOME.

    Some Windows tools (e.g. git for windows) leave HOME set to the verbatim
    string %USERPROFILE%, and the MSYS console points HOME at its own user
    directory. In both cases HOME is dropped temporarily so expanduser falls
    back to USERPROFILE; the environment is restored before returning.
    """
    if not path.startswith("~"):
        return path

    # Snapshot the environment so any changes below are strictly temporary.
    saved_environ = dict(os.environ)
    try:
        home_dir = os.environ.get("HOME")
        # HOME is considered unreliable when it does not exist on disk, or
        # when running under MSYS with USERPROFILE available. Removing it
        # lets expanduser rely on USERPROFILE directly.
        msys_with_profile = os.getenv("MSYSTEM") and os.getenv("USERPROFILE")
        if home_dir and (not os.path.exists(home_dir) or msys_with_profile):
            del os.environ["HOME"]
        expanded = os.path.expanduser(path)
    finally:
        os.environ.clear()
        os.environ.update(saved_environ)
    return expanded
def path_shortener(path, short_paths):
    """Return a short alias folder for ``path`` (Windows MAX_PATH workaround).

    The alias is recorded inside ``path`` in a CONAN_LINK file so later calls
    (and ``rm_conandir``) can find it again.

    short_paths is 4-state:
    False: Never shorten the path
    True: Always shorten the path, create link if not existing
    None: Use shorten path only if already exists, not create
    """
    use_always_short_paths = get_env("CONAN_USE_ALWAYS_SHORT_PATHS", False)
    short_paths = use_always_short_paths or short_paths

    if short_paths is False or os.getenv("CONAN_USER_HOME_SHORT") == "None":
        return path
    link = os.path.join(path, CONAN_LINK)
    if os.path.exists(link):
        # A redirect already exists for this folder; reuse it.
        return load(link)
    elif short_paths is None:
        return path

    if os.path.exists(path):
        rmdir(path)

    short_home = os.getenv("CONAN_USER_HOME_SHORT")
    if not short_home:
        drive = os.path.splitdrive(path)[0]
        short_home = os.path.join(drive, os.sep, ".conan")
    mkdir(short_home)

    # Workaround for short_home living in NTFS file systems. Give full control
    # permission to current user to avoid access problems in cygwin/msys2
    # windows subsystems when using short_home folder
    try:
        userdomain, username = os.getenv("USERDOMAIN"), os.environ["USERNAME"]
        # FIX: the domain separator backslash must be escaped ("\\"); the
        # previous "%s\%s" relied on an invalid escape sequence.
        domainname = "%s\\%s" % (userdomain, username) if userdomain else username
        cmd = r'cacls %s /E /G "%s":F' % (short_home, domainname)
        subprocess.check_output(cmd, stderr=subprocess.STDOUT)  # Ignoring any returned output, quiet
    except subprocess.CalledProcessError:
        # cmd can fail if trying to set ACL in non NTFS drives, ignoring it.
        pass

    redirect = hashed_redirect(short_home, path)
    if not redirect:
        logger.warning("Failed to create a deterministic short path in %s", short_home)
        redirect = tempfile.mkdtemp(dir=short_home, prefix="")

    # Save the full path of the local cache directory where the redirect is from.
    # This file is for debugging purposes and not used by Conan.
    save(os.path.join(redirect, CONAN_REAL_PATH), path)

    # This "1" is the way to have a non-existing directory, so commands like
    # shutil.copytree() to it, works. It can be removed without compromising the
    # temp folder generator and conan-links consistency
    redirect = os.path.join(redirect, "1")
    save(link, redirect)
    return redirect
def rm_conandir(path):
    """Remove a cache directory, also removing its short-path alias if any."""
    link_file = os.path.join(path, CONAN_LINK)
    if os.path.exists(link_file):
        # The link contents point at "<short>/1"; remove the parent <short>.
        rmdir(os.path.dirname(load(link_file)))
    rmdir(path)
def hashed_redirect(base, path, min_length=6, attempts=10):
    """Deterministically map ``path`` to a short subdirectory of ``base``.

    Candidates are prefixes of sha256(path), growing one character per
    attempt to dodge collisions with already-existing directories.

    :return: the first non-existing candidate path, or None if every
        candidate from ``min_length`` up to ``min_length + attempts - 1``
        characters already exists.
    """
    full_hash = sha256(path.encode())
    max_length = min_length + attempts
    assert len(full_hash) > max_length

    for length in range(min_length, max_length):
        redirect = os.path.join(base, full_hash[:length])
        if not os.path.exists(redirect):
            return redirect
    # All candidate lengths collided with existing folders.
    return None
| memsharded/conan | conans/util/windows.py | Python | mit | 4,269 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import unittest
test_ignore = ["Leave Block List"]
class TestDepartment(unittest.TestCase):
    """Tests for the Department doctype."""

    def test_remove_department_data(self):
        # Creating then deleting a department should not raise.
        doc = create_department("Test Department")
        frappe.delete_doc('Department', doc.name)
def create_department(department_name, parent_department=None):
    """Create, insert and return a non-group Department document."""
    fields = {
        'doctype': 'Department',
        'is_group': 0,
        'parent_department': parent_department,
        'department_name': department_name,
        'company': frappe.defaults.get_defaults().company,
    }
    return frappe.get_doc(fields).insert()
test_records = frappe.get_test_records('Department') | Zlash65/erpnext | erpnext/hr/doctype/department/test_department.py | Python | gpl-3.0 | 801 |
class Solution(object):
    def maxProfit(self, prices):
        """Return the maximum profit from one buy-then-sell transaction.

        Single pass: track the lowest price seen so far and the best profit
        achievable by selling at the current price.

        :type prices: List[int]
        :rtype: int
        """
        # FIX: use infinity instead of the magic sentinel 1000000, which gave
        # wrong answers whenever every price exceeded 1,000,000.
        lowest_price = float('inf')
        max_profit = 0
        for price in prices:
            if price < lowest_price:
                lowest_price = price
            elif price - lowest_price > max_profit:
                max_profit = price - lowest_price
        return max_profit
| scream7/leetcode | algorithms/python/121.py | Python | apache-2.0 | 383 |
"""
Toggles for courseware in-course experience.
"""
from edx_toggles.toggles import LegacyWaffleFlagNamespace, SettingToggle
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.waffle_utils import CourseWaffleFlag
# ---------------------------------------------------------------------------
# Waffle flag definitions. Each flag is documented with the standard
# edx-toggles annotation format directly above its definition.
# ---------------------------------------------------------------------------

# Namespace for courseware waffle flags.
WAFFLE_FLAG_NAMESPACE = LegacyWaffleFlagNamespace(name='courseware')

# .. toggle_name: courseware.use_legacy_frontend
# .. toggle_implementation: CourseWaffleFlag
# .. toggle_default: False
# .. toggle_description: Waffle flag to direct learners to the legacy courseware experience - the default behavior
#   directs to the new MFE-based courseware in frontend-app-learning. Supports the ability to globally flip back to
#   the legacy courseware experience.
# .. toggle_use_cases: temporary, open_edx
# .. toggle_creation_date: 2021-06-03
# .. toggle_target_removal_date: 2021-10-09
# .. toggle_tickets: DEPR-109
COURSEWARE_USE_LEGACY_FRONTEND = CourseWaffleFlag(
    WAFFLE_FLAG_NAMESPACE, 'use_legacy_frontend', __name__
)

# .. toggle_name: courseware.microfrontend_course_team_preview
# .. toggle_implementation: CourseWaffleFlag
# .. toggle_default: False
# .. toggle_description: Waffle flag to display a link for the new learner experience to course teams without
#   redirecting students. Supports staged rollout to course teams of a new micro-frontend-based implementation of the
#   courseware page.
# .. toggle_use_cases: temporary, open_edx
# .. toggle_creation_date: 2020-03-09
# .. toggle_target_removal_date: 2020-12-31
# .. toggle_warnings: Also set settings.LEARNING_MICROFRONTEND_URL.
# .. toggle_tickets: DEPR-109
COURSEWARE_MICROFRONTEND_COURSE_TEAM_PREVIEW = CourseWaffleFlag(
    WAFFLE_FLAG_NAMESPACE, 'microfrontend_course_team_preview', __name__
)

# Waffle flag to enable the course exit page in the learning MFE.
#
# .. toggle_name: courseware.microfrontend_course_exit_page
# .. toggle_implementation: CourseWaffleFlag
# .. toggle_default: False
# .. toggle_description: Supports staged rollout of the new micro-frontend-based implementation of the course exit page.
# .. toggle_use_cases: open_edx, temporary
# .. toggle_creation_date: 2020-10-02
# .. toggle_target_removal_date: None
# .. toggle_warnings: Also set settings.LEARNING_MICROFRONTEND_URL.
# .. toggle_tickets: AA-188
COURSEWARE_MICROFRONTEND_COURSE_EXIT_PAGE = CourseWaffleFlag(
    WAFFLE_FLAG_NAMESPACE, 'microfrontend_course_exit_page', __name__
)

# .. toggle_name: courseware.mfe_progress_milestones
# .. toggle_implementation: CourseWaffleFlag
# .. toggle_default: False
# .. toggle_description: Waffle flag to display learner progress milestones in a course. Supports staged
#   rollout to students for a new micro-frontend-based implementation of the courseware page.
# .. toggle_use_cases: temporary, open_edx
# .. toggle_creation_date: 2020-10-07
# .. toggle_target_removal_date: none
# .. toggle_warnings: Also set settings.LEARNING_MICROFRONTEND_URL.
# .. toggle_tickets: AA-371
COURSEWARE_MICROFRONTEND_PROGRESS_MILESTONES = CourseWaffleFlag(
    WAFFLE_FLAG_NAMESPACE, 'mfe_progress_milestones', __name__
)

# .. toggle_name: courseware.mfe_progress_milestones_streak_celebration
# .. toggle_implementation: CourseWaffleFlag
# .. toggle_default: False
# .. toggle_description: Waffle flag to display a celebration modal when learner completes a configurable streak
#   Supports staged rollout to students for a new micro-frontend-based implementation of the
#   courseware page.
# .. toggle_use_cases: temporary, open_edx
# .. toggle_creation_date: 2021-02-16
# .. toggle_target_removal_date: None
# .. toggle_warnings: Also set settings.LEARNING_MICROFRONTEND_URL and
#   COURSEWARE_MICROFRONTEND_PROGRESS_MILESTONES.
# .. toggle_tickets: AA-304
COURSEWARE_MICROFRONTEND_PROGRESS_MILESTONES_STREAK_CELEBRATION = CourseWaffleFlag(
    WAFFLE_FLAG_NAMESPACE, 'mfe_progress_milestones_streak_celebration', __name__
)

# .. toggle_name: courseware.mfe_progress_milestones_streak_discount_enabled
# .. toggle_implementation: CourseWaffleFlag
# .. toggle_default: False
# .. toggle_description: This flag enables an engagement discount incentive message.
# .. toggle_warnings: This flag depends on the streak celebration feature being enabled
# .. toggle_use_cases: opt_out, open_edx
# .. toggle_creation_date: 2021-08-26
# .. toggle_target_removal_date: None
# .. toggle_tickets: https://openedx.atlassian.net/browse/AA-950
COURSEWARE_MFE_MILESTONES_STREAK_DISCOUNT = CourseWaffleFlag(
    WAFFLE_FLAG_NAMESPACE, 'streak_discount_enabled',
    __name__,
)

# .. toggle_name: courseware.optimized_render_xblock
# .. toggle_implementation: CourseWaffleFlag
# .. toggle_default: False
# .. toggle_description: Waffle flag that determines whether we speed up the render_xblock for browsers by
#   removing unnecessary JavaScript and CSS. It is possible that this could introduce edge cases with content
#   that relies on these assets, so being a CourseWaffleFlag will give us the flexibility to exempt courses
#   from these optimizations.
# .. toggle_use_cases: temporary
# .. toggle_creation_date: 2021-02-09
# .. toggle_target_removal_date: 2021-05-01
COURSEWARE_OPTIMIZED_RENDER_XBLOCK = CourseWaffleFlag(
    WAFFLE_FLAG_NAMESPACE, 'optimized_render_xblock', __name__
)
def courseware_mfe_is_active(course_key: CourseKey) -> bool:
    """
    Should we serve the Learning MFE as the canonical courseware experience?
    """
    # Avoid circular imports.
    from lms.djangoapps.courseware.access_utils import in_preview_mode

    # Old Mongo (deprecated) course runs are always served by the Legacy
    # frontend, and the waffle flag can force Legacy for everything else.
    if course_key.deprecated or COURSEWARE_USE_LEGACY_FRONTEND.is_enabled(course_key):
        return False
    # Course preview does not work in the MFE; otherwise the MFE experience
    # is active by default.
    return not in_preview_mode()
def courseware_mfe_is_visible(
    course_key: CourseKey,
    is_global_staff=False,
    is_course_staff=False,
) -> bool:
    """
    Can we see a course run's content in the Learning MFE?

    Hard blockers (Old Mongo, preview mode) are checked first, then staff
    overrides, and finally whether the MFE is the active experience.
    """
    # Avoid circular imports.
    from lms.djangoapps.courseware.access_utils import in_preview_mode

    # DENY: Old Mongo courses don't work in the MFE.
    if course_key.deprecated:
        return False
    # DENY: Course preview doesn't work in the MFE
    if in_preview_mode():
        return False
    # ALLOW: Where technically possible, global staff may always see the MFE.
    if is_global_staff:
        return True
    # ALLOW: If course team preview is enabled, then course staff may see their
    # course in the MFE.
    if is_course_staff and COURSEWARE_MICROFRONTEND_COURSE_TEAM_PREVIEW.is_enabled(course_key):
        return True
    # OTHERWISE: The MFE is only visible if it's the active (ie canonical) experience.
    return courseware_mfe_is_active(course_key)
def courseware_mfe_is_advertised(
    course_key: CourseKey,
    is_global_staff=False,
    is_course_staff=False,
) -> bool:
    """
    Should we invite the user to view a course run's content in the Learning MFE?

    This check is slightly different than `courseware_mfe_is_visible`, in that
    we always *permit* global staff to view MFE content (assuming it's deployed),
    but we do not shove the New Experience in their face if the preview isn't
    enabled.
    """
    # Avoid circular imports.
    from lms.djangoapps.courseware.access_utils import in_preview_mode

    # DENY: Old Mongo courses don't work in the MFE.
    if course_key.deprecated:
        return False
    # DENY: Course preview doesn't work in the MFE
    if in_preview_mode():
        return False
    # ALLOW: Both global and course staff can see the MFE link if the course team
    # preview is enabled.
    is_staff = is_global_staff or is_course_staff
    if is_staff and COURSEWARE_MICROFRONTEND_COURSE_TEAM_PREVIEW.is_enabled(course_key):
        return True
    # OTHERWISE: The MFE is only advertised if it's the active (ie canonical) experience.
    return courseware_mfe_is_active(course_key)
def courseware_legacy_is_visible(
    course_key: CourseKey,
    is_global_staff=False,
) -> bool:
    """
    Can we see a course run's content in the Legacy frontend?

    Note: This function will always return True for Old Mongo courses,
    since `courseware_mfe_is_active` will always return False for them.
    """
    # Avoid circular imports.
    from lms.djangoapps.courseware.access_utils import in_preview_mode

    # ALLOW: Global staff may always see the Legacy experience.
    if is_global_staff:
        return True
    # ALLOW: All course previews will be shown in Legacy experience
    if in_preview_mode():
        return True
    # OTHERWISE: Legacy is only visible if it's the active (ie canonical) experience.
    # Note that Old Mongo courses are never the active experience,
    # so we effectively always ALLOW them to be viewed in Legacy.
    return not courseware_mfe_is_active(course_key)
def course_exit_page_is_active(course_key):
    """Return True when the MFE course exit page should be shown for this run."""
    if not courseware_mfe_is_active(course_key):
        return False
    return COURSEWARE_MICROFRONTEND_COURSE_EXIT_PAGE.is_enabled(course_key)
def courseware_mfe_progress_milestones_are_active(course_key):
    """Return True when MFE progress milestones should show for this course run."""
    return (
        courseware_mfe_is_active(course_key) and
        COURSEWARE_MICROFRONTEND_PROGRESS_MILESTONES.is_enabled(course_key)
    )
def streak_celebration_is_active(course_key):
    """Return True when the streak celebration modal is enabled for this run."""
    # Requires milestones to be active as well (see toggle_warnings above).
    return (
        courseware_mfe_progress_milestones_are_active(course_key) and
        COURSEWARE_MICROFRONTEND_PROGRESS_MILESTONES_STREAK_CELEBRATION.is_enabled(course_key)
    )
# .. toggle_name: COURSES_INVITE_ONLY
# .. toggle_implementation: SettingToggle
# .. toggle_type: feature_flag
# .. toggle_default: False
# .. toggle_description: Setting this sets the default value of INVITE_ONLY across all courses in a given deployment
# .. toggle_category: admin
# .. toggle_use_cases: open_edx
# .. toggle_creation_date: 2019-05-16
# .. toggle_expiration_date: None
# .. toggle_tickets: https://github.com/mitodl/edx-platform/issues/123
# .. toggle_status: unsupported
def is_courses_default_invite_only_enabled():
    """Return True when new courses should default to invite-only enrollment."""
    return SettingToggle("COURSES_INVITE_ONLY", default=False).is_enabled()
| edx/edx-platform | lms/djangoapps/courseware/toggles.py | Python | agpl-3.0 | 10,426 |
"""Apache Configuration based off of Augeas Configurator."""
# pylint: disable=too-many-lines
import itertools
import logging
import os
import re
import shutil
import socket
import subprocess
import zope.interface
from acme import challenges
from letsencrypt import achallenges
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt import le_util
from letsencrypt.plugins import common
from letsencrypt_apache import augeas_configurator
from letsencrypt_apache import constants
from letsencrypt_apache import display_ops
from letsencrypt_apache import dvsni
from letsencrypt_apache import obj
from letsencrypt_apache import parser
logger = logging.getLogger(__name__)
# TODO: Augeas sections ie. <VirtualHost>, <IfModule> beginning and closing
# tags need to be the same case, otherwise Augeas doesn't recognize them.
# This is not able to be completely remedied by regular expressions because
# Augeas views <VirtualHost> </Virtualhost> as an error. This will just
# require another check_parsing_errors() after all files are included...
# (after a find_directive search is executed currently). It can be a one
# time check however because all of LE's transactions will ensure
# only properly formed sections are added.
# Note: This protocol works for filenames with spaces in it, the sites are
# properly set up and directives are changed appropriately, but Apache won't
# recognize names in sites-enabled that have spaces. These are not added to the
# Apache configuration. It may be wise to warn the user if they are trying
# to use vhost filenames that contain spaces and offer to change ' ' to '_'
# Note: FILEPATHS and changes to files are transactional. They are copied
# over before the updates are made to the existing files. NEW_FILES is
# transactional due to the use of register_file_creation()
# TODO: Verify permissions on configuration root... it is easier than
# checking permissions on each of the relative directories and less error
# prone.
# TODO: Write a server protocol finder. Listen <port> <protocol> or
# Protocol <protocol>. This can verify partial setups are correct
# TODO: Add directives to sites-enabled... not sites-available.
# sites-available doesn't allow immediate find_dir search even with save()
# and load()
class ApacheConfigurator(augeas_configurator.AugeasConfigurator):
# pylint: disable=too-many-instance-attributes,too-many-public-methods
"""Apache configurator.
State of Configurator: This code has been been tested and built for Ubuntu
14.04 Apache 2.4 and it works for Ubuntu 12.04 Apache 2.2
:ivar config: Configuration.
:type config: :class:`~letsencrypt.interfaces.IConfig`
:ivar parser: Handles low level parsing
:type parser: :class:`~letsencrypt_apache.parser`
:ivar tup version: version of Apache
:ivar list vhosts: All vhosts found in the configuration
(:class:`list` of :class:`~letsencrypt_apache.obj.VirtualHost`)
:ivar dict assoc: Mapping between domains and vhosts
"""
zope.interface.implements(interfaces.IAuthenticator, interfaces.IInstaller)
zope.interface.classProvides(interfaces.IPluginFactory)
description = "Apache Web Server - Alpha"
@classmethod
def add_parser_arguments(cls, add):
add("ctl", default=constants.CLI_DEFAULTS["ctl"],
help="Path to the 'apache2ctl' binary, used for 'configtest', "
"retrieving the Apache2 version number, and initialization "
"parameters.")
add("enmod", default=constants.CLI_DEFAULTS["enmod"],
help="Path to the Apache 'a2enmod' binary.")
add("dismod", default=constants.CLI_DEFAULTS["dismod"],
help="Path to the Apache 'a2enmod' binary.")
add("init-script", default=constants.CLI_DEFAULTS["init_script"],
help="Path to the Apache init script (used for server "
"reload/restart).")
add("le-vhost-ext", default=constants.CLI_DEFAULTS["le_vhost_ext"],
help="SSL vhost configuration extension.")
add("server-root", default=constants.CLI_DEFAULTS["server_root"],
help="Apache server root directory.")
    def __init__(self, *args, **kwargs):
        """Initialize an Apache Configurator.

        :param tup version: version of Apache as a tuple (2, 4, 7)
            (used mostly for unittesting)
        """
        version = kwargs.pop("version", None)
        super(ApacheConfigurator, self).__init__(*args, **kwargs)

        # Add name_server association dict (domain name -> VirtualHost)
        self.assoc = dict()
        # Outstanding challenges
        self._chall_out = set()

        # These will be set in the prepare function
        self.parser = None
        self.version = version
        self.vhosts = None
        # Maps enhancement names to their implementation methods.
        self._enhance_func = {"redirect": self._enable_redirect}
    @property
    def mod_ssl_conf(self):
        """Full absolute path to the SSL configuration file.

        The file lives under the Let's Encrypt config directory; it is
        installed there by ``temp_install`` during ``prepare``.
        """
        return os.path.join(self.config.config_dir, constants.MOD_SSL_CONF_DEST)
    def prepare(self):
        """Prepare the authenticator/installer.

        Validates the Apache config, builds the Augeas-backed parser,
        determines the Apache version, and collects all virtual hosts.

        :raises .errors.NoInstallationError: If Apache configs cannot be found
        :raises .errors.MisconfigurationError: If Apache is misconfigured
        :raises .errors.NotSupportedError: If Apache version is not supported
        :raises .errors.PluginError: If there is any other error
        """
        # Make sure configuration is valid
        self.config_test()

        self.parser = parser.ApacheParser(
            self.aug, self.conf("server-root"), self.conf("ctl"))
        # Check for errors in parsing files with Augeas
        self.check_parsing_errors("httpd.aug")

        # Set Version
        if self.version is None:
            self.version = self.get_version()
        if self.version < (2, 2):
            raise errors.NotSupportedError(
                "Apache Version %s not supported.", str(self.version))

        # Get all of the available vhosts
        self.vhosts = self.get_virtual_hosts()

        # Install the bundled mod_ssl options snippet into the config dir.
        temp_install(self.mod_ssl_conf)
    def deploy_cert(self, domain, cert_path, key_path, chain_path=None):
        """Deploys certificate to specified virtual host.

        Currently tries to find the last directives to deploy the cert in
        the VHost associated with the given domain. If it can't find the
        directives, it searches the "included" confs. The function verifies that
        it has located the three directives and finally modifies them to point
        to the correct destination. After the certificate is installed, the
        VirtualHost is enabled if it isn't already.

        :param str domain: domain used to select (and maybe create) the vhost
        :param str cert_path: filesystem path to the certificate file
        :param str key_path: filesystem path to the private key file
        :param str chain_path: optional filesystem path to the chain file

        .. todo:: Might be nice to remove chain directive if none exists
                  This shouldn't happen within letsencrypt though

        :raises errors.PluginError: When unable to deploy certificate due to
            a lack of directives
        """
        vhost = self.choose_vhost(domain)

        # This is done first so that ssl module is enabled and cert_path,
        # cert_key... can all be parsed appropriately
        self.prepare_server_https("443")

        path = {}

        path["cert_path"] = self.parser.find_dir(
            "SSLCertificateFile", None, vhost.path)
        path["cert_key"] = self.parser.find_dir(
            "SSLCertificateKeyFile", None, vhost.path)

        # Only include if a certificate chain is specified
        if chain_path is not None:
            path["chain_path"] = self.parser.find_dir(
                "SSLCertificateChainFile", None, vhost.path)

        if not path["cert_path"] or not path["cert_key"]:
            # Throw some can't find all of the directives error"
            logger.warn(
                "Cannot find a cert or key directive in %s. "
                "VirtualHost was not modified", vhost.path)
            # Presumably break here so that the virtualhost is not modified
            raise errors.PluginError(
                "Unable to find cert and/or key directives")

        logger.info("Deploying Certificate to VirtualHost %s", vhost.filep)

        # Assign the final directives; order is maintained in find_dir
        self.aug.set(path["cert_path"][-1], cert_path)
        self.aug.set(path["cert_key"][-1], key_path)
        if chain_path is not None:
            if not path["chain_path"]:
                # No existing chain directive: add a fresh one to the vhost.
                self.parser.add_dir(
                    vhost.path, "SSLCertificateChainFile", chain_path)
            else:
                self.aug.set(path["chain_path"][-1], chain_path)

        # Save notes about the transaction that took place
        self.save_notes += ("Changed vhost at %s with addresses of %s\n"
                            "\tSSLCertificateFile %s\n"
                            "\tSSLCertificateKeyFile %s\n" %
                            (vhost.filep,
                             ", ".join(str(addr) for addr in vhost.addrs),
                             cert_path, key_path))
        if chain_path is not None:
            self.save_notes += "\tSSLCertificateChainFile %s\n" % chain_path

        # Make sure vhost is enabled
        if not vhost.enabled:
            self.enable_site(vhost)
    def choose_vhost(self, target_name):
        """Chooses a virtual host based on the given domain name.

        If there is no clear virtual host to be selected, the user is prompted
        with all available choices.

        :param str target_name: domain name

        :returns: ssl vhost associated with name
        :rtype: :class:`~letsencrypt_apache.obj.VirtualHost`

        :raises .errors.PluginError: If no vhost is available or chosen
        """
        # Allows for domain names to be associated with a virtual host
        if target_name in self.assoc:
            return self.assoc[target_name]
        # Try to find a reasonable vhost
        vhost = self._find_best_vhost(target_name)
        if vhost is not None:
            if not vhost.ssl:
                # Best match is HTTP-only; create an SSL twin for it.
                vhost = self.make_vhost_ssl(vhost)
            # Cache the association for future lookups.
            self.assoc[target_name] = vhost
            return vhost

        # No automatic match found; fall back to interactive selection.
        return self._choose_vhost_from_list(target_name)
    def _choose_vhost_from_list(self, target_name):
        """Prompt the user to pick a vhost for target_name and prepare it for SSL."""
        # Select a vhost from a list
        vhost = display_ops.select_vhost(target_name, self.vhosts)
        if vhost is None:
            logger.error(
                "No vhost exists with servername or alias of: %s. "
                "No vhost was selected. Please specify servernames "
                "in the Apache config", target_name)
            raise errors.PluginError("No vhost selected")
        elif not vhost.ssl:
            addrs = self._get_proposed_addrs(vhost, "443")
            # TODO: Conflicts is too conservative
            # NOTE(review): the generator variable below shadows the selected
            # ``vhost``; it deliberately iterates over all vhosts.
            if not any(vhost.enabled and vhost.conflicts(addrs) for vhost in self.vhosts):
                vhost = self.make_vhost_ssl(vhost)
            else:
                logger.error(
                    "The selected vhost would conflict with other HTTPS "
                    "VirtualHosts within Apache. Please select another "
                    "vhost or add ServerNames to your configuration.")
                raise errors.PluginError(
                    "VirtualHost not able to be selected.")

        # Remember the user's choice for subsequent calls.
        self.assoc[target_name] = vhost
        return vhost
    def _find_best_vhost(self, target_name):
        """Finds the best vhost for a target_name.

        This does not upgrade a vhost to HTTPS... it only finds the most
        appropriate vhost for the given target_name.

        :returns: VHost or None
        """
        # Scoring scheme (higher wins):
        # Points 4 - Servername SSL
        # Points 3 - Address name with SSL
        # Points 2 - Servername no SSL
        # Points 1 - Address name with no SSL
        best_candidate = None
        best_points = 0

        for vhost in self.vhosts:
            if target_name in vhost.get_names():
                points = 2
            elif any(addr.get_addr() == target_name for addr in vhost.addrs):
                points = 1
            else:
                # No points given if names can't be found.
                # This gets hit but doesn't register
                continue  # pragma: no cover

            if vhost.ssl:
                points += 2

            if points > best_points:
                best_points = points
                best_candidate = vhost

        # No winners here... is there only one reasonable vhost?
        if best_candidate is None:
            # reasonable == Not all _default_ addrs
            reasonable_vhosts = self._non_default_vhosts()
            if len(reasonable_vhosts) == 1:
                best_candidate = reasonable_vhosts[0]

        return best_candidate
def _non_default_vhosts(self):
"""Return all non _default_ only vhosts."""
return [vh for vh in self.vhosts if not all(
addr.get_addr() == "_default_" for addr in vh.addrs
)]
def get_all_names(self):
    """Returns all names found in the Apache Configuration.

    :returns: All ServerNames, ServerAliases, and reverse DNS entries for
              virtual host addresses
    :rtype: set

    """
    all_names = set()

    for vhost in self.vhosts:
        all_names.update(vhost.get_names())
        for addr in vhost.addrs:
            addr_str = addr.get_addr()
            if common.hostname_regex.match(addr_str):
                # Address is already a hostname (FQDN used as an addr).
                all_names.add(addr_str)
            else:
                # Address is an IP; try a reverse-DNS lookup instead.
                reverse_name = self.get_name_from_ip(addr)
                if reverse_name:
                    all_names.add(reverse_name)

    return all_names
def get_name_from_ip(self, addr):  # pylint: disable=no-self-use
    """Returns a reverse dns name if available.

    :param addr: IP Address
    :type addr: ~.common.Addr

    :returns: name or empty string if name cannot be determined
    :rtype: str

    """
    ip_str = addr.get_addr()
    # Private address space would not resolve to a public name,
    # so skip the lookup entirely.
    if common.private_ips_regex.match(ip_str):
        return ""
    try:
        # inet_aton raises socket.error when the string is not an
        # IPv4 address, short-circuiting the reverse lookup.
        socket.inet_aton(ip_str)
        return socket.gethostbyaddr(ip_str)[0]
    except (socket.error, socket.herror, socket.timeout):
        return ""
def _add_servernames(self, host):
"""Helper function for get_virtual_hosts().
:param host: In progress vhost whose names will be added
:type host: :class:`~letsencrypt_apache.obj.VirtualHost`
"""
# Take the final ServerName as each overrides the previous
servername_match = self.parser.find_dir(
"ServerName", None, start=host.path, exclude=False)
serveralias_match = self.parser.find_dir(
"ServerAlias", None, start=host.path, exclude=False)
for alias in serveralias_match:
host.aliases.add(self.parser.get_arg(alias))
if servername_match:
# Get last ServerName as each overwrites the previous
host.name = self.parser.get_arg(servername_match[-1])
def _create_vhost(self, path):
    """Used by get_virtual_hosts to create vhost objects

    :param str path: Augeas path to virtual host

    :returns: newly created vhost
    :rtype: :class:`~letsencrypt_apache.obj.VirtualHost`

    """
    # Collect every address argument of the <VirtualHost ...> node.
    addrs = set()
    args = self.aug.match(path + "/arg")
    for arg in args:
        addrs.add(obj.Addr.fromstring(self.parser.get_arg(arg)))
    # A vhost is considered SSL if it contains "SSLEngine on".
    is_ssl = False
    if self.parser.find_dir("SSLEngine", "on", start=path, exclude=False):
        is_ssl = True
    # Map the Augeas node path back to the config file on disk and
    # check whether that file is linked in sites-enabled.
    filename = get_file_path(path)
    is_enabled = self.is_site_enabled(filename)
    vhost = obj.VirtualHost(filename, path, addrs, is_ssl, is_enabled)
    # Populate ServerName/ServerAlias on the freshly built object.
    self._add_servernames(vhost)
    return vhost
# TODO: make "sites-available" a configurable directory
def get_virtual_hosts(self):
    """Returns list of virtual hosts found in the Apache configuration.

    :returns: List of :class:`~letsencrypt_apache.obj.VirtualHost`
        objects found in configuration
    :rtype: list

    """
    # Search sites-available, httpd.conf for possible virtual hosts
    # (case-insensitive match on the VirtualHost label).
    paths = self.aug.match(
        ("/files%s/sites-available//*[label()=~regexp('%s')]" %
         (self.parser.root, parser.case_i("VirtualHost"))))
    # Build the vhost objects directly; a comprehension replaces the
    # manual append loop.
    return [self._create_vhost(path) for path in paths]
def is_name_vhost(self, target_addr):
    """Returns if vhost is a name based vhost

    NameVirtualHost was deprecated in Apache 2.4 as all VirtualHosts are
    now NameVirtualHosts. If version is earlier than 2.4, check if addr
    has a NameVirtualHost directive in the Apache config

    :param letsencrypt_apache.obj.Addr target_addr: vhost address

    :returns: Success
    :rtype: bool

    """
    if self.version >= (2, 4):
        # Apache >= 2.4: every VirtualHost is name-based by default.
        return True
    # Mixed and matched wildcard NameVirtualHost with VirtualHost
    # behavior is undefined, so look for an exact-match directive.
    # Note the addr can be an FQDN although Apache does not recommend it.
    return self.parser.find_dir("NameVirtualHost", str(target_addr))
def add_name_vhost(self, addr):
    """Adds NameVirtualHost directive for given address.

    :param addr: Address that will be added as NameVirtualHost directive
    :type addr: :class:`~letsencrypt_apache.obj.Addr`

    """
    loc = parser.get_aug_path(self.parser.loc["name"])
    if addr.get_port() == "443":
        # Port 443 directives live inside <IfModule mod_ssl.c> so the
        # config stays valid when mod_ssl is disabled.
        added_path = self.parser.add_dir_to_ifmodssl(
            loc, "NameVirtualHost", [str(addr)])
    else:
        added_path = self.parser.add_dir(
            loc, "NameVirtualHost", [str(addr)])

    note = ("Setting %s to be NameBasedVirtualHost\n"
            "\tDirective added to %s\n" % (addr, added_path))
    logger.debug(note)
    self.save_notes += note
def prepare_server_https(self, port, temp=False):
    """Prepare the server for HTTPS.

    Make sure that the ssl_module is loaded and that the server
    is appropriately listening on port.

    :param str port: Port to listen on
    :param bool temp: Whether enabling mod_ssl is a temporary change
        (passed through to enable_mod for rollback registration)

    """
    # mod_ssl is required before any SSL vhost can work.
    if "ssl_module" not in self.parser.modules:
        logger.info("Loading mod_ssl into Apache Server")
        self.enable_mod("ssl", temp=temp)

    # Check for Listen <port>
    # Note: This could be made to also look for ip:443 combo
    if not self.parser.find_dir("Listen", port):
        logger.debug("No Listen %s directive found. Setting the "
                     "Apache Server to Listen on port %s", port, port)

        if port == "443":
            args = [port]
        else:
            # Non-standard ports should specify https protocol
            args = [port, "https"]

        # Wrapped in <IfModule mod_ssl.c> so the Listen only applies
        # when SSL is actually available.
        self.parser.add_dir_to_ifmodssl(
            parser.get_aug_path(
                self.parser.loc["listen"]), "Listen", args)
        self.save_notes += "Added Listen %s directive to %s\n" % (
            port, self.parser.loc["listen"])
def make_addrs_sni_ready(self, addrs):
    """Checks to see if the server is ready for SNI challenges.

    :param addrs: Addresses to check SNI compatibility
    :type addrs: :class:`~letsencrypt_apache.obj.Addr`

    """
    if self.version >= (2, 4):
        # Version 2.4 and later are automatically SNI ready.
        return

    # Older Apache needs an explicit NameVirtualHost directive per addr.
    for address in addrs:
        if self.is_name_vhost(address):
            continue
        logger.debug("Setting VirtualHost at %s to be a name "
                     "based virtual host", address)
        self.add_name_vhost(address)
def make_vhost_ssl(self, nonssl_vhost):  # pylint: disable=too-many-locals
    """Makes an ssl_vhost version of a nonssl_vhost.

    Duplicates vhost and adds default ssl options
    New vhost will reside as (nonssl_vhost.path) +
    ``letsencrypt_apache.constants.CLI_DEFAULTS["le_vhost_ext"]``

    .. note:: This function saves the configuration

    :param nonssl_vhost: Valid VH that doesn't have SSLEngine on
    :type nonssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`

    :returns: SSL vhost
    :rtype: :class:`~letsencrypt_apache.obj.VirtualHost`

    :raises .errors.PluginError: If more than one virtual host is in
        the file or if plugin is unable to write/read vhost files.

    """
    avail_fp = nonssl_vhost.filep
    ssl_fp = self._get_ssl_vhost_path(avail_fp)

    # Copy the original file wrapped in <IfModule mod_ssl.c> to ssl_fp.
    self._copy_create_ssl_vhost_skeleton(avail_fp, ssl_fp)

    # Reload augeas to take into account the new vhost
    self.aug.load()

    # Get Vhost augeas path for new vhost
    vh_p = self.aug.match("/files%s//* [label()=~regexp('%s')]" %
                          (ssl_fp, parser.case_i("VirtualHost")))
    if len(vh_p) != 1:
        logger.error("Error: should only be one vhost in %s", avail_fp)
        raise errors.PluginError("Only one vhost per file is allowed")
    else:
        # This simplifies the process
        vh_p = vh_p[0]

    # Update Addresses (rewrite every address to port 443)
    self._update_ssl_vhosts_addrs(vh_p)

    # Add placeholder SSL directives (cert/key paths filled in later)
    self._add_dummy_ssl_directives(vh_p)

    # Log actions and create save notes
    logger.info("Created an SSL vhost at %s", ssl_fp)
    self.save_notes += "Created ssl vhost at %s\n" % ssl_fp
    self.save()

    # We know the length is one because of the assertion above
    # Create the Vhost object
    ssl_vhost = self._create_vhost(vh_p)
    self.vhosts.append(ssl_vhost)

    # NOTE: Searches through Augeas seem to ruin changes to directives
    #   The configuration must also be saved before being searched
    #   for the new directives; For these reasons... this is tacked
    #   on after fully creating the new vhost

    # Now check if addresses need to be added as NameBasedVhost addrs
    # This is for compliance with versions of Apache < 2.4
    self._add_name_vhost_if_necessary(ssl_vhost)

    return ssl_vhost
def _get_ssl_vhost_path(self, non_ssl_vh_fp):
# Get filepath of new ssl_vhost
if non_ssl_vh_fp.endswith(".conf"):
return non_ssl_vh_fp[:-(len(".conf"))] + self.conf("le_vhost_ext")
else:
return non_ssl_vh_fp + self.conf("le_vhost_ext")
def _copy_create_ssl_vhost_skeleton(self, avail_fp, ssl_fp):
"""Copies over existing Vhost with IfModule mod_ssl.c> skeleton.
:param str avail_fp: Pointer to the original available non-ssl vhost
:param str ssl_fp: Full path where the new ssl_vhost will reside.
A new file is created on the filesystem.
"""
# First register the creation so that it is properly removed if
# configuration is rolled back
self.reverter.register_file_creation(False, ssl_fp)
try:
with open(avail_fp, "r") as orig_file:
with open(ssl_fp, "w") as new_file:
new_file.write("<IfModule mod_ssl.c>\n")
for line in orig_file:
new_file.write(line)
new_file.write("</IfModule>\n")
except IOError:
logger.fatal("Error writing/reading to file in make_vhost_ssl")
raise errors.PluginError("Unable to write/read in make_vhost_ssl")
def _update_ssl_vhosts_addrs(self, vh_path):
    """Rewrite every address of the vhost at vh_path to port 443.

    :param str vh_path: Augeas path of the new SSL vhost

    :returns: the rewritten port-443 addresses
    :rtype: set of :class:`~letsencrypt_apache.obj.Addr`

    """
    ssl_addrs = set()
    # Each /arg node under the VirtualHost is one address argument.
    ssl_addr_p = self.aug.match(vh_path + "/arg")

    for addr in ssl_addr_p:
        old_addr = obj.Addr.fromstring(
            str(self.parser.get_arg(addr)))
        ssl_addr = old_addr.get_addr_obj("443")
        # Update the address argument in place within the Augeas tree.
        self.aug.set(addr, str(ssl_addr))
        ssl_addrs.add(ssl_addr)

    return ssl_addrs
def _add_dummy_ssl_directives(self, vh_path):
self.parser.add_dir(vh_path, "SSLCertificateFile",
"insert_cert_file_path")
self.parser.add_dir(vh_path, "SSLCertificateKeyFile",
"insert_key_file_path")
self.parser.add_dir(vh_path, "Include", self.mod_ssl_conf)
def _add_name_vhost_if_necessary(self, vhost):
    """Add NameVirtualHost Directives if necessary for new vhost.

    NameVirtualHosts was a directive in Apache < 2.4
    https://httpd.apache.org/docs/2.2/mod/core.html#namevirtualhost

    :param vhost: New virtual host that was recently created.
    :type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`

    """
    need_to_save = False

    # See if the exact address appears in any other vhost
    # Remember 1.1.1.1:* == 1.1.1.1 -> hence any()
    for addr in vhost.addrs:
        for test_vh in self.vhosts:
            # Only another (different) enabled file sharing the exact
            # address forces name-based virtual hosting.
            if (vhost.filep != test_vh.filep and
                    any(test_addr == addr for test_addr in test_vh.addrs) and
                    not self.is_name_vhost(addr)):
                self.add_name_vhost(addr)
                logger.info("Enabling NameVirtualHosts on %s", addr)
                need_to_save = True

    # Persist only when a directive was actually added.
    if need_to_save:
        self.save()
############################################################################
# Enhancements
############################################################################
def supported_enhancements(self):  # pylint: disable=no-self-use
    """Returns currently supported enhancements."""
    # HTTP -> HTTPS redirection is the only implemented enhancement.
    return ["redirect"]
def enhance(self, domain, enhancement, options=None):
    """Enhance configuration.

    :param str domain: domain to enhance
    :param str enhancement: enhancement type defined in
        :const:`~letsencrypt.constants.ENHANCEMENTS`
    :param options: options for the enhancement
        See :const:`~letsencrypt.constants.ENHANCEMENTS`
        documentation for appropriate parameter.

    :raises .errors.PluginError: If Enhancement is not supported, or if
        there is any other problem with the enhancement.

    """
    try:
        func = self._enhance_func[enhancement]
    except KeyError:
        raise errors.PluginError(
            "Unsupported enhancement: {0}".format(enhancement))
    try:
        func(self.choose_vhost(domain), options)
    except errors.PluginError:
        # Logger.warn is deprecated; warning() is the supported name.
        logger.warning("Failed %s for %s", enhancement, domain)
        raise
def _enable_redirect(self, ssl_vhost, unused_options):
    """Redirect all equivalent HTTP traffic to ssl_vhost.

    .. todo:: This enhancement should be rewritten and will
       unfortunately require lots of debugging by hand.

    Adds Redirect directive to the port 80 equivalent of ssl_vhost
    First the function attempts to find the vhost with equivalent
    ip addresses that serves on non-ssl ports
    The function then adds the directive

    .. note:: This function saves the configuration

    :param ssl_vhost: Destination of traffic, an ssl enabled vhost
    :type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`

    :param unused_options: Not currently used
    :type unused_options: Not Available

    :returns: Success, general_vhost (HTTP vhost)
    :rtype: (bool, :class:`~letsencrypt_apache.obj.VirtualHost`)

    :raises .errors.PluginError: If no viable HTTP host can be created or
        used for the redirect.

    """
    # mod_rewrite powers the RewriteRule-based redirect.
    if "rewrite_module" not in self.parser.modules:
        self.enable_mod("rewrite")
    general_vh = self._get_http_vhost(ssl_vhost)

    if general_vh is None:
        # Add virtual_server with redirect
        logger.debug("Did not find http version of ssl virtual host "
                     "attempting to create")
        redirect_addrs = self._get_proposed_addrs(ssl_vhost)
        # Refuse to create a redirect vhost whose addresses would clash
        # with an already-enabled vhost.
        for vhost in self.vhosts:
            if vhost.enabled and vhost.conflicts(redirect_addrs):
                raise errors.PluginError(
                    "Unable to find corresponding HTTP vhost; "
                    "Unable to create one as intended addresses conflict; "
                    "Current configuration does not support automated "
                    "redirection")
        self._create_redirect_vhost(ssl_vhost)
    else:
        # Check if redirection already exists
        self._verify_no_redirects(general_vh)

        # Add directives to server
        # Note: These are not immediately searchable in sites-enabled
        #     even with save() and load()
        self.parser.add_dir(general_vh.path, "RewriteEngine", "on")
        self.parser.add_dir(general_vh.path, "RewriteRule",
                            constants.REWRITE_HTTPS_ARGS)
        self.save_notes += ("Redirecting host in %s to ssl vhost in %s\n" %
                            (general_vh.filep, ssl_vhost.filep))
        self.save()

        logger.info("Redirecting vhost in %s to ssl vhost in %s",
                    general_vh.filep, ssl_vhost.filep)
def _verify_no_redirects(self, vhost):
"""Checks to see if existing redirect is in place.
Checks to see if virtualhost already contains a rewrite or redirect
returns boolean, integer
:param vhost: vhost to check
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:raises errors.PluginError: When another redirection exists
"""
rewrite_path = self.parser.find_dir(
"RewriteRule", None, start=vhost.path)
redirect_path = self.parser.find_dir("Redirect", None, start=vhost.path)
if redirect_path:
# "Existing Redirect directive for virtualhost"
raise errors.PluginError("Existing Redirect present on HTTP vhost.")
if rewrite_path:
# "No existing redirection for virtualhost"
if len(rewrite_path) != len(constants.REWRITE_HTTPS_ARGS):
raise errors.PluginError("Unknown Existing RewriteRule")
for match, arg in itertools.izip(
rewrite_path, constants.REWRITE_HTTPS_ARGS):
if self.aug.get(match) != arg:
raise errors.PluginError("Unknown Existing RewriteRule")
raise errors.PluginError(
"Let's Encrypt has already enabled redirection")
def _create_redirect_vhost(self, ssl_vhost):
    """Creates an http_vhost specifically to redirect for the ssl_vhost.

    :param ssl_vhost: ssl vhost
    :type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`

    :returns: tuple of the form
        (`success`, :class:`~letsencrypt_apache.obj.VirtualHost`)
    :rtype: tuple

    """
    # Render the redirect VirtualHost text and write it to disk.
    text = self._get_redirect_config_str(ssl_vhost)
    redirect_filepath = self._write_out_redirect(ssl_vhost, text)

    # Reload Augeas so the new file is visible in its tree.
    self.aug.load()
    # Make a new vhost data structure and add it to the lists
    new_vhost = self._create_vhost(parser.get_aug_path(redirect_filepath))
    self.vhosts.append(new_vhost)

    # Finally create documentation for the change
    self.save_notes += ("Created a port 80 vhost, %s, for redirection to "
                        "ssl vhost %s\n" %
                        (new_vhost.filep, ssl_vhost.filep))
def _get_redirect_config_str(self, ssl_vhost):
    """Build the configuration text of the port-80 redirect vhost.

    ServerName/ServerAlias are carried over so the redirect vhost
    answers for the same names as the SSL vhost.
    """
    servername = ""
    if ssl_vhost.name is not None:
        servername = "ServerName " + ssl_vhost.name
    serveralias = ""
    if ssl_vhost.aliases:
        serveralias = "ServerAlias " + " ".join(ssl_vhost.aliases)

    addr_str = " ".join(
        str(addr) for addr in self._get_proposed_addrs(ssl_vhost))
    rewrite_rule = " ".join(constants.REWRITE_HTTPS_ARGS)

    return ("<VirtualHost %s>\n"
            "%s \n"
            "%s \n"
            "ServerSignature Off\n"
            "\n"
            "RewriteEngine On\n"
            "RewriteRule %s\n"
            "\n"
            "ErrorLog /var/log/apache2/redirect.error.log\n"
            "LogLevel warn\n"
            "</VirtualHost>\n"
            % (addr_str, servername, serveralias, rewrite_rule))
def _write_out_redirect(self, ssl_vhost, text):
    """Write the redirect vhost text into sites-available.

    :param ssl_vhost: SSL vhost the redirect points at (used only for
        naming the new file)
    :param str text: complete configuration text to write

    :returns: path of the file that was written
    :rtype: str

    """
    # This is the default name
    redirect_filename = "le-redirect.conf"

    # See if a more appropriate name can be applied
    if ssl_vhost.name is not None:
        # make sure servername doesn't exceed filename length restriction
        if len(ssl_vhost.name) < (255 - (len(redirect_filename) + 1)):
            redirect_filename = "le-redirect-%s.conf" % ssl_vhost.name

    redirect_filepath = os.path.join(
        self.parser.root, "sites-available", redirect_filename)

    # Register the new file that will be created
    # Note: always register the creation before writing to ensure file will
    # be removed in case of unexpected program exit
    self.reverter.register_file_creation(False, redirect_filepath)

    # Write out file
    with open(redirect_filepath, "w") as redirect_file:
        redirect_file.write(text)
    logger.info("Created redirect file: %s", redirect_filename)

    return redirect_filepath
def _get_http_vhost(self, ssl_vhost):
"""Find appropriate HTTP vhost for ssl_vhost."""
# First candidate vhosts filter
candidate_http_vhs = [
vhost for vhost in self.vhosts if not vhost.ssl
]
# Second filter - check addresses
for http_vh in candidate_http_vhs:
if http_vh.same_server(ssl_vhost):
return http_vh
return None
def _get_proposed_addrs(self, vhost, port="80"): # pylint: disable=no-self-use
"""Return all addrs of vhost with the port replaced with the specified.
:param obj.VirtualHost ssl_vhost: Original Vhost
:param str port: Desired port for new addresses
:returns: `set` of :class:`~obj.Addr`
"""
redirects = set()
for addr in vhost.addrs:
redirects.add(addr.get_addr_obj(port))
return redirects
def get_all_certs_keys(self):
    """Find all existing keys, certs from configuration.

    Retrieve all certs and keys set in VirtualHosts on the Apache server

    :returns: tuples with form [(cert, key, path)]
        cert - str path to certificate file
        key - str path to associated key file
        path - File path to configuration file.
    :rtype: set

    """
    # NOTE(review): despite the historical ":rtype: list" wording this
    # actually returns a set; documented as set above.
    c_k = set()

    for vhost in self.vhosts:
        if vhost.ssl:
            cert_path = self.parser.find_dir(
                "SSLCertificateFile", None,
                start=vhost.path, exclude=False)
            key_path = self.parser.find_dir(
                "SSLCertificateKeyFile", None,
                start=vhost.path, exclude=False)

            if cert_path and key_path:
                # Use the last directive of each: later directives
                # override earlier ones in Apache.
                cert = os.path.abspath(self.parser.get_arg(cert_path[-1]))
                key = os.path.abspath(self.parser.get_arg(key_path[-1]))
                c_k.add((cert, key, get_file_path(cert_path[-1])))
            else:
                # SSL vhost missing cert or key directive.
                logger.warning(
                    "Invalid VirtualHost configuration - %s", vhost.filep)

    return c_k
def is_site_enabled(self, avail_fp):
    """Checks to see if the given site is enabled.

    .. todo:: fix hardcoded sites-enabled, check os.path.samefile

    :param str avail_fp: Complete file path of available site

    :returns: Success
    :rtype: bool

    """
    enabled_dir = os.path.join(self.parser.root, "sites-enabled")
    # sites-enabled entries are symlinks; compare their resolved targets.
    return any(
        os.path.realpath(os.path.join(enabled_dir, entry)) == avail_fp
        for entry in os.listdir(enabled_dir))
def enable_site(self, vhost):
    """Enables an available site, Apache restart required.

    .. note:: Does not make sure that the site correctly works or that all
       modules are enabled appropriately.

    .. todo:: This function should number subdomains before the domain vhost

    .. todo:: Make sure link is not broken...

    :param vhost: vhost to enable
    :type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`

    :raises .errors.NotSupportedError: If filesystem layout is not
        supported.

    """
    # Already enabled: nothing to do.
    if self.is_site_enabled(vhost.filep):
        return

    if "/sites-available/" in vhost.filep:
        enabled_path = ("%s/sites-enabled/%s" %
                        (self.parser.root, os.path.basename(vhost.filep)))
        # Register the symlink first so a rollback removes it.
        self.reverter.register_file_creation(False, enabled_path)
        os.symlink(vhost.filep, enabled_path)
        vhost.enabled = True
        logger.info("Enabling available site: %s", vhost.filep)
        self.save_notes += "Enabled site %s\n" % vhost.filep
    else:
        raise errors.NotSupportedError(
            "Unsupported filesystem layout. "
            "sites-available/enabled expected.")
def enable_mod(self, mod_name, temp=False):
    """Enables module in Apache.

    Both enables and restarts Apache so module is active.

    :param str mod_name: Name of the module to enable. (e.g. 'ssl')
    :param bool temp: Whether or not this is a temporary action.

    :raises .errors.NotSupportedError: If the filesystem layout is not
        supported.
    :raises .errors.MisconfigurationError: If a2enmod or a2dismod cannot be
        run.

    """
    # Support Debian specific setup (mods-available/mods-enabled layout)
    if (not os.path.isdir(os.path.join(self.parser.root, "mods-available"))
            or not os.path.isdir(
                os.path.join(self.parser.root, "mods-enabled"))):
        raise errors.NotSupportedError(
            "Unsupported directory layout. You may try to enable mod %s "
            "and try again." % mod_name)

    self._enable_mod_debian(mod_name, temp)
    self.save_notes += "Enabled %s module in Apache" % mod_name
    logger.debug("Enabled Apache %s module", mod_name)

    # Modules can enable additional config files. Variables may be defined
    # within these new configuration sections.
    # Restart is not necessary as DUMP_RUN_CFG uses latest config.
    self.parser.update_runtime_variables(self.conf("ctl"))
    # Register both canonical names so later membership checks pass.
    self.parser.modules.add(mod_name + "_module")
    self.parser.modules.add("mod_" + mod_name + ".c")
def _enable_mod_debian(self, mod_name, temp):
    """Assumes mods-available, mods-enabled layout.

    :param str mod_name: module to enable via a2enmod
    :param bool temp: register the undo as a temporary change

    :raises .errors.MisconfigurationError: if a2dismod is unavailable
        (reversal could not be guaranteed)

    """
    # Generate reversal command.
    # Try to be safe here... check that we can probably reverse before
    # applying enmod command
    if not le_util.exe_exists(self.conf("dismod")):
        raise errors.MisconfigurationError(
            "Unable to find a2dismod, please make sure a2enmod and "
            "a2dismod are configured correctly for letsencrypt.")

    # Register the a2dismod undo before running a2enmod.
    self.reverter.register_undo_command(
        temp, [self.conf("dismod"), mod_name])
    le_util.run_script([self.conf("enmod"), mod_name])
def restart(self):
    """Restarts apache server.

    .. todo:: This function will be converted to using reload

    :raises .errors.MisconfigurationError: If unable to restart due
        to a configuration problem, or if the restart subprocess
        cannot be run.

    """
    # Delegates to the module-level helper so it can also be used
    # outside the configurator (see apache_restart's docstring).
    return apache_restart(self.conf("init-script"))
def config_test(self):  # pylint: disable=no-self-use
    """Check the configuration of Apache for errors.

    :raises .errors.MisconfigurationError: If config_test fails

    """
    try:
        # "apachectl configtest" exits nonzero on a broken config,
        # which run_script surfaces as SubprocessError.
        le_util.run_script([self.conf("ctl"), "configtest"])
    except errors.SubprocessError as err:
        raise errors.MisconfigurationError(str(err))
def get_version(self):
    """Return version of Apache Server.

    Version is returned as tuple. (ie. 2.4.7 = (2, 4, 7))

    :returns: version
    :rtype: tuple

    :raises .PluginError: if unable to find Apache version

    """
    try:
        stdout, _ = le_util.run_script([self.conf("ctl"), "-v"])
    except errors.SubprocessError:
        raise errors.PluginError(
            "Unable to run %s -v" % self.conf("ctl"))

    # Expect exactly one "Apache/x.y.z" token; anything else is
    # ambiguous and treated as a failure.
    matches = re.findall(r"Apache/([0-9\.]*)", stdout, re.IGNORECASE)
    if len(matches) != 1:
        raise errors.PluginError("Unable to find Apache version")

    return tuple(int(part) for part in matches[0].split("."))
def more_info(self):
    """Human-readable string to help understand the module"""
    version_str = ".".join(str(part) for part in self.version)
    return (
        "Configures Apache to authenticate and install HTTPS.{0}"
        "Server root: {root}{0}"
        "Version: {version}".format(
            os.linesep, root=self.parser.loc["root"],
            version=version_str)
    )
###########################################################################
# Challenges Section
###########################################################################
def get_chall_pref(self, unused_domain):  # pylint: disable=no-self-use
    """Return list of challenge preferences."""
    # DVSNI is the only challenge type this plugin performs.
    return [challenges.DVSNI]
def perform(self, achalls):
    """Perform the configuration related challenge.

    This function currently assumes all challenges will be fulfilled.
    If this turns out not to be the case in the future. Cleanup and
    outstanding challenges will have to be designed better.

    :param achalls: annotated challenges to fulfill
    :returns: challenge responses, index-aligned with achalls
    :rtype: list

    """
    # Track outstanding challenges so cleanup() knows when it is safe
    # to revert the challenge configuration.
    self._chall_out.update(achalls)
    responses = [None] * len(achalls)
    apache_dvsni = dvsni.ApacheDvsni(self)

    for i, achall in enumerate(achalls):
        if isinstance(achall, achallenges.DVSNI):
            # Currently also have dvsni hold associated index
            # of the challenge. This helps to put all of the responses back
            # together when they are all complete.
            apache_dvsni.add_chall(achall, i)

    sni_response = apache_dvsni.perform()
    if sni_response:
        # Must restart in order to activate the challenges.
        # Handled here because we may be able to load up other challenge
        # types
        self.restart()

        # Go through all of the challenges and assign them to the proper
        # place in the responses return value. All responses must be in the
        # same order as the original challenges.
        for i, resp in enumerate(sni_response):
            responses[apache_dvsni.indices[i]] = resp

    return responses
def cleanup(self, achalls):
    """Revert all challenges."""
    self._chall_out.difference_update(achalls)

    # Tear down only once every outstanding challenge is accounted for;
    # another challenge may still depend on the temporary config.
    if self._chall_out:
        return

    self.revert_challenge_config()
    self.restart()
    self.parser.init_modules()
def apache_restart(apache_init_script):
    """Restarts the Apache Server.

    :param str apache_init_script: Path to the Apache init script.

    .. todo:: Try to use reload instead. (This caused timing problems before)

    .. todo:: On failure, this should be a recovery_routine call with another
       restart. This will confuse and inhibit developers from testing code
       though. This change should happen after
       the ApacheConfigurator has been thoroughly tested. The function will
       need to be moved into the class again. Perhaps
       this version can live on... for testing purposes.

    :raises .errors.MisconfigurationError: If unable to restart due to a
        configuration problem, or if the restart subprocess cannot be run.

    """
    try:
        proc = subprocess.Popen([apache_init_script, "restart"],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)

    except (OSError, ValueError):
        # OSError: script not found/executable; ValueError: bad args.
        logger.fatal(
            "Unable to restart the Apache process with %s", apache_init_script)
        raise errors.MisconfigurationError(
            "Unable to restart Apache process with %s" % apache_init_script)

    stdout, stderr = proc.communicate()

    if proc.returncode != 0:
        # Enter recovery routine...
        logger.error("Apache Restart Failed!\n%s\n%s", stdout, stderr)
        raise errors.MisconfigurationError(
            "Error while restarting Apache:\n%s\n%s" % (stdout, stderr))
def get_file_path(vhost_path):
    """Get file path from augeas_vhost_path.

    Takes in Augeas path and returns the file name

    :param str vhost_path: Augeas virtual host path

    :returns: filename of vhost
    :rtype: str

    """
    # Drop the leading "/files" Augeas prefix.
    file_path = vhost_path[6:]

    # Repeatedly trim trailing /IfModule and /VirtualHost node
    # components (case-insensitively) until only the on-disk filename
    # remains.
    while True:
        lowered = file_path.lower()
        cut = lowered.find("/ifmodule")
        if cut == -1:
            cut = lowered.find("/virtualhost")
        if cut == -1:
            return file_path
        file_path = file_path[:cut]
def temp_install(options_ssl):
    """Temporary install for convenience.

    Copies the packaged mod_ssl options file to *options_ssl* if it is
    not already present on disk.

    :param str options_ssl: destination path for the options-ssl config

    """
    # WARNING: THIS IS A POTENTIAL SECURITY VULNERABILITY
    # THIS SHOULD BE HANDLED BY THE PACKAGE MANAGER
    # AND TAKEN OUT BEFORE RELEASE, INSTEAD
    # SHOWING A NICE ERROR MESSAGE ABOUT THE PROBLEM.

    # Check to make sure options-ssl.conf is installed
    if not os.path.isfile(options_ssl):
        shutil.copyfile(constants.MOD_SSL_CONF_SRC, options_ssl)
| jmhodges/letsencrypt | letsencrypt-apache/letsencrypt_apache/configurator.py | Python | apache-2.0 | 46,264 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
StatisticsByCategories.py
---------------------
Date : September 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Victor Olaya'
__date__ = 'September 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import QgsStatisticalSummary
from processing.core.outputs import OutputTable
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.tools import dataobjects, vector
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTableField
class StatisticsByCategories(GeoAlgorithm):
    """Per-category statistics of a numeric field of a vector layer.

    Groups the features of the input layer by a category field and
    writes min/max/mean/stddev/sum/count of the value field for each
    category into an output table.
    """

    INPUT_LAYER = 'INPUT_LAYER'
    VALUES_FIELD_NAME = 'VALUES_FIELD_NAME'
    CATEGORIES_FIELD_NAME = 'CATEGORIES_FIELD_NAME'
    OUTPUT = 'OUTPUT'

    def defineCharacteristics(self):
        """Declare algorithm metadata, parameters and outputs."""
        self.name, self.i18n_name = self.trAlgorithm('Statistics by categories')
        self.group, self.i18n_group = self.trAlgorithm('Vector table tools')
        self.addParameter(ParameterVector(self.INPUT_LAYER,
                                          self.tr('Input vector layer')))
        self.addParameter(ParameterTableField(self.VALUES_FIELD_NAME,
                                              self.tr('Field to calculate statistics on'),
                                              self.INPUT_LAYER, ParameterTableField.DATA_TYPE_NUMBER))
        self.addParameter(ParameterTableField(self.CATEGORIES_FIELD_NAME,
                                              self.tr('Field with categories'),
                                              self.INPUT_LAYER, ParameterTableField.DATA_TYPE_ANY))
        self.addOutput(OutputTable(self.OUTPUT, self.tr('Statistics by category')))

    def processAlgorithm(self, progress):
        """Group values by category and write the summary statistics.

        :param progress: progress reporter with a setPercentage() method
        """
        layer = dataobjects.getObjectFromUri(self.getParameterValue(self.INPUT_LAYER))
        valuesFieldName = self.getParameterValue(self.VALUES_FIELD_NAME)
        categoriesFieldName = self.getParameterValue(self.CATEGORIES_FIELD_NAME)
        output = self.getOutputFromName(self.OUTPUT)

        valuesField = layer.fieldNameIndex(valuesFieldName)
        categoriesField = layer.fieldNameIndex(categoriesFieldName)

        features = vector.features(layer)
        # Guard against an empty layer: the loop body never runs, but the
        # original unconditional division raised ZeroDivisionError.
        total = 100.0 / len(features) if len(features) else 0
        values = {}
        for current, feat in enumerate(features):
            progress.setPercentage(int(current * total))
            attrs = feat.attributes()
            try:
                value = float(attrs[valuesField])
                cat = str(attrs[categoriesField])
                values.setdefault(cat, []).append(value)
            except (TypeError, ValueError):
                # Skip NULL / non-numeric values; narrowed from a bare
                # except that would also have hidden programming errors.
                continue

        fields = ['category', 'min', 'max', 'mean', 'stddev', 'sum', 'count']
        writer = output.getTableWriter(fields)
        stat = QgsStatisticalSummary(QgsStatisticalSummary.Min | QgsStatisticalSummary.Max |
                                     QgsStatisticalSummary.Mean | QgsStatisticalSummary.StDevSample |
                                     QgsStatisticalSummary.Sum | QgsStatisticalSummary.Count)
        for (cat, v) in list(values.items()):
            stat.calculate(v)
            record = [cat, stat.min(), stat.max(), stat.mean(), stat.sampleStDev(), stat.sum(), stat.count()]
            writer.addRecord(record)
| fritsvanveen/QGIS | python/plugins/processing/algs/qgis/StatisticsByCategories.py | Python | gpl-2.0 | 4,207 |
# -*- coding: utf-8 -*-
import numbers
import numpy as np
from ..constants import BOLTZMANN_IN_MEV_K
from ..energy import Energy
class Analysis(object):
r"""Class containing methods for the Data class
Attributes
----------
detailed_balance_factor
Methods
-------
integrate
position
width
scattering_function
dynamic_susceptibility
estimate_background
get_keys
get_bounds
"""
@property
def detailed_balance_factor(self):
    r"""Returns the detailed balance factor (sometimes called the Bose
    factor)

    Returns
    -------
    dbf : ndarray
        The detailed balance factor (temperature correction)

    """
    # Q[:, 3] is the energy axis; dividing by k_B (in meV/K) and the
    # sample temperature makes the exponent dimensionless.
    exponent = -self.Q[:, 3] / BOLTZMANN_IN_MEV_K / self.temp
    return 1. - np.exp(exponent)
def integrate(self, bounds=None, background=None, hkle=True):
    r"""Returns the integrated intensity within given bounds

    Parameters
    ----------
    bounds : bool, optional
        A boolean expression representing the bounds inside which the
        calculation will be performed

    background : float or dict, optional
        Default: None

    hkle : bool, optional
        If True, integrates only over h, k, l, e dimensions, otherwise
        integrates over all dimensions in :py:attr:`.Data.data`

    Returns
    -------
    result : float
        The integrated intensity either over all data, or within
        specified boundaries

    """
    result = 0
    # Trapezoidal integration of the background-subtracted intensity
    # along each requested axis, accumulated into a single scalar.
    for key in self.get_keys(hkle):
        result += np.trapz(self.intensity[self.get_bounds(bounds)] - self.estimate_background(background),
                           np.squeeze(self.data[key][self.get_bounds(bounds)]))

    return result
def position(self, bounds=None, background=None, hkle=True):
    r"""Returns the position of a peak within the given bounds

    Parameters
    ----------
    bounds : bool, optional
        A boolean expression representing the bounds inside which the
        calculation will be performed

    background : float or dict, optional
        Default: None

    hkle : bool, optional
        If True, integrates only over h, k, l, e dimensions, otherwise
        integrates over all dimensions in :py:attr:`.Data.data`

    Returns
    -------
    result : tup
        The result is a tuple with position in each dimension of Q,
        (h, k, l, e)

    """
    result = ()
    for key in self.get_keys(hkle):
        _result = 0
        # First moment (center of mass): integral of coordinate times
        # background-subtracted intensity, normalized by the
        # integrated intensity.
        for key_integrate in self.get_keys(hkle):
            _result += np.trapz(self.data[key][self.get_bounds(bounds)] *
                                (self.intensity[self.get_bounds(bounds)] - self.estimate_background(background)),
                                self.data[key_integrate][self.get_bounds(bounds)]) / self.integrate(bounds, background)
        result += (np.squeeze(_result),)

    if hkle:
        return result
    else:
        # Dict view keyed by the same names returned by get_keys.
        return dict((key, value) for key, value in zip(self.get_keys(hkle), result))
def width(self, bounds=None, background=None, fwhm=False, hkle=True):
r"""Returns the mean-squared width of a peak within the given bounds
Parameters
----------
bounds : bool, optional
A boolean expression representing the bounds inside which the
calculation will be performed
background : float or dict, optional
Default: None
fwhm : bool, optional
If True, returns width in fwhm, otherwise in mean-squared width.
Default: False
hkle : bool, optional
If True, integrates only over h, k, l, e dimensions, otherwise
integrates over all dimensions in :py:attr:`.Data.data`
Returns
-------
result : tup
The result is a tuple with the width in each dimension of Q,
(h, k, l, e)
"""
result = ()
for key in self.get_keys(hkle):
_result = 0
for key_integrate in self.get_keys(hkle):
_result += np.trapz((self.data[key][self.get_bounds(bounds)] -
self.position(bounds, background, hkle=False)[key]) ** 2 *
(self.intensity[self.get_bounds(bounds)] - self.estimate_background(background)),
self.data[key_integrate][self.get_bounds(bounds)]) / self.integrate(bounds, background)
if fwhm:
result += (np.sqrt(np.squeeze(_result)) * 2. * np.sqrt(2. * np.log(2.)),)
else:
result += (np.squeeze(_result),)
if hkle:
return result
else:
return dict((key, value) for key, value in zip(self.get_keys(hkle), result))
def scattering_function(self, material, ei):
r"""Returns the neutron scattering function, i.e. the detector counts
scaled by :math:`4 \pi / \sigma_{\mathrm{tot}} * k_i/k_f`.
Parameters
----------
material : object
Definition of the material given by the :py:class:`.Material`
class
ei : float
Incident energy in meV
Returns
-------
counts : ndarray
The detector counts scaled by the total scattering cross section
and ki/kf
"""
ki = Energy(energy=ei).wavevector
kf = Energy(energy=ei - self.e).wavevector
return 4 * np.pi / material.total_scattering_cross_section * ki / kf * self.detector
def dynamic_susceptibility(self, material, ei):
r"""Returns the dynamic susceptibility
:math:`\chi^{\prime\prime}(\mathbf{Q},\hbar\omega)`
Parameters
----------
material : object
Definition of the material given by the :py:class:`.Material`
class
ei : float
Incident energy in meV
Returns
-------
counts : ndarray
The detector counts turned into the scattering function multiplied
by the detailed balance factor
"""
return self.scattering_function(material, ei) * self.detailed_balance_factor
def estimate_background(self, bg_params):
r"""Estimate the background according to ``type`` specified.
Parameters
----------
bg_params : dict
Input dictionary has keys 'type' and 'value'. Types are
* 'constant' : background is the constant given by 'value'
* 'percent' : background is estimated by the bottom x%, where x
is value
* 'minimum' : background is estimated as the detector counts
Returns
-------
background : float or ndarray
Value determined to be the background. Will return ndarray only if
`'type'` is `'constant'` and `'value'` is an ndarray
"""
if isinstance(bg_params, type(None)):
return 0
elif isinstance(bg_params, numbers.Number):
return bg_params
elif bg_params['type'] == 'constant':
return bg_params['value']
elif bg_params['type'] == 'percent':
inten = self.intensity[self.intensity >= 0.]
Npts = int(inten.size * (bg_params['value'] / 100.))
min_vals = inten[np.argsort(inten)[:Npts]]
background = np.average(min_vals)
return background
elif bg_params['type'] == 'minimum':
return min(self.intensity)
else:
return 0
def get_bounds(self, bounds):
r"""Generates a to_fit tuple if bounds is present in kwargs
Parameters
----------
bounds : dict
Returns
-------
to_fit : tuple
Tuple of indices
"""
if bounds is not None:
return np.where(bounds)
else:
return np.where(self.Q[:, 0])
def get_keys(self, hkle):
r"""Returns all of the Dictionary key names
Parameters
----------
hkle : bool
If True only returns keys for h,k,l,e, otherwise returns all keys
Returns
-------
keys : list
:py:attr:`.Data.data` dictionary keys
"""
if hkle:
return [key for key in self.data if key in self.Q_keys.values()]
else:
return [key for key in self.data if key not in self.data_keys.values()]
| neutronpy/neutronpy | neutronpy/data/analysis.py | Python | mit | 8,726 |
import unittest
from slack_sdk.http_retry.builtin_async_handlers import AsyncRateLimitErrorRetryHandler
from slack_sdk.scim.v1.async_client import AsyncSCIMClient
from tests.helpers import async_test
from tests.slack_sdk.scim.mock_web_api_server import (
setup_mock_web_api_server,
cleanup_mock_web_api_server,
)
from ..my_retry_handler import MyRetryHandler
class TestSCIMClient_HttpRetries(unittest.TestCase):
    """Tests for AsyncSCIMClient HTTP retry handling against a local mock
    Slack SCIM API server (started/stopped per test on localhost:8888)."""
    def setUp(self):
        # Spin up the mock web API server used by all tests in this class.
        setup_mock_web_api_server(self)
    def tearDown(self):
        cleanup_mock_web_api_server(self)
    @async_test
    async def test_retries(self):
        # The 'remote_disconnected' token makes the mock server drop the
        # connection, so the custom handler should retry max_retry_count times.
        retry_handler = MyRetryHandler(max_retry_count=2)
        client = AsyncSCIMClient(
            base_url="http://localhost:8888/",
            token="xoxp-remote_disconnected",
            retry_handlers=[retry_handler],
        )
        try:
            await client.search_users(start_index=0, count=1)
            self.fail("An exception is expected")
        except Exception as _:
            pass
        self.assertEqual(2, retry_handler.call_count)
    @async_test
    async def test_ratelimited(self):
        # The 'ratelimited' token makes the mock server answer HTTP 429;
        # the built-in rate-limit handler retries, then surfaces the 429.
        client = AsyncSCIMClient(
            base_url="http://localhost:8888/",
            token="xoxp-ratelimited",
            retry_handlers=[AsyncRateLimitErrorRetryHandler()],
        )
        response = await client.search_users(start_index=0, count=1)
        # Just running retries; no assertions for call count so far
        self.assertEqual(429, response.status_code)
| slackapi/python-slackclient | tests/slack_sdk_async/scim/test_async_client_http_retry.py | Python | mit | 1,508 |
import sys
from subprocess import check_output
def get_changed_files(sha='HEAD~1'):
    """Return the file paths changed between HEAD and *sha* as text lines.

    ``subprocess.check_output`` returns ``bytes`` on Python 3; decode so
    callers always receive ``str`` paths (byte lines broke the extension
    matching in ``get_changed_mxds``).
    """
    out = check_output(['git', 'diff', '--name-only', 'HEAD', sha])
    if isinstance(out, bytes):
        out = out.decode('utf-8', 'replace')
    return out.splitlines()
def get_changed_mxds(sha='HEAD~1'):
    """Return the changed files whose names end in 'mxd' (case-insensitive).

    On Python 3 ``git diff`` output may arrive as ``bytes``; ``str(b'x.mxd')``
    yields ``"b'x.mxd'"`` which never matches ``.endswith('mxd')``, so decode
    byte paths explicitly before testing the suffix.
    """
    mxds = []
    for f in get_changed_files(sha):
        if isinstance(f, bytes):
            f = f.decode('utf-8', 'replace')
        if f.lower().endswith('mxd'):
            mxds.append(f)
    return mxds
| gisinc/slap | slap/git.py | Python | mit | 289 |
#!/usr/bin/python
from __future__ import absolute_import, division, print_function
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_native
from inspect import cleandoc
import tempfile
import os
import shutil
BUFSIZE = 65536
class BuildRepo(object):
    """Install a yum repository from a .repo file, an RPM, or a base URL.

    The repo/key sources are chosen in ``__init__`` from the module
    parameters; ``run`` then dispatches on the shape of the resulting repo
    string.  Most methods terminate the Ansible module via ``exit_json`` /
    ``fail_json`` rather than returning.
    """
    def __init__(self, module):
        self.module = module
        params = module.params
        # gpg is optional; rpm and yum are required (second arg True).
        self.gpg = self.module.get_bin_path('gpg')
        self.rpm = self.module.get_bin_path('rpm', True)
        self.yum = self.module.get_bin_path('yum', True)
        if params['use_local']:
            self.repo = params['local_repo']
            self.gpgkey = params['local_key']
            # remote_repo: epel-release
            # local_repo: http://some-local-server/path
            # will be converted to local-repo: epel-release
            if ('/' not in params['remote_repo']
                    and '/' in params['local_repo']):
                self.repo = params['remote_repo']
            # we can use something like
            # remote_repo: http://some.server/full/path/file.repo
            # local_repo: http://other.server/another/path/
            elif params['local_repo'].endswith('/'):
                file_name = params['remote_repo'].split('/')[-1]
                self.repo += file_name
            # NOTE(review): this appends the *remote* key's file name when the
            # remote key URL ends with '/'; checking local_key may have been
            # intended — confirm against the role that drives this module.
            if params['remote_key'] and params['remote_key'].endswith('/'):
                file_name = params['remote_key'].split('/')[-1]
                self.gpgkey += file_name
        else:
            self.repo = params['remote_repo']
            self.gpgkey = params['remote_key']
    def run(self):
        """Dispatch: .repo file, RPM/plain package name, or base URL."""
        if self.repo.endswith('.repo'):
            self.put_repofile()
            return
        if self.repo.endswith('.rpm') or not '/' in self.repo:
            self.install_rpm()
            return
        self.create_repo_from_url()
        return
    def get_file(self, url):
        """Return a local path for *url*, downloading to a temp file if it
        is remote; the temp file is registered for module cleanup."""
        if '://' not in url:
            # deal with local file
            return url
        # we have remote file
        suffix = url.split('/')[-1]
        with tempfile.NamedTemporaryFile(
                suffix=suffix, delete=False) as tmp_file:
            self.module.add_cleanup_file(tmp_file.name)
            try:
                rsp, info = fetch_url(self.module, url)
                if not rsp:
                    msg = "Failure downloading {}, {}".format(url, info['msg'])
                    self.module.fail_json(msg=msg)
                # Stream the body in BUFSIZE chunks to avoid large buffers.
                data = rsp.read(BUFSIZE)
                while data:
                    tmp_file.write(data)
                    data = rsp.read(BUFSIZE)
            except Exception as e:
                msg = "Failure downloading {}, {}".format(url, to_native(e))
                self.module.fail_json(msg=msg)
        return tmp_file.name
    def put_repofile(self):
        """Install the .repo file into /etc/yum.repos.d, importing the GPG
        key first; exits the module reporting whether anything changed."""
        repofile_name = self.repo.split('/')[-1]
        tmp_repofile = self.get_file(self.repo)
        changed_key = self.put_gpgkey()
        yum_repos_d_repofile = '/etc/yum.repos.d/' + repofile_name
        if not os.path.exists(yum_repos_d_repofile):
            shutil.copy(tmp_repofile, yum_repos_d_repofile)
            self.module.exit_json(changed=True, meta=self.repo)
        # File exists: only rewrite it when the content differs.
        tmp_md5 = self.module.md5(tmp_repofile)
        local_file_md5 = self.module.md5(yum_repos_d_repofile)
        if tmp_md5 != local_file_md5:
            shutil.copyfile(tmp_repofile, yum_repos_d_repofile)
            self.module.exit_json(changed=True, meta=self.repo)
        # Bitwise OR of bools: changed if the key import changed anything.
        self.module.exit_json(changed=False | changed_key, meta=self.repo)
    def put_gpgkey(self):
        """Import the configured GPG key if not already present.

        Returns True when a key was imported, False otherwise.
        """
        if not self.gpgkey:
            return False
        tmp_key_file = self.get_file(self.gpgkey)
        keyid = self.normalize_keyid(self.getkeyid(tmp_key_file))
        if self.is_key_imported(keyid):
            return False
        self.import_key(tmp_key_file)
        return True
    def get_pkg_name(self, url):
        """Resolve the package name for self.repo.

        Returns (name, local_file) where local_file is None when the repo is
        a plain package name.  *url* is only used in error messages.
        """
        pkg_file = None
        if '/' in self.repo or self.repo.endswith('.rpm'):
            pkg_file = self.get_file(self.repo)
        pkg_name = self.repo
        if pkg_file is None:
            return pkg_name, pkg_file
        # Query the rpm file itself for its canonical package name.
        cmd = self.rpm + " --qf '%{name}' -qp " + pkg_file
        stdout, _ = self.execute_command(cmd)
        lines = stdout.splitlines()
        if len(lines) == 0:
            msg = "Unable to find package name for {}; {}; {}".format(
                url, cmd, lines)
            self.module.fail_json(msg=msg)
        if len(lines) > 1:
            msg = "Several package names returned for {}".format(url)
            self.module.fail_json(msg=msg)
        return lines[0], pkg_file
    def is_rpm_installed(self, pkg_name):
        """Return True when `rpm -q` reports the package as installed."""
        cmd = self.rpm + ' -q ' + pkg_name
        rc, _, _ = self.module.run_command(cmd)
        return not rc
    def install_rpm(self):
        """Install the repo RPM via yum (idempotent); exits the module."""
        pkg_name, pkg_path = self.get_pkg_name(self.repo)
        changed_key = self.put_gpgkey()
        if self.is_rpm_installed(pkg_name):
            self.module.exit_json(changed=False | changed_key, meta=self.repo)
        cmd = self.yum + ' --setopt=*.skip_if_unavailable=1 -y install '
        if pkg_path is None:
            cmd += pkg_name
        else:
            cmd += pkg_path
        _, _ = self.execute_command(cmd)
        self.module.exit_json(changed=True, meta=self.repo)
    def create_repo_from_url(self):
        """Generate a .repo file for a base URL and install it."""
        if not self.module.params['name']:
            msg = "No name specified for the repo"
            self.module.fail_json(msg=msg)
        repo_name = self.module.params['name']
        fmt = {
            'name': repo_name,
            'repo': self.repo,
            'gpgcheck': 1 if self.gpgkey else 0
        }
        repo_file_name = repo_name + '.repo'
        repo_data = (
            """
            [{name}]
            name={name}
            baseurl={repo}
            enabled=1
            gpgcheck={gpgcheck}
            """
        )
        # cleandoc strips the common indentation of the template above.
        repo_data = cleandoc(repo_data.format(**fmt))
        tmp_dir = tempfile.gettempdir()
        tmp_file_name = tmp_dir + '/' + repo_file_name
        with open(tmp_file_name, 'w') as tmp_file:
            self.module.add_cleanup_file(tmp_file_name)
            try:
                tmp_file.write(repo_data)
            except Exception as e:
                msg = "Unable to write data to {}: {}".format(
                    tmp_file_name, to_native(e))
                self.module.fail_json(msg=msg)
        # Re-route through the .repo-file path with the generated file.
        self.repo = tmp_file_name
        self.put_repofile()
    def normalize_keyid(self, keyid):
        """Ensure a keyid has no leading 0x and no leading/trailing
        whitespace, and make sure it is uppercase."""
        ret = keyid.strip().upper()
        if ret.startswith('0x'):
            return ret[2:]
        elif ret.startswith('0X'):
            return ret[2:]
        else:
            return ret
    def execute_command(self, cmd):
        """Run *cmd* through a shell; fail the module on non-zero exit."""
        rc, stdout, stderr = self.module.run_command(
            cmd, use_unsafe_shell=True)
        if rc != 0:
            self.module.fail_json(msg=stderr)
        return stdout, stderr
    def is_key_imported(self, keyid):
        """Return True when *keyid* is among the rpm-imported GPG keys."""
        cmd = self.rpm + ' -q gpg-pubkey'
        rc, stdout, stderr = self.module.run_command(cmd)
        if rc != 0:  # No key is installed on system
            return False
        # Pipe the key descriptions through gpg and scan column 5 (keyid)
        # of its colon-separated output.
        cmd += ' --qf "%{description}" | '
        cmd += self.gpg
        cmd += ' --no-tty --batch --with-colons --fixed-list-mode -'
        stdout, stderr = self.execute_command(cmd)
        for line in stdout.splitlines():
            if keyid in line.split(':')[4]:
                return True
        return False
    def import_key(self, keyfile):
        """Import *keyfile* into the rpm keyring (no-op in check mode)."""
        # NOTE(review): execute_command passes use_unsafe_shell=True but is
        # given a list here — confirm run_command handles this as intended.
        if not self.module.check_mode:
            self.execute_command([self.rpm, '--import', keyfile])
def main():
    """Module entry point: build the argument spec and hand off to BuildRepo."""
    argument_spec = {
        'remote_repo': {'type': 'str', 'required': True},
        'remote_key': {'type': 'str'},
        'local_repo': {'type': 'str'},
        'local_key': {'type': 'str'},
        'name': {'type': 'str'},
        'use_local': {'type': 'bool', 'default': False},
    }
    module = AnsibleModule(argument_spec=argument_spec)
    BuildRepo(module).run()
if __name__ == '__main__':
    main()
| clustervision/trinityX | site/library/trix_repos.py | Python | gpl-2.0 | 8,238 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates the Rating and Criterion tables.

    Note: the Segment model is declared managed=False (an external
    osm-derived table), so no table is created or dropped for it.
    """
    def forwards(self, orm):
        """Apply the migration: create project_rating and project_criterion."""
        # Adding model 'Rating'
        db.create_table('project_rating', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('updated_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('criterion', self.gf('django.db.models.fields.related.ForeignKey')(related_name='ratings', to=orm['project.Criterion'])),
            ('segment', self.gf('django.db.models.fields.related.ForeignKey')(related_name='ratings', to=orm['project.Segment'])),
            ('block_index', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('score', self.gf('django.db.models.fields.PositiveIntegerField')()),
        ))
        db.send_create_signal('project', ['Rating'])
        # Adding model 'Criterion'
        db.create_table('project_criterion', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('prompt', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('project', ['Criterion'])
    def backwards(self, orm):
        """Reverse the migration: drop both tables."""
        # Deleting model 'Rating'
        db.delete_table('project_rating')
        # Deleting model 'Criterion'
        db.delete_table('project_criterion')
    # Frozen ORM snapshot used by South while running this migration.
    models = {
        'project.criterion': {
            'Meta': {'object_name': 'Criterion'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'prompt': ('django.db.models.fields.TextField', [], {})
        },
        'project.rating': {
            'Meta': {'object_name': 'Rating'},
            'block_index': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'created_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'criterion': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ratings'", 'to': "orm['project.Criterion']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'score': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'segment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ratings'", 'to': "orm['project.Segment']"}),
            'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'project.segment': {
            'Meta': {'object_name': 'Segment', 'db_table': "u'philly_street_osm_line'", 'managed': 'False'},
            'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True', 'db_column': "'osm_id'"}),
            'way': ('django.contrib.gis.db.models.fields.LineStringField', [], {'srid': '900913', 'null': 'True'})
        }
    }
    complete_apps = ['project']
| openplans/streetscore | street_score/project/migrations/0001_initial.py | Python | mit | 3,124 |
import threading
from contextlib import contextmanager
import os
from os.path import dirname, abspath, join as pjoin
import shutil
from subprocess import check_call, check_output, STDOUT
import sys
from tempfile import mkdtemp
from . import compat
# Public API of the pep517 wrapper module.
__all__ = [
    'BackendUnavailable',
    'BackendInvalid',
    'HookMissing',
    'UnsupportedOperation',
    'default_subprocess_runner',
    'quiet_subprocess_runner',
    'Pep517HookCaller',
]
try:
    import importlib.resources as resources
    def _in_proc_script_path():
        # importlib.resources yields a real filesystem path even when the
        # package is inside a zip archive.
        return resources.path(__package__, '_in_process.py')
except ImportError:
    # Fallback for interpreters without importlib.resources: assume the
    # package is an ordinary directory on disk.
    @contextmanager
    def _in_proc_script_path():
        yield pjoin(dirname(abspath(__file__)), '_in_process.py')
@contextmanager
def tempdir():
    """Create a temporary directory, yield its path, and remove it on exit."""
    path = mkdtemp()
    try:
        yield path
    finally:
        shutil.rmtree(path)
class BackendUnavailable(Exception):
    """Will be raised if the backend cannot be imported in the hook process."""
    def __init__(self, traceback):
        # Formatted traceback text captured in the hook subprocess.
        self.traceback = traceback
class BackendInvalid(Exception):
    """Will be raised if the backend is invalid (e.g. not an importable
    object, or inconsistent with the declared backend-path)."""
    def __init__(self, backend_name, backend_path, message):
        # Backend spec string from pyproject.toml.
        self.backend_name = backend_name
        # Normalised backend-path entries, or None.
        self.backend_path = backend_path
        # Human-readable explanation from the hook subprocess.
        self.message = message
class HookMissing(Exception):
    """Will be raised on missing hooks (the backend does not define the
    requested optional hook)."""
    def __init__(self, hook_name):
        super(HookMissing, self).__init__(hook_name)
        # Name of the hook that was requested but not provided.
        self.hook_name = hook_name
class UnsupportedOperation(Exception):
    """May be raised by build_sdist if the backend indicates that it can't."""
    def __init__(self, traceback):
        # Formatted traceback text captured in the hook subprocess.
        self.traceback = traceback
def default_subprocess_runner(cmd, cwd=None, extra_environ=None):
    """Run *cmd* with ``check_call``, inheriting os.environ plus any
    *extra_environ* overrides; raises CalledProcessError on failure."""
    env = dict(os.environ)
    if extra_environ:
        env.update(extra_environ)
    check_call(cmd, cwd=cwd, env=env)
def quiet_subprocess_runner(cmd, cwd=None, extra_environ=None):
    """Run *cmd* while swallowing its stdout/stderr (captured via
    ``check_output`` with stderr merged); raises on failure."""
    env = dict(os.environ)
    if extra_environ:
        env.update(extra_environ)
    check_output(cmd, cwd=cwd, env=env, stderr=STDOUT)
def norm_and_check(source_tree, requested):
    """Normalise and check a backend path.

    Ensure that the requested backend path is specified as a relative path,
    and resolves to a location under the given source tree.

    Return an absolute version of the requested path.
    """
    if os.path.isabs(requested):
        raise ValueError("paths must be relative")
    source_abs = os.path.abspath(source_tree)
    requested_abs = os.path.normpath(os.path.join(source_abs, requested))
    # commonprefix is a character-wise comparison (kept for Python 2.7
    # compatibility), so compare case-normalised paths.
    prefix = os.path.commonprefix(
        [os.path.normcase(source_abs), os.path.normcase(requested_abs)])
    if prefix != os.path.normcase(source_abs):
        raise ValueError("paths must be inside source tree")
    return requested_abs
class Pep517HookCaller(object):
    """A wrapper around a source directory to be built with a PEP 517 backend.
    :param source_dir: The path to the source directory, containing
        pyproject.toml.
    :param build_backend: The build backend spec, as per PEP 517, from
        pyproject.toml.
    :param backend_path: The backend path, as per PEP 517, from pyproject.toml.
    :param runner: A callable that invokes the wrapper subprocess.
    :param python_executable: The Python executable used to invoke the backend
    The 'runner', if provided, must expect the following:
    - cmd: a list of strings representing the command and arguments to
      execute, as would be passed to e.g. 'subprocess.check_call'.
    - cwd: a string representing the working directory that must be
      used for the subprocess. Corresponds to the provided source_dir.
    - extra_environ: a dict mapping environment variable names to values
      which must be set for the subprocess execution.
    """
    def __init__(
            self,
            source_dir,
            build_backend,
            backend_path=None,
            runner=None,
            python_executable=None,
    ):
        if runner is None:
            runner = default_subprocess_runner
        self.source_dir = abspath(source_dir)
        self.build_backend = build_backend
        if backend_path:
            # Validate each backend-path entry is relative and inside the
            # source tree, and convert to absolute.
            backend_path = [
                norm_and_check(self.source_dir, p) for p in backend_path
            ]
        self.backend_path = backend_path
        self._subprocess_runner = runner
        if not python_executable:
            python_executable = sys.executable
        self.python_executable = python_executable
    @contextmanager
    def subprocess_runner(self, runner):
        """A context manager for temporarily overriding the default subprocess
        runner.
        """
        prev = self._subprocess_runner
        self._subprocess_runner = runner
        try:
            yield
        finally:
            self._subprocess_runner = prev
    def get_requires_for_build_wheel(self, config_settings=None):
        """Identify packages required for building a wheel
        Returns a list of dependency specifications, e.g.::
            ["wheel >= 0.25", "setuptools"]
        This does not include requirements specified in pyproject.toml.
        It returns the result of calling the equivalently named hook in a
        subprocess.
        """
        return self._call_hook('get_requires_for_build_wheel', {
            'config_settings': config_settings
        })
    def prepare_metadata_for_build_wheel(
            self, metadata_directory, config_settings=None,
            _allow_fallback=True):
        """Prepare a ``*.dist-info`` folder with metadata for this project.
        Returns the name of the newly created folder.
        If the build backend defines a hook with this name, it will be called
        in a subprocess. If not, the backend will be asked to build a wheel,
        and the dist-info extracted from that (unless _allow_fallback is
        False).
        """
        return self._call_hook('prepare_metadata_for_build_wheel', {
            'metadata_directory': abspath(metadata_directory),
            'config_settings': config_settings,
            '_allow_fallback': _allow_fallback,
        })
    def build_wheel(
            self, wheel_directory, config_settings=None,
            metadata_directory=None):
        """Build a wheel from this project.
        Returns the name of the newly created file.
        In general, this will call the 'build_wheel' hook in the backend.
        However, if that was previously called by
        'prepare_metadata_for_build_wheel', and the same metadata_directory is
        used, the previously built wheel will be copied to wheel_directory.
        """
        if metadata_directory is not None:
            metadata_directory = abspath(metadata_directory)
        return self._call_hook('build_wheel', {
            'wheel_directory': abspath(wheel_directory),
            'config_settings': config_settings,
            'metadata_directory': metadata_directory,
        })
    def get_requires_for_build_sdist(self, config_settings=None):
        """Identify packages required for building a wheel
        Returns a list of dependency specifications, e.g.::
            ["setuptools >= 26"]
        This does not include requirements specified in pyproject.toml.
        It returns the result of calling the equivalently named hook in a
        subprocess.
        """
        return self._call_hook('get_requires_for_build_sdist', {
            'config_settings': config_settings
        })
    def build_sdist(self, sdist_directory, config_settings=None):
        """Build an sdist from this project.
        Returns the name of the newly created file.
        This calls the 'build_sdist' backend hook in a subprocess.
        """
        return self._call_hook('build_sdist', {
            'sdist_directory': abspath(sdist_directory),
            'config_settings': config_settings,
        })
    def _call_hook(self, hook_name, kwargs):
        """Invoke *hook_name* in the _in_process.py subprocess.

        Arguments travel via input.json in a temp dir; the backend spec and
        backend-path travel via environment variables; results come back in
        output.json, with error flags mapped to the exception types above.
        """
        # On Python 2, pytoml returns Unicode values (which is correct) but the
        # environment passed to check_call needs to contain string values. We
        # convert here by encoding using ASCII (the backend can only contain
        # letters, digits and _, . and : characters, and will be used as a
        # Python identifier, so non-ASCII content is wrong on Python 2 in
        # any case).
        # For backend_path, we use sys.getfilesystemencoding.
        if sys.version_info[0] == 2:
            build_backend = self.build_backend.encode('ASCII')
        else:
            build_backend = self.build_backend
        extra_environ = {'PEP517_BUILD_BACKEND': build_backend}
        if self.backend_path:
            backend_path = os.pathsep.join(self.backend_path)
            if sys.version_info[0] == 2:
                backend_path = backend_path.encode(sys.getfilesystemencoding())
            extra_environ['PEP517_BACKEND_PATH'] = backend_path
        with tempdir() as td:
            hook_input = {'kwargs': kwargs}
            compat.write_json(hook_input, pjoin(td, 'input.json'),
                              indent=2)
            # Run the hook in a subprocess
            with _in_proc_script_path() as script:
                python = self.python_executable
                self._subprocess_runner(
                    [python, abspath(str(script)), hook_name, td],
                    cwd=self.source_dir,
                    extra_environ=extra_environ
                )
            data = compat.read_json(pjoin(td, 'output.json'))
            if data.get('unsupported'):
                raise UnsupportedOperation(data.get('traceback', ''))
            if data.get('no_backend'):
                raise BackendUnavailable(data.get('traceback', ''))
            if data.get('backend_invalid'):
                raise BackendInvalid(
                    backend_name=self.build_backend,
                    backend_path=self.backend_path,
                    message=data.get('backend_error', '')
                )
            if data.get('hook_missing'):
                raise HookMissing(hook_name)
            return data['return_val']
class LoggerWrapper(threading.Thread):
    """
    Read messages from a pipe and redirect them
    to a logger (see python's logging module).
    """
    def __init__(self, logger, level):
        threading.Thread.__init__(self)
        self.daemon = True
        self.logger = logger
        self.level = level
        # Create the pipe; the read end is wrapped as a text file object and
        # drained by the background thread started below.
        self.fd_read, self.fd_write = os.pipe()
        self.reader = os.fdopen(self.fd_read)
        self.start()
    def fileno(self):
        """Expose the write end so this object can be passed as a file."""
        return self.fd_write
    @staticmethod
    def remove_newline(msg):
        """Strip a single trailing os.linesep character, if present."""
        if msg.endswith(os.linesep):
            return msg[:-1]
        return msg
    def run(self):
        # Forward each line from the pipe to the logger until EOF.
        for line in self.reader:
            self._write(self.remove_newline(line))
    def _write(self, message):
        self.logger.log(self.level, message)
| foobarbazblarg/stayclean | stayclean-2020-december/venv/lib/python3.8/site-packages/pip/_vendor/pep517/wrappers.py | Python | mit | 11,290 |
import openpnm as op
class CubicDualTest:
    """Regression tests pinning pore/throat counts and label sizes of
    openpnm's CubicDual network generator."""
    def setup_class(self):
        pass
    def teardown_class(self):
        pass
    def test_generation_3D(self):
        # Full 3D dual network: check every label's pore and throat counts.
        net = op.network.CubicDual(shape=[5, 5, 5], label_1='primary',
                                   label_2='secondary')
        assert net.Np == 285
        assert net.Nt == 1436
        assert net.num_pores('all') == 285
        assert net.num_pores('back') == 41
        assert net.num_pores('bottom') == 41
        assert net.num_pores('front') == 41
        assert net.num_pores('internal') == 285
        assert net.num_pores('left') == 41
        assert net.num_pores('primary') == 125
        assert net.num_pores('right') == 41
        assert net.num_pores('secondary') == 160
        assert net.num_pores('surface') == 194
        assert net.num_pores('top') == 41
        assert net.num_throats('all') == 1436
        assert net.num_throats('interconnect') == 896
        assert net.num_throats('internal') == 1436
        assert net.num_throats('primary') == 300
        assert net.num_throats('secondary') == 240
        assert net.num_throats('surface') == 576
        assert net.num_throats('top') == 104
        assert net.num_throats('bottom') == 104
        assert net.num_throats('left') == 104
        assert net.num_throats('right') == 104
        assert net.num_throats('front') == 104
        assert net.num_throats('back') == 104
    def test_generation_2D_XY(self):
        # 2D network in the XY plane (unit thickness along Z).
        net = op.network.CubicDual(shape=[5, 5, 1], label_1='primary',
                                   label_2='secondary')
        assert net.Np == 57
        assert net.Nt == 176
        assert net.num_pores('left') == 9
        assert net.num_pores('right') == 9
        assert net.num_pores('front') == 9
        assert net.num_pores('back') == 9
        assert net.num_throats('interconnect') == 96
    def test_generation_2D_XZ(self):
        # 2D network in the XZ plane.
        net = op.network.CubicDual(shape=[5, 1, 5], label_1='primary',
                                   label_2='secondary')
        assert net.Np == 57
        assert net.Nt == 176
        assert net.num_pores('front') == 9
        assert net.num_pores('back') == 9
        assert net.num_pores('top') == 9
        assert net.num_pores('bottom') == 9
        assert net.num_throats('interconnect') == 96
    def test_generation_2D_YZ(self):
        # 2D network in the YZ plane.
        net = op.network.CubicDual(shape=[1, 5, 5], label_1='primary',
                                   label_2='secondary')
        assert net.Np == 57
        assert net.Nt == 176
        assert net.num_pores('left') == 9
        assert net.num_pores('right') == 9
        assert net.num_pores('top') == 9
        assert net.num_pores('bottom') == 9
        assert net.num_throats('interconnect') == 96
    def test_generation_2D_2_dims(self):
        # Two-element shape should behave like the [5, 5, 1] case.
        net = op.network.CubicDual(shape=[5, 5], label_1='primary',
                                   label_2='secondary')
        assert net.Np == 57
        assert net.Nt == 176
        assert net.num_pores('left') == 9
        assert net.num_pores('right') == 9
        assert net.num_pores('front') == 9
        assert net.num_pores('back') == 9
        assert net.num_throats('interconnect') == 96
if __name__ == '__main__':
    # Ad-hoc runner: execute every test_* method of CubicDualTest directly,
    # without pytest, printing each test's name as it runs.
    t = CubicDualTest()
    t.setup_class()
    self = t  # convenience alias for interactive debugging
    for item in t.__dir__():
        if item.startswith('test'):
            print('running test: '+item)
            t.__getattribute__(item)()
| TomTranter/OpenPNM | tests/unit/network/CubicDualTest.py | Python | mit | 3,436 |
"""
正規表現のサンプルです。
絶対最大量指定子 (possessive quantifier) について
(絶対最大量指定子は、強欲な量指定子ともいう)
REFERENCES:: http://bit.ly/2NW2TAq
http://bit.ly/2NXU6Ow
http://bit.ly/2NXUcFS
http://bit.ly/2NZDm9v
http://bit.ly/2NXxyNQ
"""
import re
import regex
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr
class Sample(SampleBase):
    """Demonstrates possessive quantifiers (``*+``) in regular expressions."""
    def exec(self):
        # ------------------------------------------------------------------------
        # Possessive quantifiers
        # -----------------------------------------
        # The "possessive" behaviour of the ``*+`` metacharacter comes from
        # the same idea as atomic groups: ``*+`` matches like ``*``, but the
        # backtracking state for the matched span is discarded, so the engine
        # never backtracks into it.
        #
        # Like atomic groups, possessive quantifiers are not supported by the
        # standard `re` module; the third-party `regex` module supports them.
        # ------------------------------------------------------------------------
        # The standard `re` module rejects "*+".
        s = 'aaaabbbb'
        p = r'.*+b+'
        try:
            re.compile(p)
        except re.error as e:
            pr('re.error', '標準モジュール re では 「*+」はサポートされていない', e)
        # The `regex` module accepts "*+".
        # In the pattern ".*+b+", ".*+" consumes all of "aaaabbbb"; "b+" is
        # still unmatched, so the engine would normally backtrack, but the
        # possessive quantifier forbids it and the match fails right there.
        r = regex.compile(p)
        m = r.match(s)
        if not m:
            pr('.*+b+', '絶対最大量指定子を使っているのでマッチしない (正解)')
        # Without the possessive quantifier, ".*b+" naturally matches.
        p = r'.*b+'
        r = regex.compile(p)
        m = r.match(s)
        if m:
            pr('.*b+', '絶対最大量指定子を使っていないのでマッチする (正解)')
def go():
    """Instantiate Sample and run its exec() demonstration."""
    Sample().exec()
| devlights/try-python | trypython/stdlib/re_/re05.py | Python | mit | 2,745 |
#!/usr/bin/python2.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
# $Id$
#
# Copyright (C) 1999-2006 Keith Dart <keith@kdart.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
import sys
from pycopia import scheduler
from pycopia import proctools
class Pinger(proctools.ProcessPipe):
    """
    This class is an interface to, and opens a pipe to, the pyntping program.
    This program actually does a bit more than ping, but pinging is the most
    common use. The pyntping program is a C program that should be installed
    in your path, owned by root, with SUID bit set. This is necessary because
    only root can open RAW sockets for ICMP operations.
    Methods are listed below. Most methods take a single host, or multiple
    hosts when called. If a single host is given, a single value will be
    returned. If more than one is given, a list of result will be returned.
    The following attributes may also be adjusted:
        retries
        timeout
        delay
        size
        hops
    """
    # NOTE: the attributes above are set externally (see get_pinger) and are
    # read by __do_command when building each request line.
    # ICMP methods
    def echo(self, *hosts):
        """Send ICMP echo requests; returns (host, rtt_ms) tuples."""
        cmdstr = ""
        return self.__do_command(cmdstr, hosts)
    # return a sublist of only those hosts that are reachable
    def reachablelist(self, *hosts):
        cmdstr = ""
        rv = self.__do_command(cmdstr, hosts)
        return filter(lambda x: x[1] >= 0, rv)
    # return a boolean value if a host is reachable.
    # If list given, return a list of (host, reachable) tuples.
    def reachable(self, *hosts):
        cmdstr = ""
        rv = self.__do_command(cmdstr, hosts)
        return map(lambda x: (x[0], x[1] >= 0), rv)
    def ping(self, *hosts):
        """Alias for a plain echo request; returns (host, rtt_ms) tuples."""
        cmdstr = ""
        return self.__do_command(cmdstr, hosts)
    def mask(self, *hosts):
        """ICMP address-mask request."""
        cmdstr = "-mask "
        return self.__do_command(cmdstr, hosts)
    def timestamp(self, *hosts):
        """ICMP timestamp request."""
        cmdstr = "-tstamp "
        return self.__do_command(cmdstr, hosts)
    def ttl(self, *hosts):
        """Probe with the configured TTL (self.hops)."""
        cmdstr = "-ttl %d " % (self.hops)
        return self.__do_command(cmdstr, hosts)
    def trace(self, *hosts):
        """Ask pyntping itself to trace up to self.hops hops."""
        cmdstr = "-trace %d " % (self.hops)
        return self.__do_command(cmdstr, hosts)
    def __do_command(self, cmdstr, hosts):
        """Build a request line, write it to pyntping, and read the reply.

        The reply is drained in 4 KiB chunks until the pipe is empty, then
        parsed with eval().  NOTE(review): eval() on subprocess output is
        trusted here because pyntping is a local SUID helper — confirm.
        """
        for host in hosts:
            if isinstance(host, list):
                s = []
                for hle in host:
                    s.append(str(hle))
                cmdstr += " ".join(s)
            else:
                cmdstr += " %s" % host
        rqst = "-size %d -timeout %d -retries %d -delay %d %s\n" % \
                    (self.size, self.timeout, self.retries, self.delay, cmdstr)
        self._write(rqst)
        # now try and get all of pyntping's output
        resstr = self._read(4096)
        scheduler.sleep(1)
        while self.fstat().st_size != 0:
            next = self._read(4096)
            if next:
                resstr += next
            scheduler.sleep(1)
        # we should have got a tuple of tuples
        result = eval(resstr)
        return result
#### end Ping
## some factory/utility functions
def get_pinger(retries=3, timeout=5, delay=0, size=64, hops=30, logfile=None):
    """Returns a Pinger process that you can call various ICMP methods
    on."""
    # NOTE(review): 'async=False' below is valid only on Python 2; 'async'
    # is a reserved word from Python 3.7 on. This module is Python 2 code.
    pm = proctools.get_procmanager()
    pinger = pm.spawnprocess(Pinger, "pyntping -b", logfile=logfile, env=None, callback=None,
            persistent=False, merge=True, pwent=None, async=False, devnull=False)
    # Tunable knobs read by Pinger.__do_command when building requests.
    pinger.retries = retries
    pinger.timeout = timeout
    pinger.delay = delay
    pinger.size = size
    pinger.hops = hops
    return pinger
def reachable_hosts(hostlist):
    """
    reachable_hosts(hostlist)
    where <hostlist> is a list of host strings.

    Returns just the host names from the (host, reachable) result tuples.
    """
    results = get_pinger().reachable(hostlist)
    return map(lambda pair: pair[0], results)
def scan_net(net):
    """
    scan_net(network)
    where <network> is an IPv4 object or list with host and broadcast elements at ends.

    Pings every address between the network and broadcast entries and
    returns the addresses that responded.
    """
    results = get_pinger().reachablelist(net[1:-1])
    return map(lambda pair: pair[0], results)
def traceroute(hostip, maxhops=30):
    """
    traceroute(hostip, maxhops=30)
    return a list of (ipaddr, time) tuples tracing a path to the given hostip.
    """
    # Original code appended the hop in both branches of an if/else and
    # carried a duplicated return; collapsed to a single append plus an
    # early break once the destination answers.
    tracelist = []
    pinger = get_pinger()
    for ttl in xrange(maxhops):
        pinger.hops = ttl + 1
        nexthop = pinger.ttl(hostip)[0]
        tracelist.append(nexthop)
        if nexthop[0] == hostip:
            break  # reached the destination; stop probing further hops
    return tracelist
def ping(host, retries=3, timeout=5, delay=1, size=64, hops=30):
pinger = get_pinger(retries, timeout, delay, size, hops)
sum = 0
Nxmit = 0
Nrecv = 0
_min = sys.maxint
_max = 0
print "Pinging %s with %d bytes of data." % (host, size)
try:
while 1: # escape with SIGINT (^C)
Nxmit = Nxmit + 1
host, rttime = pinger.ping(host)[0]
if rttime >= 0:
sum += rttime
Nrecv = Nrecv + 1
_min = min(_min, rttime)
_max = max(_max, rttime)
print "%-16s %d ms" % (host, rttime)
scheduler.sleep(pinger.delay)
except KeyboardInterrupt:
print "%d packets transmitted, %d packets received, %d%% packet loss" % (Nxmit, Nrecv, 100-(Nxmit/Nrecv*100))
print "round-trip min/avg/max = %d/%d/%d ms" % (_min, sum/Nrecv, _max)
def reachable(*hosts):
    """Convenience wrapper: build a default pinger and query reachability."""
    return get_pinger().reachable(*hosts)
| xiangke/pycopia | net/pycopia/ping.py | Python | lgpl-2.1 | 5,969 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for export tools."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.session_bundle import manifest_pb2
class ExportTest(tf.test.TestCase):
  """Tests for ExportMonitor export directories and serving signatures.

  Uses the deprecated-alias fix assertEquals -> assertEqual throughout.
  """

  def _get_default_signature(self, export_meta_filename):
    """Gets the default signature from the export.meta file."""
    with tf.Session():
      save = tf.train.import_meta_graph(export_meta_filename)
      meta_graph_def = save.export_meta_graph()
      collection_def = meta_graph_def.collection_def
      signatures_any = collection_def['serving_signatures'].any_list.value
      # Exactly one serialized Signatures proto is expected.
      self.assertEqual(len(signatures_any), 1)
      signatures = manifest_pb2.Signatures()
      signatures_any[0].Unpack(signatures)
      default_signature = signatures.default_signature
      return default_signature

  def testExportMonitor_EstimatorProvidesSignature(self):
    """Without a signature_fn the estimator supplies a regression signature."""
    random.seed(42)
    x = np.random.rand(1000)
    y = 2 * x + 3
    cont_features = [tf.contrib.layers.real_valued_column('', dimension=1)]
    regressor = learn.LinearRegressor(feature_columns=cont_features)
    export_dir = tempfile.mkdtemp() + 'export/'
    export_monitor = learn.monitors.ExportMonitor(
        every_n_steps=1, export_dir=export_dir, exports_to_keep=2)
    regressor.fit(x, y, steps=10,
                  monitors=[export_monitor])
    self.assertTrue(tf.gfile.Exists(export_dir))
    # Only the written checkpoints are exported.
    self.assertTrue(tf.gfile.Exists(export_dir + '00000001/export'))
    self.assertTrue(tf.gfile.Exists(export_dir + '00000010/export'))
    self.assertEqual(export_monitor.last_export_dir, os.path.join(export_dir,
                                                                  '00000010'))
    # Validate the signature
    signature = self._get_default_signature(export_dir + '00000010/export.meta')
    self.assertTrue(signature.HasField('regression_signature'))

  def testExportMonitor(self):
    """An explicit generic signature_fn produces a generic_signature."""
    random.seed(42)
    x = np.random.rand(1000)
    y = 2 * x + 3
    cont_features = [tf.contrib.layers.real_valued_column('', dimension=1)]
    regressor = learn.LinearRegressor(feature_columns=cont_features)
    export_dir = tempfile.mkdtemp() + 'export/'
    export_monitor = learn.monitors.ExportMonitor(
        every_n_steps=1, export_dir=export_dir, exports_to_keep=2,
        signature_fn=export.generic_signature_fn)
    regressor.fit(x, y, steps=10,
                  monitors=[export_monitor])
    self.assertTrue(tf.gfile.Exists(export_dir))
    # Only the written checkpoints are exported.
    self.assertTrue(tf.gfile.Exists(export_dir + '00000001/export'))
    self.assertTrue(tf.gfile.Exists(export_dir + '00000010/export'))
    self.assertEqual(export_monitor.last_export_dir, os.path.join(export_dir,
                                                                  '00000010'))
    # Validate the signature
    signature = self._get_default_signature(export_dir + '00000010/export.meta')
    self.assertTrue(signature.HasField('generic_signature'))

  def testExportMonitorRegressionSignature(self):
    """A custom signature_fn is honored and old exports are pruned."""

    def _regression_signature(examples, unused_features, predictions):
      # Builds a session_bundle regression signature from the fitted model.
      signatures = {}
      signatures['regression'] = (
          tf.contrib.session_bundle.exporter.regression_signature(examples,
                                                                  predictions))
      return signatures['regression'], signatures

    random.seed(42)
    x = np.random.rand(1000)
    y = 2 * x + 3
    cont_features = [tf.contrib.layers.real_valued_column('', dimension=1)]
    regressor = learn.LinearRegressor(feature_columns=cont_features)
    export_dir = tempfile.mkdtemp() + 'export/'
    export_monitor = learn.monitors.ExportMonitor(
        every_n_steps=1,
        export_dir=export_dir,
        exports_to_keep=1,
        signature_fn=_regression_signature)
    regressor.fit(x, y, steps=10, monitors=[export_monitor])
    self.assertTrue(tf.gfile.Exists(export_dir))
    # exports_to_keep=1 means the first export has been cleaned up.
    self.assertFalse(tf.gfile.Exists(export_dir + '00000000/export'))
    self.assertTrue(tf.gfile.Exists(export_dir + '00000010/export'))
    # Validate the signature
    signature = self._get_default_signature(export_dir + '00000010/export.meta')
    self.assertTrue(signature.HasField('regression_signature'))
# Run the TensorFlow test runner when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| cg31/tensorflow | tensorflow/contrib/learn/python/learn/utils/export_test.py | Python | apache-2.0 | 5,206 |
#!/usr/bin/env python
# Copyright 2002 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for datelib.py module."""
import datetime
import random
import time
import pytz
from google_apputils import basetest
from google_apputils import datelib
class TimestampUnitTest(basetest.TestCase):
  """Tests for datelib.Timestamp construction, conversion and parsing."""
  # Fixed seed so the randomized timezone/microsecond tests are reproducible.
  seed = 1979
  def testTzAwareSuccession(self):
    # now() and utcnow() are both tz-aware; read in order, the first can
    # never compare later than the second.
    a = datelib.Timestamp.now()
    b = datelib.Timestamp.utcnow()
    self.assertLessEqual(a, b)
  def testTzRandomConversion(self):
    # Converting through a random chain of timezones must not change the
    # instant the timestamp denotes.
    random.seed(self.seed)
    for unused_i in xrange(100):
      stz = pytz.timezone(random.choice(pytz.all_timezones))
      a = datelib.Timestamp.FromString('2008-04-12T10:00:00', stz)
      b = a
      for unused_j in xrange(100):
        b = b.astimezone(pytz.timezone(random.choice(pytz.all_timezones)))
      self.assertEqual(a, b)
    random.seed()
  def testMicroTimestampConversion(self):
    """Test that f1(f2(a)) == a."""
    def IsEq(x):
      self.assertEqual(
          x, datelib.Timestamp.FromMicroTimestamp(x).AsMicroTimestamp())
    IsEq(0)
    IsEq(datelib.MAXIMUM_MICROSECOND_TIMESTAMP)
    random.seed(self.seed)
    for _ in xrange(100):
      IsEq(random.randint(0, datelib.MAXIMUM_MICROSECOND_TIMESTAMP))
  def testMicroTimestampKnown(self):
    # Known anchors: the epoch maps to 0; the documented maximum
    # round-trips through its Timestamp constant.
    self.assertEqual(0, datelib.Timestamp.FromString(
        '1970-01-01T00:00:00', pytz.UTC).AsMicroTimestamp())
    self.assertEqual(
        datelib.MAXIMUM_MICROSECOND_TIMESTAMP,
        datelib.MAXIMUM_MICROSECOND_TIMESTAMP_AS_TS.AsMicroTimestamp())
  def testMicroTimestampOrdering(self):
    """Test that cmp(a, b) == cmp(f1(a), f1(b))."""
    def IsEq(a, b):
      self.assertEqual(
          cmp(a, b),
          cmp(datelib.Timestamp.FromMicroTimestamp(a),
              datelib.Timestamp.FromMicroTimestamp(b)))
    random.seed(self.seed)
    for unused_i in xrange(100):
      IsEq(
          random.randint(0, datelib.MAXIMUM_MICROSECOND_TIMESTAMP),
          random.randint(0, datelib.MAXIMUM_MICROSECOND_TIMESTAMP))
  def testCombine(self):
    # combine() should honor the supplied tzinfo at both extremes of the
    # supported range.
    for tz in (datelib.UTC, datelib.US_PACIFIC):
      self.assertEqual(
          datelib.Timestamp(1970, 1, 1, 0, 0, 0, 0, tz),
          datelib.Timestamp.combine(
              datelib.datetime.date(1970, 1, 1),
              datelib.datetime.time(0, 0, 0),
              tz))
      self.assertEqual(
          datelib.Timestamp(9998, 12, 31, 23, 59, 59, 999999, tz),
          datelib.Timestamp.combine(
              datelib.datetime.date(9998, 12, 31),
              datelib.datetime.time(23, 59, 59, 999999),
              tz))
  def testFromString1(self):
    # Several ISO-8601 spellings of the epoch must parse identically in
    # any timezone; an explicit +0000 offset overrides the tz argument.
    for string_zero in (
        '1970-01-01 00:00:00',
        '19700101T000000',
        '1970-01-01T00:00:00'
        ):
      for testtz in (datelib.UTC, datelib.US_PACIFIC):
        self.assertEqual(
            datelib.Timestamp.FromString(string_zero, testtz),
            datelib.Timestamp(1970, 1, 1, 0, 0, 0, 0, testtz))
    self.assertEqual(
        datelib.Timestamp.FromString(
            '1970-01-01T00:00:00+0000', datelib.US_PACIFIC),
        datelib.Timestamp(1970, 1, 1, 0, 0, 0, 0, datelib.UTC))
    startdate = datelib.Timestamp(2009, 1, 1, 3, 0, 0, 0, datelib.US_PACIFIC)
    for day in xrange(1, 366):
      self.assertEqual(
          datelib.Timestamp.FromString(startdate.isoformat()),
          startdate,
          'FromString works for day %d since 2009-01-01' % day)
      startdate += datelib.datetime.timedelta(days=1)
  def testFromString2(self):
    """Test correctness of parsing the local time in a given timezone.
    The result shall always be the same as tz.localize(naive_time).
    """
    baseday = datelib.datetime.date(2009, 1, 1).toordinal()
    for day_offset in xrange(0, 365):
      day = datelib.datetime.date.fromordinal(baseday + day_offset)
      naive_day = datelib.datetime.datetime.combine(
          day, datelib.datetime.time(0, 45, 9))
      naive_day_str = naive_day.strftime('%Y-%m-%dT%H:%M:%S')
      self.assertEqual(
          datelib.US_PACIFIC.localize(naive_day),
          datelib.Timestamp.FromString(naive_day_str, tz=datelib.US_PACIFIC),
          'FromString localizes time incorrectly')
  def testFromStringInterval(self):
    # Relative interval strings like '1d' mean "now minus the interval";
    # allow up to a second of clock skew between the two readings.
    expected_date = datetime.datetime.utcnow() - datetime.timedelta(days=1)
    expected_s = time.mktime(expected_date.utctimetuple())
    actual_date = datelib.Timestamp.FromString('1d')
    actual_s = time.mktime(actual_date.timetuple())
    diff_seconds = actual_s - expected_s
    self.assertBetween(diff_seconds, 0, 1)
    self.assertRaises(
        datelib.TimeParseError, datelib.Timestamp.FromString, 'wat')
def _EpochToDatetime(t, tz=None):
  """Convert epoch seconds *t* to a datetime, localized to *tz* if given."""
  if tz is None:
    return datelib.datetime.datetime.utcfromtimestamp(t)
  return datelib.datetime.datetime.fromtimestamp(t, tz)
class DatetimeConversionUnitTest(basetest.TestCase):
  """Tests for epoch-micros/millis <-> datetime conversion helpers."""
  def setUp(self):
    self.pst = pytz.timezone('US/Pacific')
    self.utc = pytz.utc
    self.now = time.time()
  def testDatetimeToUTCMicros(self):
    self.assertEqual(
        0, datelib.DatetimeToUTCMicros(_EpochToDatetime(0)))
    self.assertEqual(
        1001 * long(datelib._MICROSECONDS_PER_SECOND),
        datelib.DatetimeToUTCMicros(_EpochToDatetime(1001)))
    self.assertEqual(long(self.now * datelib._MICROSECONDS_PER_SECOND),
                     datelib.DatetimeToUTCMicros(_EpochToDatetime(self.now)))
    # tzinfo shouldn't change the result
    self.assertEqual(
        0, datelib.DatetimeToUTCMicros(_EpochToDatetime(0, tz=self.pst)))
  def testDatetimeToUTCMillis(self):
    self.assertEqual(
        0, datelib.DatetimeToUTCMillis(_EpochToDatetime(0)))
    self.assertEqual(
        1001 * 1000L, datelib.DatetimeToUTCMillis(_EpochToDatetime(1001)))
    self.assertEqual(long(self.now * 1000),
                     datelib.DatetimeToUTCMillis(_EpochToDatetime(self.now)))
    # tzinfo shouldn't change the result
    self.assertEqual(
        0, datelib.DatetimeToUTCMillis(_EpochToDatetime(0, tz=self.pst)))
  def testUTCMicrosToDatetime(self):
    self.assertEqual(_EpochToDatetime(0), datelib.UTCMicrosToDatetime(0))
    self.assertEqual(_EpochToDatetime(1.000001),
                     datelib.UTCMicrosToDatetime(1000001))
    self.assertEqual(_EpochToDatetime(self.now), datelib.UTCMicrosToDatetime(
        long(self.now * datelib._MICROSECONDS_PER_SECOND)))
    # Check timezone-aware comparisons
    self.assertEqual(_EpochToDatetime(0, self.pst),
                     datelib.UTCMicrosToDatetime(0, tz=self.pst))
    self.assertEqual(_EpochToDatetime(0, self.pst),
                     datelib.UTCMicrosToDatetime(0, tz=self.utc))
  def testUTCMillisToDatetime(self):
    self.assertEqual(_EpochToDatetime(0), datelib.UTCMillisToDatetime(0))
    self.assertEqual(_EpochToDatetime(1.001), datelib.UTCMillisToDatetime(1001))
    t = time.time()
    dt = _EpochToDatetime(t)
    # truncate sub-milli time
    dt -= datelib.datetime.timedelta(microseconds=dt.microsecond % 1000)
    self.assertEqual(dt, datelib.UTCMillisToDatetime(long(t * 1000)))
    # Check timezone-aware comparisons
    self.assertEqual(_EpochToDatetime(0, self.pst),
                     datelib.UTCMillisToDatetime(0, tz=self.pst))
    self.assertEqual(_EpochToDatetime(0, self.pst),
                     datelib.UTCMillisToDatetime(0, tz=self.utc))
class MicrosecondsToSecondsUnitTest(basetest.TestCase):
  """Spot-checks microsecond-to-second conversion across magnitudes."""
  def testConversionFromMicrosecondsToSeconds(self):
    cases = [
        (0.0, 0),
        (7.0, 7000000),
        (1.234567, 1234567),
        (12345654321.123456, 12345654321123456),
    ]
    for expected_seconds, micros in cases:
      self.assertEqual(expected_seconds,
                       datelib.MicrosecondsToSeconds(micros))
# Run the basetest runner when executed as a script.
if __name__ == '__main__':
  basetest.main()
| jeremydw/google-apputils-python | tests/datelib_unittest.py | Python | apache-2.0 | 8,310 |
# coding: utf-8
from quokka.core.app import QuokkaModule
from .views import CommentView
# Register the "comments" Quokka module (blueprint-style) and route
# /comment/<path>/ onto the class-based CommentView.
module = QuokkaModule("comments", __name__, template_folder="templates")
module.add_url_rule('/comment/<path:path>/',
                    view_func=CommentView.as_view('comment'))
| maurobaraldi/quokka | quokka/modules/comments/__init__.py | Python | mit | 270 |
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
import django.views
from OMRS import views
from django.contrib import admin
# Auto-register all app admin.py modules with the Django admin site.
admin.autodiscover()
# URL routing table for the OMRS project (legacy Django patterns() style).
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'openMRScap.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    #url(r'^$',views.index,name='index'),
    #url(r'^$',views.index,name='server'),
    #add other projects URLS
    #url(r'^OMRS/',include('OMRS.urls')),
    url(r'^$','OMRS.views.index', name='home'),
    #url(r'^server/',views.jobs.as_view(),name='server'),
    url(r'^jobs/',views.jobs.as_view(),name='jobs'),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^server/$','OMRS.views.server'),
    url(r'^userprofile/$','OMRS.views.userProfile',name='userprofile'),
    url(r'^jobserversettings/$','OMRS.views.userJobSettings'), #lists just the URLS of the servers in the system
    url(r'^restricted/$', 'OMRS.views.restricted', name='restricted'), #not doing anything yet
    url(r'^setup/$', 'OMRS.views.post_server_details',name='setup'),
    #user details
    url(r'^register/$', 'OMRS.views.register', name='register'),
    url(r'^login/$', 'OMRS.views.user_login',name='login'),
    url(r'^logout/$', 'OMRS.views.user_logout', name='logout'),
    #import file
    url(r'^upload/$', 'OMRS.views.upload', name='upload'),
)
# Serve user-uploaded media through Django itself, but only in DEBUG mode.
if settings.DEBUG:
    urlpatterns += patterns(
        'django.views.static',
        (r'media/(?P<path>.*)',
         django.views.static.serve,
         {'document_root': settings.MEDIA_ROOT}),
    )
| omiltoro/softbrew | openMRScap/urls.py | Python | apache-2.0 | 1,624 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# All Rights Reserved.
#
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import uuid
import os
from signal import SIGTERM
from datetime import datetime
from oslo.config import cfg
import cinder.context
from cinder import exception
from paxes_cinder import exception as paxes_exception
from cinder import quota
from cinder import utils
from cinder.db import api as db_api
from cinder.image import glance
from cinder.openstack.common import excutils
from cinder.openstack.common import lockutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import periodic_task
from cinder.openstack.common import timeutils
from paxes_cinder import _
from cinder import rpc
from cinder.openstack.common import processutils
from cinder.volume import manager
from cinder.volume import volume_types
import eventlet.greenthread as greenthread
from paxes_cinder.db import api as paxes_db
from paxes_cinder.volume import discovery_driver
# Keys used to persist the attach-time connector in restricted metadata.
RESTRICTED_METADATA_CONN_WWPN_KEY = 'connector_wwpn'
RESTRICTED_METADATA_CONN_HOST_KEY = 'connector_host'
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('volume_driver', 'cinder.volume.manager')
# Extra configuration options for the PowerVC direct-access volume manager.
volume_direct_access_manager_opts = [
    cfg.ListOpt('san_allowed_prefixes',
                default=['image-'],
                help='List of prefixes of SAN volume names under our control'),
    cfg.StrOpt('host_type',
               default=None,
               help='The Type of Storage Back-end'),
    cfg.StrOpt('host_uniqueid',
               default=None,
               help='The Back-end Unique ID'),
    cfg.StrOpt('host_display_name',
               default=None,
               help='The user display name'),
    cfg.IntOpt('numtries_during_init',
               default=2,
               help='Number of times to try if driver is initializing'),
    cfg.IntOpt('sleeptime_during_init',
               default=10,
               help='Number of seconds to sleep between retries if driver is'
               'initializing')
]
CONF.register_opts(volume_direct_access_manager_opts)
CONF.import_opt('quota_volumes', 'cinder.quota')
CONF.import_opt('quota_snapshots', 'cinder.quota')
CONF.import_opt('quota_gigabytes', 'cinder.quota')
CONF.import_opt('zoning_mode', 'cinder.volume.manager')
# Raised when a direct SAN operation targets a disk whose name prefix is
# not listed in the san_allowed_prefixes configuration whitelist.
class DirectAccessWithBadPrefixException(exception.CinderException):
    message = _("Cannot perform direct SAN operation on disk with prefix "
                "%(prefix)s")
def retry_during_init(function, descr):
    """Call *function*, retrying while the driver is still initializing.

    The retry count and sleep interval come from configuration; the last
    DriverNotInitialized is re-raised once the retries are exhausted.
    """
    attempts = CONF.numtries_during_init
    for attempt in range(attempts):
        try:
            return function()
        except exception.DriverNotInitialized:
            if attempt >= attempts - 1:
                raise
            LOG.info(_("Driver still initializing, wait..."))
            greenthread.sleep(CONF.sleeptime_during_init)
        LOG.info(_("Try again: %s") % descr)
def log_errors(manager, context, errors, metadata, is_volume_error,
               volume_id, broadcasts=None):
    """
    Constructs a dictionary by combining errors with metadata, and then
    updates the volume metadata associated with the specified volume_id
    Will also put the volume into the Error state if requested.
    """
    # Add in the user-specified metadata
    errors.update(metadata)
    # get the existing volume metadata and merge it with errors metadata.
    # Otherwise, volume_update will delete existing paxes boot
    # volume metadata.
    vol_metadata = manager.db.volume_metadata_get(context, volume_id)
    if vol_metadata:
        vol_metadata.update(errors)
    else:
        vol_metadata = errors
    # Prepare the update
    new_data = {'metadata': vol_metadata}
    if is_volume_error:
        new_data.update({'status': 'error'})
    # Update the database
    manager.db.volume_update(context, volume_id, new_data)
    # Notify listeners that error details were attached, then emit any
    # user-facing broadcast messages as error-level notifications.
    anotifier = rpc.get_notifier('volume', volume_id)
    anotifier.info(
        context, 'volume.update.errors', dict(volume_id=volume_id))
    if broadcasts:
        for message in broadcasts:
            anotifier.error(context, 'cinder.volume.log', message)
def to_value(v):
    """
    Truncate strings to fit in 255 characters, because that is the maximum
    length of values in Volume metadata

    Non-string inputs yield None.
    """
    return v[:255] if isinstance(v, basestring) else None
def build_metadata(e, prefix, volume_id):
    """
    Build a dictionary containing all the error information we can extract from
    the passed-in Exception 'e'. The 'prefix' is appended to the key values
    that we create.

    Returns a (info, is_volume_error, broadcasts) tuple: 'info' is metadata
    to attach to the volume, 'is_volume_error' says whether the volume
    should be moved to the error state, and 'broadcasts' is a list of
    user-facing messages to emit.
    """
    is_volume_error = False
    broadcasts = []
    # Parallel exceptions from zoning operations cover everything that went
    # wrong on all switches. We know that they are user-friendly and so are
    # suitable candidates to be broadcast.
    if isinstance(e, paxes_exception.ZoneManagerParallel):
        for e2 in e.exceptions:
            # If a contained exception is a SwitchException, then it has
            # details of the switch with the error.
            if isinstance(e2, paxes_exception.FabricException):
                info = {'msg': _("Fabric {fabric_name} ({user}@{ip}:{port}): "
                                 "%(message)s") %
                        {'message': e2.msg},
                        'fabric_name': e2.descriptor.name,
                        'fabric_display_name': e2.descriptor.display_name,
                        'user': e2.descriptor.username,
                        'ip': e2.descriptor.ip,
                        'port': e2.descriptor.port}
            else:
                info = {'msg': _("%s") % e2}
            broadcasts.append(info)
    try:
        if isinstance(e, paxes_exception.SVCException):
            # SVCException objects contains lots of useful information that we
            # can report back to the user.
            info = {'msg': _("Storage Provider {storage_provider_hostname} "
                             "({user}@{ip}:{port}) {volume_id}: %(message)s") %
                    {'message': e.msg},
                    'storage_provider_hostname': e.descriptor.name,
                    'storage_provider_display_name': e.descriptor.display_name,
                    'user': e.descriptor.username,
                    'ip': e.descriptor.ip,
                    'port': e.descriptor.port,
                    'volume_id': volume_id}
            broadcasts.append(info)
            is_volume_error = e.error_volume
        if isinstance(e, processutils.ProcessExecutionError):
            # SVC Error CMMVC5753E
            # "The specified object does not exist or is not a suitable
            # candidate."
            # If we are doing a mkvdiskhostmap, then this error is produced if
            # either the host or the disk does not exist. For our purposes,
            # where we create the host definition, I think that the most
            # user-friendly thing we can do is assume this is a case where a
            # disk has been deleted from underneath us.
            if "CMMVC5753E" in e.stderr and ("mkvdiskhostmap" in e.cmd or
                                             "lsvdisk" in e.cmd):
                e.description = "The volume no longer exists on the storage " \
                                "controller."
            # SVC Error CMMVC6071E
            # "The VDisk-to-host mapping was not created because the VDisk is
            # already mapped to a host."
            # If we are doing a mkvdiskhostmap, then this is caused when the
            # disk is already in use by a host. If this relationship was
            # already known about by PowerVC, then we wouldn't have got this
            # far, so the most likely explanation is that the disk is mapped to
            # a host that is not managed by PowerVC.
            if "CMMVC6071E" in e.stderr and "mkvdiskhostmap" in e.cmd:
                e.description = _("The volume is already attached to another "
                                  "virtual server, possibly one that is not "
                                  "managed by PowerVC.")
            if "CMMVC5840E" in e.stderr and "rmvdisk" in e.cmd:
                e.description = _("The volume cannot be deleted because it is "
                                  "either mapped to a host or part of a "
                                  "flashcopy or remote copy operation.")
            info = {prefix + ' Failure, exit code ': e.exit_code,
                    prefix + ' Failure, stdout': to_value(e.stdout),
                    prefix + ' Failure, stderr': to_value(e.stderr),
                    prefix + ' Failure, failing command': to_value(e.cmd),
                    prefix + ' Failure description': to_value(e.description)
                    }
            if e.description:
                msg = _("%(error_prefix)s failure for volume {volume_id}: "
                        "%(error)s") % \
                    {'error_prefix': prefix,
                     'error': e.description}
            else:
                msg = _("%(error_prefix)s failure for volume {volume_id}."
                        "Command %(cmd)s, error %(error)s") % \
                    {'error_prefix': prefix,
                     'cmd': e.cmd,
                     'error': e.stderr}
            bc_info = {'msg': msg, 'volume_id': volume_id}
            broadcasts.append(bc_info)
        elif isinstance(e, exception.CinderException):
            msg = str(e.msg)
            missing_vdisk = False
            # Two spellings of "the backing vdisk is gone" map to the same
            # user-facing message and force the volume into the error state.
            if "CMMVC5753E" in msg and ("mkvdiskhostmap" in msg or
                                        "lsvdisk" in msg):
                missing_vdisk = True
            elif re.search('vdisk [0-9]* does not exist', msg):
                missing_vdisk = True
            if missing_vdisk:
                is_volume_error = True
                message = ("The volume no longer exists on the storage "
                           "controller.")
                info = {prefix + ' Failure description': to_value(message)}
            else:
                info = {prefix + ' Failure description': msg[:255]}
        else:
            info = {prefix + ' Failure description': (_("%s") % e)[:255]}
    except AttributeError as e:
        # Something was not as we expected - never mind, we just stringify
        # the exception and use that as the description.
        info = {prefix + ' Failure description': (_("%s") % e)[:255]}
    return (info, is_volume_error, broadcasts)
class PowerVCVolumeManager(manager.VolumeManager):
"""
The PowerVCVolumeManager is a subclass of the Volume Manager to provide
any additional capabilities that are needed as part of the PowerVC
product. This includes the following capabilities:
1) The ability to manage raw SAN disks that are not tracked by the
Cinder Database, such as glance images or ephemeral volumes used
for virtual server boot disks.
2) The population of an entry in a new storage_node table created by
PowerVC that will be used to store metric/status information for
the Storage Provider, similar to the compute_node table in Nova
Additional details on some of the specific implementation follows:
However, we don't want to allow this service to delete arbitrary disks
on the SAN that might not be owned by OpenStack. To constrain possible
damage, we only allow manipulation of disks whose names start with one
of a set of prefixes, and these prefixes are specified in the
configuration file using the san_allowed_prefixes option, e.g.:
san_allowed_prefixes=glance-,nova-
It is the responsibility of the caller (glance, nova...) to make sure
that the user has the correct authority to manipulate the specific disks,
and they should be careful to ensure that the disk name is not
manipulatable by the user, e.g. in metadata. For this reason, UUIDs make
good postfixes for disk names.
These functions are not exported through the cinder-api service; they are
only available via RPC calls via volume_API.
Example usage (from another service):
from cinder.volume import rpcapi as vol_rpcapi
msg = self.volume_rpcapi.make_msg('delete_san_disk',
'disk_prefix'='glance-'
'disk_postfix=glance_id)
self.volume_rpcapi.call(context, msg)
"""
    def __init__(self, volume_driver=None,
                 service_name=None, *args, **kwargs):
        """Initialize manager state and delegate to the base VolumeManager.

        NOTE: the volume_driver argument is intentionally ignored; the
        configured CONF.volume_driver value is always used instead.
        """
        self.storage_node = None
        self.volume_chunk_map = dict()
        self.volume_chunk_counter = 0
        # Force the first periodic status refresh to happen immediately.
        self.last_status_refresh = datetime.min
        self.default_volume_type_id = None
        self.first_storage_node_sync = True
        LOG.debug('volume_driver: %s' % CONF.volume_driver)
        super(PowerVCVolumeManager, self).__init__(
            volume_driver=CONF.volume_driver, *args, **kwargs)
########################################################
######### Storage Metadata Implementation #########
########################################################
# @lockutils.synchronized('storage_metadata', 'cinder-', external=True)
def get_storage_metadata(self, context):
try:
metadata = self.driver.get_storage_metadata()
except AttributeError:
metadata = {}
return metadata
########################################################
######### Default Volume Type Implementation #########
########################################################
# @lockutils.synchronized('default_volume_type', 'cinder-', external=True)
def get_default_vol_type(self, context):
volume_type = self.driver.get_default_vol_type()
return volume_type
########################################################
######### Get Default Options Implementation #########
########################################################
def get_default_opts(self, context):
options = self.driver._build_default_opts()
return options
########################################################
######### Storage Node Insert Implementation ##########
########################################################
def _mask_deleting_volumes(self, context):
"""
Mask all the volume in deleting state from parent's init_host()
"""
volumes = self.db.volume_get_all_by_host(context, self.host)
for vol in volumes:
if vol['status'] == 'deleting':
# change it to resume-deleting
self.db.volume_update(context, vol['id'],
{'status': 'resume-deleting'})
def _unmask_deleting_volumes(self, context):
"""
Unmask all the volume in deleting state from parent's init_host()
"""
volumes = self.db.volume_get_all_by_host(context, self.host)
volid_del = []
for vol in volumes:
if vol['status'] == 'resume-deleting':
# change it to deleting
self.db.volume_update(context, vol['id'],
{'status': 'deleting'})
volid_del.append(vol['id'])
return volid_del
def _init_host_delete_volumes(self, context, del_volids):
""" worker thread to offload volume delete during
init_host(), which resumes delete of any volumes that are
still in deleting state. The volume delete will be handled
sequentially.
"""
for volid in del_volids:
try:
LOG.info(_("Resuming deletion of volume %s") % volid)
self.delete_volume(context, volid)
except Exception as ex:
message = (_("Failed to delete volume %(volid)s during volume "
"service startup. Error: %(error)s") %
{'volid': volid,
'error': (_("%s") % ex)})
LOG.error(message)
# yield in case we have massive deletion during startup.
greenthread.sleep(0)
def _init_host_rollback_attach_volumes(self, context, att_volids):
""" Workder thread to offload rollback of any volumes that are still
in the attaching state during init_host(). The volume attaching
rollback will be handled sequentially.
"""
for volid in att_volids:
LOG.info(_("Processing volume that was left in the attaching "
"state. Volume id: %(id)s") % dict(id=volid))
self._volume_attach_rollback(context, volid)
def _restricted_metadata_get_connector(self, context, volid):
""" retrieve the saved connector information in the volume
restricted metadata. If it return None, it means there is not
validate connector saved during the transaction. VolumeNotFound
exception may be thrown if no restricted metadata for the
volume.
"""
meta = paxes_db.volume_restricted_metadata_get(context, volid)
connector = {}
try:
wwpnlist = map(lambda kv: kv[1] if
RESTRICTED_METADATA_CONN_WWPN_KEY in
kv[0] else None, meta.items())
connector = {'host': meta[RESTRICTED_METADATA_CONN_HOST_KEY],
'wwpns': [x for x in wwpnlist if x],
'phy_to_virt_initiators': None}
except Exception:
connector = None
return connector
def _restrict_metadata_cleanup_connector(self, context, volid):
""" remove the connector related info from restricted metadata"""
try:
metadata = paxes_db.volume_restricted_metadata_get(context,
volid)
except Exception:
return
# TODO: need to try DB2 SQL matching to delete all the
# matching entries in one transaction. Should be something like
# "connector_%" for the filter.
for key, value in metadata.iteritems():
if (RESTRICTED_METADATA_CONN_HOST_KEY in key or
RESTRICTED_METADATA_CONN_WWPN_KEY in key):
paxes_db.volume_restricted_metadata_delete(
context, volid, key)
def _restrict_metadata_save_connector(self, context, volume, connector):
""" Update volume restricted metadata with connector once hostmap
has been established during volume attach transaction. The multihost
mapping for a single volume scenario is ignored here.
"connector_host": <host> from connector
"connector_wwpn_0: <wwpn>
"connector_wwpn_1: <wwpn>
...
"""
if (not connector or not connector.get("wwpns") or
not volume or volume['status'] != 'attaching'):
return
# TODO: handle the multihost volume mapping for IVM. Currently
# is it not handling the incomplete attach transaction during the
# IVM LPM window.
self._restrict_metadata_cleanup_connector(context, volume)
metadata = {RESTRICTED_METADATA_CONN_HOST_KEY: connector.get('host')}
index = 0
for wwpn in connector['wwpns']:
key = RESTRICTED_METADATA_CONN_WWPN_KEY + '_' + str(index)
metadata[key] = wwpn
index += 1
paxes_db.volume_restricted_metadata_update_or_create(
context, volume['id'], metadata)
    def _volume_attach_rollback(self, context, volid):
        """Roll an interrupted attach transaction back to 'available'.

        Uses the connector that initialize_connection() saved in
        restricted metadata to terminate the half-finished host mapping,
        records the failure in volume metadata, then resets the volume
        status to 'available'.

        :param context: security context for DB and driver calls
        :param volid: id of the volume stuck in an attach transaction
        """
        connector = None
        try:
            connector = self._restricted_metadata_get_connector(context,
                                                                volid)
        except exception.VolumeNotFound:
            # No restricted metadata row exists for this volume at all.
            LOG.warn(_("_volume_attach_rollback: Volume %(id)s is missing "
                       "restricted metadata to recover the attaching state.") %
                     dict(id=volid))
        if connector is None:
            # Either the lookup failed or no connector was saved: there is
            # no outstanding attach to terminate, only status to reset.
            LOG.info(_("_volume_attach_rollback: No attach operation "
                       " to rollback for volume %(id)s") %
                     dict(id=volid))
        else:
            if not connector['wwpns']:
                # empty wwpns: the saved connector cannot be used to
                # terminate the backend connection, so skip termination.
                LOG.warn(_("_volume_attach_rollback: Invalid connector "
                           "%(conn)s in volume %(id)s restrict metadata. "
                           "Rollback without termination") %
                         dict(conn=connector, id=volid))
            else:
                LOG.info(_("_volume_attach_rollback: Roll back attaching "
                           "state for volume %(id)s.") % dict(id=volid))
                try:
                    self.terminate_connection(context, volid, connector)
                    # remove the connector after rollback.
                    self._restrict_metadata_cleanup_connector(context, volid)
                except Exception as ex:
                    # Best-effort: a failed terminate is logged but does not
                    # stop the status rollback below.
                    LOG.debug("_volume_attach_rollback: Failed to terminate "
                              "attach for volume %(id)s. Error: %(error)s" %
                              dict(id=volid, error=ex))
        message = ("The volume attach didn't finish successfully "
                   "before cinder service gets restarted or volume delete.")
        metadata = {'Attach Failure description':
                    to_value(message)}
        # is_volume_error=False: presumably keeps log_errors from flipping
        # the status to error -- the explicit update below sets it. TODO
        # confirm against log_errors' signature.
        log_errors(self, context, metadata, {}, False,
                   volid, broadcasts=None)
        self.db.volume_update(context, volid,
                              {'status': 'available'})
        LOG.info(_("_volume_attach_rollback: Finished. Volume %(id)s has "
                   "rolled back to available state.") % dict(id=volid))
    def _volumes_post_init_update(self, context):
        """
        Post init_host volume processing. For the volumes that have
        left in the transitional state, volume manager will try to
        clean up or reverse the state as much as possible.

        :param context: admin context used for the DB queries/updates
        :returns: tuple (volid_del, volid_att) -- ids of volumes whose
            deletion should be resumed, and ids of volumes whose attach
            must be rolled back by the caller
        """
        volumes = self.db.volume_get_all_by_host(context, self.host)
        volid_del = []
        volid_att = []
        for vol in volumes:
            if vol['status'] == 'resume-deleting':
                # change it to pending-deleting
                self.db.volume_update(context, vol['id'],
                                      {'status': 'deleting'})
                volid_del.append(vol['id'])
            elif vol['status'] == 'attaching':
                # if the volume is in the attaching state, that means nova
                # didn't get successful return from cinder volume attach.
                # The BDM will not be updated with the connection_info.
                # It is not safe to set the volume to in-use state even
                # though the volume may has been mapped on storage controller.
                # Without connection information in BDM on Nova side, nova
                # won't know how to handle the attach volume.
                # PowerVC will save each successful initialize_connection into
                # volume restricted metadata, and clean it up at the
                # end of volume_attach. As long as we see volume in attaching
                # state and there is a left over connector, we know we
                # have an unfinished volume attach and terminate the connection
                # with the connector and roll back to the Available state.
                # change to error_attaching state to make sure no retry
                self.db.volume_update(context, vol['id'],
                                      {'status': 'error_attaching'})
                volid_att.append(vol['id'])
            elif vol['status'] == 'creating':
                # It is unknown to init_host() whether volume create request
                # is still pending to be consumed by volume service or it
                # actually failed before the service has been restarted.
                # Set it to error state. If volume create is still pending,
                # volume service will retry the create volume and it will
                # correct the state afterwards. Otherwise it is a true error
                # state.
                if not vol.get('source_volid', None):
                    # this is a plain volume create (no source volume).
                    message = ("The volume create didn't finish successfully "
                               "before cinder service was restarted.")
                    metadata = {'Create Failure description':
                                to_value(message)}
                    broadcasts = None
                    # volume status will be set by log_errors
                    log_errors(self, context, metadata, {}, True,
                               vol['id'], broadcasts=broadcasts)
                else:
                    # clone failure.
                    message = ("The volume clone from source volume %(id)s "
                               " didn't finish successfully before cinder "
                               "service gets restarted." %
                               dict(id=vol.get('source_volid')))
                    metadata = {'Create Failure description':
                                to_value(message)}
                    broadcasts = None
                    # volume status will be set by log_errors
                    log_errors(self, context, metadata, {}, True,
                               vol['id'], broadcasts=broadcasts)
            elif vol['status'] == 'detaching':
                # The volume detach transaction didn't finish before
                # volume service gets restarted. And volume_detached()
                # function hasn't been invoked during the transaction.
                # There are several thing we can assume here:
                # 1. Nova BDM table entry for this attached volume hasn't
                #    been deleted.
                # 2. It is unknown in init_host() whether terminate_connection
                #    has been called or not with existing volume driver api.
                # 3. detach_volume may be still pending.
                # In order to get the volume back to a manageable state,
                # it is safe to set the volume state back to "in-use" and
                # user can retry another detach from Nova. If connection
                # has been terminated, there is no side effect to retry
                # terminate_connection.
                LOG.info(_("The volume detach didn't finish successfully "
                           "before cinder service was restarted. volume id: "
                           "%(id)s") % dict(id=vol['id']))
                message = ("The volume detach didn't finish successfully "
                           "before cinder service was restarted. Please try "
                           "again to detach the volume.")
                metadata = {'Detach Failure description': to_value(message)}
                log_errors(self, context, metadata, {}, False,
                           vol['id'], broadcasts=None)
                self.db.volume_update(context, vol['id'],
                                      {'status': 'in-use'})
        # extending state is not a long run transaction and
        # it is an synchronous call. It has a relatively small window
        # to be caught during cinder service restart. And it needs
        # code to remember the current size and desired size to roll
        # back transaction or roll forward. Will leave it as a risk
        # for now.
        return volid_del, volid_att
def init_host(self):
"""Override Initialize Host to set the State to Error on Exception"""
context = cinder.context.get_admin_context()
self._mask_deleting_volumes(context)
try:
# cinder has zone manager which conflicts with current
# paxes zone manager. Set zoning_mode to 'none' to prevent
# cinder zone manager from being loaded.
saved_zone_mode = self.configuration.safe_get('zoning_mode')
self.configuration.local_conf.zoning_mode = 'none'
super(PowerVCVolumeManager, self).init_host()
if not self.driver.initialized:
LOG.error(_('Unable to initialize host, driver is '
'uninitialized'))
volumes = self.db.volume_get_all_by_host(context, self.host)
if not volumes:
os.kill(os.getppid(), SIGTERM)
os.kill(os.getpid(), SIGTERM)
driver_name = self.driver.__class__.__name__
raise exception.DriverNotInitialized(driver=driver_name)
# restore zoning_mode
self.configuration.local_conf.zoning_mode = saved_zone_mode
self.zonemanager = None
except Exception:
with excutils.save_and_reraise_exception():
# If we got an exception, update the Storage Node to Error
self._sync_storage_node(context, {}, 'error')
self._unmask_deleting_volumes(context)
LOG.info(_("update default quota class..."))
self.set_volume_type_quota(context)
LOG.info(_('Post init_host processing and update volume status.'))
try:
volids_del, volids_att = self._volumes_post_init_update(context)
except Exception as ex:
LOG.error(_("Error happened during post init_host volume "
"processing. Error: %s") % ex)
if volids_att:
LOG.info(_('Rollback volume still attaching during init_host: '
'%d volumes') % len(volids_att))
greenthread.spawn(self._init_host_rollback_attach_volumes, context,
volids_att)
if volids_del:
LOG.info(_('Resume volume deletion during init_host: %d volumes')
% len(volids_del))
greenthread.spawn(self._init_host_delete_volumes, context,
volids_del)
def set_volume_type_quota(self, context, volume_type=None):
""" Update the volume type based default quota, uses default
volume type if vol_type_name is not specified."""
if not volume_type:
try:
volume_type = self.get_default_vol_type(context)
except Exception:
vtn = CONF.default_volume_type
volume_type = vtn.decode('utf-8') if vtn else vtn
vol_type_quota = {}
volumes_quota = "volumes_%s" % volume_type
vol_type_quota[volumes_quota] = CONF.quota_volumes
snapshots_quota = "snapshots_%s" % volume_type
vol_type_quota[snapshots_quota] = CONF.quota_snapshots
gigabytes_quota = "gigabytes_%s" % volume_type
vol_type_quota[gigabytes_quota] = CONF.quota_gigabytes
default_quota_class = db_api.quota_class_get_default(context)
quota_class = default_quota_class.get('class_name', 'default')
for key, value in vol_type_quota.iteritems():
try:
db_api.quota_class_update(context, quota_class, key, value)
except exception.QuotaClassNotFound:
db_api.quota_class_create(context, quota_class, key, value)
def update_service_capabilities(self, capabilities):
"""Override update to populate the Storage Node in the Database"""
context = cinder.context.get_admin_context()
self.last_status_refresh = timeutils.utcnow()
# Call the Parent class for the normal Service Capability Processing
super(PowerVCVolumeManager,
self).update_service_capabilities(capabilities)
# Create/Update the Storage Node entry in the Database
self._sync_storage_node(context, capabilities, 'running')
@periodic_task.periodic_task
def _refresh_driver_stats_if_needed(self, context):
"""Attempt to Refresh the Driver Status if the Parent hasn't"""
# If the Parent already refreshed the Status in the last 5 minutes,
# then we don't want to do anything here and just continue on
if timeutils.is_older_than(self.last_status_refresh, 300):
self._refresh_driver_stats(context)
def _refresh_driver_stats(self, context):
"""Attempt to Refresh the Volume Statistics from the Driver"""
try:
self._report_driver_status(context)
except Exception as exc:
LOG.warn(_('Caught Exception Refreshing Driver Status: %s')
% exc)
LOG.exception(exc)
# If we got an exception, update the Storage Node to Error
self._sync_storage_node(context, {}, 'error')
    def _sync_storage_node(self, ctxt, stats, state):
        """Override update to populate the Storage Node in the Database.

        Creates the Storage Node row the first time it is seen, updates
        it when any value actually changed, and emits start/end
        notifications around either operation. All failures are logged
        and swallowed so capability reporting never breaks the service.

        :param ctxt: security context for the DB calls
        :param stats: driver volume stats used to build the node values
        :param state: backend state string to record (e.g. 'running')
        """
        try:
            # First we want to convert the Volume Stats to a Storage Node
            values = self._build_storage_node_values(stats, self.host, state)
            # If we haven't cached the Storage Node yet, see if it exists
            if not self.storage_node:
                self.storage_node = \
                    paxes_db.storage_node_get_by_host(ctxt, self.host)
            create = self.storage_node is None
            changed = self._storage_node_changed(self.storage_node, values)
            # Notify that we are starting to create/update the Storage Node
            if create or changed or self.first_storage_node_sync:
                self._notify_storage_node_update(ctxt, values, 'start', create)
            # If the Storage Node doesn't exist in the DB, we need to create it
            if not self.storage_node:
                service = self._service_get_by_host(ctxt, self.host)
                # Only create the Storage Node if the Service already exists
                if service:
                    values['service_id'] = service['id']
                    self.storage_node = \
                        paxes_db.storage_node_create(ctxt, values)
            # Else the Storage Node is already in the DB, so just update it
            elif changed:
                self.storage_node = paxes_db.storage_node_update(
                    ctxt, self.storage_node['id'], values)
            # Notify that we are done creating/updating the Storage Node
            if create or changed or self.first_storage_node_sync:
                self._notify_storage_node_update(ctxt, values, 'end', create)
        except Exception as exc:
            LOG.warn(_('Caught Exception trying to update Storage Node in DB'))
            LOG.exception(exc)
        # Update to say we have Synchronized the Storage Node the First Time
        # We want to always send a Notification each time we Start in order
        # to account for Volume Type, etc being updated that aren't in the DB
        self.first_storage_node_sync = False
def _notify_storage_node_update(self, context,
values, postfix, creating):
"""Helper Method to Notify we are Creating/Updating Storage Node"""
event_type = 'storage.node.create.'
info = {'host': self.host, 'storage_hostname': self.host}
info['host_display_name'] = CONF.host_display_name
info['default_volume_type'] = self._get_default_volume_type_id(context)
# If this is an update, we want to notify the caller of what we updated
if not creating:
event_type = 'storage.node.update.'
# If the Storage Node was created, we can get the ID and Service ID
if self.storage_node:
info['storage_node_id'] = self.storage_node['id']
info['service_id'] = self.storage_node['service_id']
info.update(values)
# Send the notification that we are Creating/Updating the Compute Node
anotifier = rpc.get_notifier('volume', self.host)
anotifier.info(context, event_type + postfix, info)
def _get_default_volume_type_id(self, context):
"""Helper method to get the Identifier of the Default Volume Type"""
# We will cache the ID of the Volume Type when we first retrieve it
if self.default_volume_type_id is None:
# Only retrieve it if the Volume Type Name is in the CONF file
if CONF.default_volume_type is not None:
try:
voltype = volume_types.get_volume_type_by_name(
context, CONF.default_volume_type.decode('utf-8'))
self.default_volume_type_id = voltype['id']
# Log an exception if we couldn't retrieve the Volume Type
except Exception as exc:
LOG.warn(_('Unable to retrieve Default Volume Type'))
LOG.exception(exc)
return self.default_volume_type_id
@staticmethod
def _service_get_by_host(context, host):
"""Helper method to retrieve the Service instance from the DB."""
try:
return db_api.service_get_by_host_and_topic(
context, host, cfg.CONF.volume_topic)
except exception.ServiceNotFound:
return None
@staticmethod
def _build_storage_node_values(volume_stats, host, state):
"""Helper method to construct the Node attribute to populate in DB."""
stat_keys = ['total_capacity_gb', 'free_capacity_gb', 'volume_count']
values = {'storage_hostname': host}
values['backend_id'] = CONF.host_uniqueid
values['backend_type'] = CONF.host_type
values['backend_state'] = state
for key in stat_keys:
if volume_stats.get(key) is not None:
values[key] = volume_stats.get(key)
return values
@staticmethod
def _storage_node_changed(storage_node, values):
"""Helper method to see if a value really would change in the DB."""
if storage_node is None:
return True
for key, val in values.iteritems():
if storage_node.get(key) != val:
return True
return False
########################################################
######### Volume Direct-Access Implementation ##########
########################################################
def _validate_disk_prefix(self, disk_prefix):
"""Helper method to check disk prefix against our allowed list."""
if not (disk_prefix in cfg.CONF.san_allowed_prefixes):
raise DirectAccessWithBadPrefixException(prefix=disk_prefix)
    @lockutils.synchronized('delete_san_disk', 'cinder-', external=True)
    def delete_san_disk(self, context, disk_prefix, disk_postfix):
        """Deletes the disk with the specified name from the SAN.

        The name is in two parts - disk_prefix is checked against the list
        of allowed prefixes, and if it passes, it is concatenated with
        disk_postfix and passed to the driver.

        :param context: security context for the driver calls
        :param disk_prefix: prefix that must be on the allowed list
        :param disk_postfix: remainder of the disk name
        :returns: True (no Cinder DB/quota updates are performed because
            the disk is not a Cinder-managed volume)
        :raises DirectAccessWithBadPrefixException: if the prefix is not
            on the allowed list
        """
        # Check that the disk has an allowed prefix.
        self._validate_disk_prefix(disk_prefix)
        # Minimal volume-like dict; the driver calls below use 'name' only.
        volume_ref = {'name': disk_prefix + disk_postfix}
        try:
            LOG.debug(_("volume %s: removing export"), volume_ref['name'])
            self.driver.remove_export(context, volume_ref)
            LOG.debug(_("volume %s: deleting"), volume_ref['name'])
            self.driver.delete_volume(volume_ref)
        except exception.VolumeIsBusy:
            # Busy disk: leave it in place and restore its export.
            LOG.debug(_("volume %s: volume is busy"), volume_ref['name'])
            self.driver.ensure_export(context, volume_ref)
        except exception.VolumeBackendAPIException:
            # Backend API failures are logged and otherwise swallowed.
            LOG.debug(_("Fail to remove_export or delete_volume: %s" %
                        volume_ref['name']))
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.debug(_("Volume %s: deletion failed with exception ") %
                          volume_ref['name'])
        # not going to update DB since it is not a Cinder volume..
        self.publish_service_capabilities(context)
        # This space freed up is not tracked by Quotas.
        return True
    def create_volume(self, context, volume_id, request_spec=None,
                      filter_properties=None, allow_reschedule=True,
                      snapshot_id=None, image_id=None, source_volid=None):
        """
        1) Set the allow_reschedule option to be False, because it looks as if
           it isn't working quite right - we only have one storage controller
           anyway, and if reschedule is True, we see the volume go into the
           "Available" state after the reschedule, even if there was an error.
        2) Retry if the driver was initializing
        3) Refresh driver stats when we're done

        Any failure is recorded in volume metadata before the exception
        is re-raised; for a clone whose source vdisk is missing, the
        source volume's metadata is updated as well.
        """
        try:
            s = super(PowerVCVolumeManager, self)
            # NOTE: allow_reschedule is forced to False regardless of the
            # caller-supplied value (see reason 1 in the docstring).
            f = lambda: s.create_volume(context,
                                        volume_id,
                                        request_spec=request_spec,
                                        filter_properties=filter_properties,
                                        allow_reschedule=False,
                                        snapshot_id=snapshot_id,
                                        image_id=image_id,
                                        source_volid=source_volid)
            result = retry_during_init(f, 'create volume %s' % volume_id)
        except Exception as e:
            # Log the error in Volume metadata, then re-raise the exception
            with excutils.save_and_reraise_exception():
                missing_source = False
                # if there is a problem with the source volume, we need
                # to note that as well as the failure to create a new vol
                if source_volid and isinstance(e,
                        paxes_exception.SVCVdiskNotFoundException):
                    missing_volid = e.volume_id
                    if (missing_volid == source_volid):
                        missing_source = True
                        # log/update/notify regarding the source volume.
                        # For build_metadata, we still want to associate any
                        # notification messages with the new volume_id, even
                        # if it was the source volume that was in error.
                        (metadata, is_vol_err, broadcasts) = \
                            build_metadata(e, 'Clone', volume_id)
                        log_errors(self, context, metadata, {},
                                   True, source_volid,
                                   broadcasts=broadcasts)
                # TODO: add similar support for SnapshotNotFound?
                # log/update/notify regarding the new volume
                if missing_source:
                    message = ("The source volume no longer exists on the "
                               "storage controller.")
                    metadata = {'Create Failure description':
                                to_value(message)}
                    broadcasts = None
                else:
                    (metadata, is_vol_err, broadcasts) = \
                        build_metadata(e, 'Create', volume_id)
                log_errors(self, context, metadata, {}, True,
                           volume_id, broadcasts=broadcasts)
        # Make sure to refresh Capacity Statistics after the Volume Creation
        self._refresh_driver_stats(cinder.context.get_admin_context())
        return result
    def delete_volume(self, context, volume_id, unmanage_only=False):
        """
        Simple wrapper for parent delete_volume method, so we can a) prevent
        delete of image volumes and b) catch exceptions and log them in the
        volume metadata.

        Before delegating to the parent, any unfinished attach transaction
        (detected via the connector saved in restricted metadata) is
        rolled back so the backend host mapping does not block the delete.
        """
        image = None
        metadata = None
        try:
            # Don't allow delete for image volumes unless that image no
            # longer exists
            metadata = self.db.volume_metadata_get(context, volume_id)
            if metadata and ('is_image_volume' in metadata and
                             bool(metadata['is_image_volume'])):
                image_service, image_id = \
                    glance.get_remote_image_service(context,
                                                    metadata['image_id'])
                try:
                    image = image_service.show(context.elevated(), image_id)
                except exception.ImageNotFound:
                    pass  # this is actually what we want here
        except Exception as e:
            LOG.exception(_('failed checking whether \
volume %(vol_id)s backs an image,'
                            ' metadata=%(mdata)s')
                          % dict(vol_id=volume_id,
                                 mdata=metadata))
        if image is not None and image.get('status') != 'deleted':
            # The backing image still exists: refuse the delete and put
            # the volume back to 'available'.
            self.db.volume_update(context, volume_id,
                                  {'status': 'available'})
            message = "Cannot delete image-backing volume"
            metadata = {'Delete Failure description': to_value(message)}
            log_errors(self, context, metadata, {}, False, volume_id)
            return False
            # raise exception.InvalidVolume(reason=message)
        try:
            connector = None
            try:
                connector = self._restricted_metadata_get_connector(context,
                                                                    volume_id)
            except Exception:
                pass
            if connector:
                # There are unfinished volume attach transaction.
                # the nova compute manager will not save the connector
                # until reserve_volume->initialize_connection->volume_attach
                # flow is done. Then change from attaching to in-use.
                # If the instance delete request comes in the middle of this
                # attach flow, the nova compute clean up function will
                # unreserve the volume to available since the volume
                # hasn't been attached and there is no information available
                # to detach. Fortunately, paxes has transaction for
                # attach process. We can safely terminate the connection
                # during volume delete if there is any outstanding attach
                # transaction. Otherwise, volume delete will fail with SVC
                # exception due to existing host mapping. zone will be
                # cleaned up as well during roll back.
                LOG.warn(_("volume %(volid)s has unfinished attach "
                           "transaction. Rolling back before deleting "
                           "the volume.") % dict(volid=volume_id))
                self._volume_attach_rollback(context, volume_id)
            # Call the method in our superclass
            s = super(PowerVCVolumeManager, self)
            f = lambda: s.delete_volume(
                context, volume_id, unmanage_only=unmanage_only)
            result = retry_during_init(f, 'delete volume %s' % volume_id)
        except Exception as e:
            # Log the error in Volume metadata, then re-raise the exception
            # that we caught.
            with excutils.save_and_reraise_exception():
                (metadata, is_volume_error, broadcasts) = \
                    build_metadata(e, 'Delete', volume_id)
                log_errors(self, context, metadata, {}, is_volume_error,
                           volume_id, broadcasts=broadcasts)
        # Make sure to refresh Capacity Statistics after the Volume Deletion
        self._refresh_driver_stats(cinder.context.get_admin_context())
        return result
    def ibm_extend_volume(self, context, volume, new_size):
        """Extend an attached (in-use) volume to *new_size* GB.

        Reserves quota for the size increase, drives the resize through
        the driver, and maps backend-specific failures onto friendlier
        exceptions/states. On success the size is updated and the quota
        reservation committed; on failure the reservation is rolled back
        and the volume ends up 'in-use' or 'error_extending' depending on
        the failure.

        :param context: security context
        :param volume: volume model dict
        :param new_size: desired size in GB
        """
        volume_id = volume['id']
        volume_ref = self.db.volume_get(context, volume['id'])
        size_increase = (int(new_size)) - volume['size']
        anotifier = rpc.get_notifier('volume', self.host)
        anotifier.info(
            context, 'volume.extend.start', dict(volume_id=volume_id))
        try:
            reservations = QUOTAS.reserve(context, gigabytes=+size_increase)
        except exception.OverQuota as exc:
            overs = exc.kwargs['overs']
            usages = exc.kwargs['usages']
            quotas = exc.kwargs['quotas']
            def _consumed(name):
                # Total already taken for this resource (reserved + in use).
                return (usages[name]['reserved'] + usages[name]['in_use'])
            # not setting the volume to error as volume not set to extending
            # NOTE(review): if 'gigabytes' were ever absent from `overs`,
            # control would fall through with `reservations` unbound; in
            # practice gigabytes is the only resource reserved above.
            if 'gigabytes' in overs:
                msg = _("Quota exceeded for %(s_pid)s, "
                        "tried to extend volume by "
                        "%(s_size)sG, (%(d_consumed)dG of %(d_quota)dG "
                        "already consumed)")
                LOG.warn(msg % {'s_pid': context.project_id,
                                's_size': size_increase,
                                'd_consumed': _consumed('gigabytes'),
                                'd_quota': quotas['gigabytes']})
                raise exception.VolumeSizeExceedsAvailableQuota()
        # Mark the transitional state while the driver resizes the vdisk.
        self.db.volume_update(context, volume_id, {'status': 'extending'})
        try:
            f = lambda: self._ibm_extend_volume(volume_ref, new_size)
            retry_during_init(f, 'ibm extend volume %s' % volume_id)
            anotifier = rpc.get_notifier('volume', self.host)
            anotifier.info(
                context, 'volume.extend.end', dict(volume_id=volume_id))
        except Exception as ex:
            try:
                if isinstance(ex,
                              paxes_exception.PVCExpendvdiskFCMapException):
                    # The action failed because the virtual
                    # disk (VDisk) is part of a FlashCopy mapping
                    # no change will be made to the boot volume
                    self.db.volume_update(context, volume_id,
                                          {'status': 'in-use'})
                    raise ex
                elif isinstance(ex, processutils.ProcessExecutionError):
                    if ("CMMVC5860E" in ex.stderr or
                            # CMMVC5860E : The action failed because there were
                            # not enough extents in the managed disk group
                            "CMMVC6973E" in ex.stderr or
                            # CMMVC6973E: The command cannot be initiated
                            # because the maximum number of extents for a VDisk
                            # would be exceeded.
                            "CMMVC6541E" in ex.stderr):
                        # The task cannot be initiated because the virtual
                        # capacity that you have requested for the VDisk
                        # is larger than the maximum capacity that is
                        # supported for the extent size.
                        # There is no change has been made. Just exceeds
                        # size limit.
                        self.db.volume_update(context, volume_id,
                                              {'status': 'in-use'})
                        ex_args = {'host': self.host,
                                   'volume_id': volume_id}
                        raise paxes_exception.\
                            PVCExpendvdiskCapacityException(**ex_args)
                # if trying to extend volume that doesn't support resize
                # and it is attached, just restore the in-use state.
                # And log the NotImplementedError in the volume metadata.
                elif isinstance(ex, NotImplementedError):
                    self.db.volume_update(context, volume_id,
                                          {'status': 'in-use'})
                    msg = _("The host %(hostid)s for boot volume %(volid)s "
                            " does not support extend volume.")
                    LOG.warn(msg % {'volid': volume_id,
                                    'hostid': self.host})
                    raise NotImplementedError(msg %
                                              {'volid': volume_id,
                                               'hostid': self.host})
                # real volume error state for all the other cases
                # put the volume in error state.
                self.db.volume_update(context, volume_id,
                                      {'status': 'error_extending'})
                (metadata, is_volume_error, broadcasts) = \
                    build_metadata(ex, 'Extending volume', volume_id)
                # volume status has been set to error state, don't overwrite
                log_errors(self, context, metadata, {}, False,
                           volume_id, broadcasts=broadcasts)
                raise ex
            finally:
                # Failed extend: give back the reserved quota.
                QUOTAS.rollback(context, reservations)
        self.db.volume_update(context, volume_id, {'size': new_size})
        QUOTAS.commit(context, reservations)
        self.db.volume_update(context, volume_id, {'status': 'in-use'})
        # Make sure to refresh Capacity Statistics after the Volume Extension
        self._refresh_driver_stats(cinder.context.get_admin_context())
def _ibm_extend_volume(self, volume_ref, new_size):
utils.require_driver_initialized(self.driver)
self.driver.extend_volume(volume_ref, new_size)
def initialize_connection(self, context, volume_id, connector):
"""
Simple wrapper for parent initialize_connection method, so we can catch
exceptions and log them in the volume metadata.
"""
anotifier = rpc.get_notifier('volume', self.host)
anotifier.info(
context, 'volume.attach.start', dict(volume_id=volume_id))
try:
# Call the method in our superclass
s = super(PowerVCVolumeManager, self)
f = lambda: s.initialize_connection(context, volume_id, connector)
info = retry_during_init(f, 'initialize connection %s' % volume_id)
# If we have a successful attach, then we clear out any prior
# errors.
self._clear_errors(context, volume_id, 'Attach')
# volume attach transaction started successfully. Save
# the connector in the restrict metadata. It will be
# cleaned at the end of attach_volume.
volume = self.db.volume_get(context, volume_id)
volume_meta = self.db.volume_metadata_get(context,volume_id)
if 'data' in info and volume_meta.has_key('is_boot_volume'):
info['data']['is_boot_volume'] = volume_meta['is_boot_volume']
self._restrict_metadata_save_connector(context, volume, connector)
LOG.info(_("Start attach transaction for volume %(id)s.") %
dict(id=volume_id))
return info
except Exception as e:
# Log the error in Volume metadata, then re-raise the exception
# that we caught.
with excutils.save_and_reraise_exception():
(metadata, is_volume_error, broadcasts) = \
build_metadata(e, 'Attach', volume_id)
extra_metadata = {'Attach Failure, connection request':
str(connector)[:255]}
log_errors(self, context, metadata, extra_metadata,
is_volume_error, volume_id, broadcasts=broadcasts)
def terminate_connection(self, context, volume_id, connector, force=False):
"""
Simple wrapper for parent terminate_connection method, so we can catch
exceptions and log them in the volume metadata.
"""
anotifier = rpc.get_notifier('volume', self.host)
anotifier.info(
context, 'volume.detach.start', dict(volume_id=volume_id))
try:
# Call the method in our superclass
s = super(PowerVCVolumeManager, self)
f = lambda: s.terminate_connection(context, volume_id, connector,
force=force)
info = retry_during_init(f, 'terminate connection %s' % volume_id)
LOG.debug(_("Driver returned %s") % info)
# If we have a successful detach, then we clear out any prior
# errors
self._clear_errors(context, volume_id, 'Detach')
# terminate_connection doesn't return any data.
return None
except Exception as e:
# Log the error in Volume metadata, then re-raise the exception
# that we caught.
with excutils.save_and_reraise_exception():
(metadata, is_volume_error, broadcasts) = \
build_metadata(e, 'Detach', volume_id)
extra_metadata = {'Detach Failure, connection request':
str(connector)[:255]}
log_errors(self, context, metadata, extra_metadata,
is_volume_error, volume_id, broadcasts=broadcasts)
def attach_volume(self, context, volume_id, instance_uuid, host_name,
mountpoint, mode):
"""
Simple wrapper for parent attach_volume method, so we can add
a messaging notification.
"""
try:
# Call the method in our superclass
s = super(PowerVCVolumeManager, self)
f = lambda: s.attach_volume(context, volume_id, instance_uuid,
host_name, mountpoint, mode)
result = retry_during_init(f, 'attach volume %s' % volume_id)
# The volume attach transaction finished successfully. Clean
# up the saved connector in the restricted metadata.
LOG.info(_("Successfully attached the volume %(id)s. Clean up "
"transaction.") % dict(id=volume_id))
self._restrict_metadata_cleanup_connector(context, volume_id)
anotifier = rpc.get_notifier('volume', host_name)
anotifier.info(
context, 'volume.attach.end',
dict(volume_id=volume_id, instance_id=instance_uuid,
host_name=host_name, mountpoint=mountpoint, mode=mode))
except Exception as e:
# Log the error in Volume metadata, then re-raise the exception
# that we caught.
with excutils.save_and_reraise_exception():
(metadata, is_volume_error, broadcasts) = \
build_metadata(e, 'Attach', volume_id)
extra_metadata = {
'Attach Failure, db update args':
'Volume %s, Server %s' % (volume_id, instance_uuid)}
log_errors(self, context, metadata, extra_metadata,
is_volume_error, volume_id, broadcasts=broadcasts)
return result
    def detach_volume(self, context, volume_id):
        """
        Simple wrapper for parent detach_volume method, so we can add
        a messaging notification.

        After the detach, the volume health is checked via the driver
        and any problem found is logged into the volume metadata.
        """
        try:
            # Call the method in our superclass
            s = super(PowerVCVolumeManager, self)
            f = lambda: s.detach_volume(context, volume_id)
            retry_during_init(f, 'detach volume %s' % volume_id)
            anotifier = rpc.get_notifier('volume', self.host)
            anotifier.info(
                context, 'volume.detach.end', dict(volume_id=volume_id))
        except Exception as e:
            # Log the error in Volume metadata, then re-raise the exception
            # that we caught.
            with excutils.save_and_reraise_exception():
                (metadata, is_volume_error, broadcasts) = \
                    build_metadata(e, 'Detach', volume_id)
                extra_metadata = {
                    'Detach Failure, db update args': 'Volume %s' % volume_id}
                log_errors(self, context, metadata, extra_metadata,
                           is_volume_error, volume_id, broadcasts=broadcasts)
        try:
            # Post-detach health check; failures here are logged into the
            # volume metadata but do not fail the detach (no re-raise).
            volume = self.db.volume_get(context, volume_id)
            self.driver.check_volume_health(volume)
        except Exception as e:
            (metadata, is_volume_error, broadcasts) = \
                build_metadata(e, 'Detach', volume_id)
            extra_metadata = {
                'Detach Failure, db update args': 'Volume %s' % volume_id}
            log_errors(self, context, metadata, extra_metadata,
                       is_volume_error, volume_id, broadcasts=broadcasts)
########################################################
######### Volume Discovery Implementation ##########
########################################################
def discover_volumes(self, context, filters=None):
"""Returns a list of all of the Volumes that exist on the Host"""
# Currently we won't throw an exception if it isn't a Discover Driver
if not isinstance(self.driver, discovery_driver.VolumeDiscoveryDriver):
drvclass = self.driver.__class__.__name__
LOG.warn(_('Driver %s does not implement Discover Driver')
% drvclass)
return {'identifier': None, 'volumes': [], 'chunking': False}
# Call the Volume Discovery Driver to get a list of existing Volumes
all_volumes = self._discover_volumes(context, filters)
# We need to modify the Instances returned from the Driver slightly
self._manipulate_driver_volumes(all_volumes, False)
# Break up the list of Volumes to a set of Chunks to be returned
identifier = self._persist_volumes_chunks(all_volumes)
# Return the first chunk of persisted volumes to the caller
return self.get_next_volumes_chunk(context, identifier)
def query_volumes(self, context, volume_ids,
extra_parms={}, allow_unsupported=False):
"""Returns details about the Volumes that were requested"""
return_all = '*all' in volume_ids
match_volumes, query_volumes = (list(), list())
# First we need to figure out what Volumes really exist on the Host
filters = self._parse_host_filters(extra_parms)
all_volumes = self._discover_volumes(context, filters)
uuid_map = dict([(vol_id, '') for vol_id in volume_ids])
# Loop through each Volumes on the Host, seeing if we should query
for volume in all_volumes:
# See if they requested all instances or this specific one
if return_all or volume['uuid'] in uuid_map:
support = volume.get('support', {})
supported = support.get('status', 'supported')
# If this is managed, then no need to do a query, just return
if volume.get('managed') is True:
volume.pop('connection_info', None)
match_volumes.append(volume)
# If this isn't supported and no override, just return it
elif not allow_unsupported and supported != 'supported':
volume.pop('connection_info', None)
match_volumes.append(volume)
# Otherwise it is supported/un-managed and a match so query
else:
volume.pop('support', volume.pop('managed', None))
query_volumes.append(volume)
# Only worth calling the Driver if some VM's exist on the System
if len(query_volumes) > 0:
mark_boot = extra_parms.get('has_boot', {})
server_info = extra_parms.get('server_info', {})
volumes = self.driver.query_volumes(
context, query_volumes, server_info, mark_boot)
match_volumes.extend(volumes)
# We need to modify the Volume returned from the Driver slightly
self._manipulate_driver_volumes(match_volumes, True)
# Break up the list of Volumes to a set of Chunks to be returned
identifier = self._persist_volumes_chunks(match_volumes)
# Return the first chunk of persisted volumes to the caller
return self.get_next_volumes_chunk(context, identifier)
@lockutils.synchronized('volume_chunking', 'cinder-')
def get_next_volumes_chunk(self, context, identifier):
"""Provides Chunking of Volume Lists to avoid QPID Limits"""
volume_chunks = self.volume_chunk_map.get(identifier)
# If the Identifier doesn't exist, we will just return an empty list
if volume_chunks is None:
return dict(identifier=identifier, volumes=[], chunking=False)
# If this is the last chunk (or no chunking), just return that list
if len(volume_chunks) == 1:
self.volume_chunk_map.pop(identifier, None)
return dict(identifier=identifier,
volumes=volume_chunks[0], chunking=False)
# Otherwise return the first chunk and say that there are more left
self.volume_chunk_map[identifier] = volume_chunks[1:]
return dict(identifier=identifier,
volumes=volume_chunks[0], chunking=True)
    def verify_host_running(self, context):
        """Verifies the cinder-volume service for the Host is running.

        This is an RPC liveness probe: the fact that this method executes
        and returns at all proves the service is up, so it unconditionally
        returns True.
        """
        return True
def _discover_volumes(self, context, filters=None):
"""Helper Method to list all of the Volumes that exist on the Host"""
# Call the Volume Discovery Driver to get a list of existing Volumes
drv_volumes = self.driver.discover_volumes(context, filters)
# Generate the UUID's for the Volumes and determine which are Managed
self._generate_volume_uuids(context, drv_volumes, filters)
return drv_volumes
@lockutils.synchronized('volume_chunking', 'cinder-')
def _persist_volumes_chunks(self, volumes):
"""Internal Helper method to generate the UUID's for the Volumes"""
current_len = 0
current_list, volume_chunks = (list(), list())
# First get an identifier to be used to reference this chunking
identifier = str(self.volume_chunk_counter)
self.volume_chunk_counter = self.volume_chunk_counter + 1
# Loop through each volume, breaking it up into chunks based on size
while len(volumes) > 0:
volume = volumes.pop(0)
volume_len = len(str(volume))
# If we could possibly go over the 64K Message size, break up
if len(current_list) > 0 and (current_len + volume_len) > 40000:
volume_chunks.append(current_list)
current_list = []
current_len = 0
# Add this volume to the current chunk that is going to be returned
current_len = current_len + volume_len
current_list.append(volume)
# Add the final chunk to the overall set of chunks for the volume list
volume_chunks.append(current_list)
self.volume_chunk_map[identifier] = volume_chunks
return identifier
def _manipulate_driver_volumes(self, drv_volumes, query):
"""Internal Helper method to modify attributes on the Volumes"""
for volume in drv_volumes:
volume['id'] = volume.pop('uuid', None)
if query:
volume.pop('storage_pool', None)
else:
volume.pop('restricted_metadata', None)
volume.pop('connection_info', None)
    def _generate_volume_uuids(self, context, drv_volumes, filters):
        """Stamp each discovered volume with a UUID and its managed state.

        Volumes already known to the DB (matched by UUID, or by the
        provider identifier found in restricted metadata) reuse the DB
        id/display name; unknown volumes get a deterministic uuid3 derived
        from a host/provider-id/name string.  Per the inline comments, a
        truthy *filters* appears to indicate an attach flow, in which case
        a volume is only treated as managed when it has an instance_uuid
        (TODO confirm against callers).

        :param drv_volumes: volume dicts from the driver; mutated in place
        """
        uuid_map, provid_map = (dict(), dict())
        db_volumes = self.db.volume_get_all_by_host(context, self.host)
        # Loop through DB Volumes, construct UUID/VDisk Maps for lookup
        for db_volume in db_volumes:
            volid = db_volume['id']
            uuid_map[volid] = db_volume
            # Try to retrieve the Metadata out of the DB for the Volume
            result = paxes_db.volume_restricted_metadata_get(context, volid)
            prov_id = self._get_provider_identifier(result)
            if prov_id is not None:
                provid_map[prov_id] = db_volume
        # Loop through the Volumes generating UUID's for any that need one
        for drv_vol in drv_volumes:
            managed = False
            # If the Driver returned UUID, see if the Volume is managed
            if drv_vol.get('uuid') is not None:
                db_vol = uuid_map.get(drv_vol['uuid'])
                if db_vol is not None:
                    drv_vol['name'] = db_vol['display_name']
                    inst_uuid = db_vol.get('instance_uuid')
                    # Mark it as managed unless this is an Attach and it is not
                    managed = not filters or inst_uuid is not None
            # If the UUID isn't set, do a secondary lookup to see if it exists
            else:
                metadata = drv_vol.get('restricted_metadata')
                # Default '!' gives a non-None key that cannot collide with
                # any real provider id stored in provid_map.
                prov_id = self._get_provider_identifier(metadata, '!')
                db_vol = provid_map.get(prov_id)
                # If an existing match based on provider id, use that UUID
                if db_vol is not None:
                    drv_vol['uuid'] = db_vol['id']
                    drv_vol['name'] = db_vol['display_name']
                    inst_uuid = db_vol.get('instance_uuid')
                    # Mark it as managed unless this is an Attach and it is not
                    managed = not filters or inst_uuid is not None
                # Otherwise it isn't managed, so we need to generate one
                else:
                    # Fixed namespace UUID keeps uuid3 results stable across
                    # restarts for the same host/provider-id/name string.
                    namesp = uuid.UUID('a0dd4880-6115-39d6-b26b-77df18fe749f')
                    namestr = self._get_unique_volume_str(drv_vol, prov_id)
                    drv_vol['uuid'] = str(uuid.uuid3(namesp, namestr))
            # Set the Managed attribute based on what we determined earlier
            drv_vol['managed'] = managed
def _parse_host_filters(self, extra_parms):
"""Internal Helper Method to parse Host Refs from the Server Info"""
host_refs = list()
# If there were any Filters provided, just return those filters
if extra_parms.get('filters') is not None:
return extra_parms['filters']
# If there weren't any attached servers, nothing to filter on
if not extra_parms.get('server_info'):
return None
# Loop through each of the Servers provided, parsing out the Volumes
for server in extra_parms['server_info'].values():
volumes = server.get('volumes', [])
# Loop through the list of WWPN's and UniqueDeviceID's identifiers
for volume in volumes:
if volume.get('unique_device_id') is not None:
host_refs.append(volume['unique_device_id'])
if volume.get('wwpns') is not None:
host_refs = host_refs + volume['wwpns']
return dict(host_refs=host_refs)
def _get_unique_volume_str(self, volume, prov_id):
"""Internal Helper method to create a unique string for the volume"""
volume_name = volume.get('name', '')
return "ibm-paxes://volume/host='%s',id='%s',name='%s'" % \
(self.host, str(prov_id), str(volume_name))
@staticmethod
def _get_provider_identifier(restricted_metadata, default=None):
"""Internal Helper method to parse the provider id out of meta-data"""
if restricted_metadata is None:
return default
id_keys = ['lu_udid', 'vdisk_uid']
# Try to find the Identifier attribute out of the Meta-data
for key, val in restricted_metadata.iteritems():
if key in id_keys:
return val
return default
def _clear_errors(self, context, volume_id, prefix):
"""
Removes the metadata keys that we added when an error occurred.
We use this to clear out errors if we subsequently perform a
successful operation
"""
keys_to_delete = [prefix + ' Failure, exit code ',
prefix + ' Failure, stdout',
prefix + ' Failure, stderr',
prefix + ' Failure, failing command',
prefix + ' Failure description']
# For attach failures we added this additional piece of information
# that we should clean up.
if prefix == 'Attach':
keys_to_delete.append('Attach Failure, connection request')
vol_metadata = self.db.volume_metadata_get(context, volume_id)
if vol_metadata:
# Remove all keys, if they exist
for key in keys_to_delete:
vol_metadata.pop(key, None)
# Prepare the update
new_data = {'metadata': vol_metadata}
# Update the database
self.db.volume_update(context, volume_id, new_data)
    def exist_volumes(self, context):
        """Return the driver's list of existing logical volumes.

        Thin pass-through to the driver; no DB interaction here.
        """
        lvs = self.driver.exist_volumes()
        return lvs
| windskyer/k_cinder | paxes_cinder/volume/manager.py | Python | apache-2.0 | 73,959 |
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import division
from __future__ import unicode_literals
from mo_times.dates import Date
from mo_times.durations import DAY
from tests.test_jx import BaseTestCase, TEST_TABLE
FROM_DATE = Date.today()-7*DAY
TO_DATE = Date.today()
# 25 sample documents spread over the last week.  Per-day counts: 6 at
# now-6day, 5 at now-5day, 4 at now-4day, 3 at now-3day, 2 at now-2day,
# 1 at now-1day, none at now-7day, plus 4 "today" docs that fall outside
# the [today-week, today) query domain used by the tests below.
simple_test_data =[
    {"run":{"timestamp": Date("now-4day"), "value": 1}},
    {"run":{"timestamp": Date("now-4day"), "value": 2}},
    {"run":{"timestamp": Date("now-4day"), "value": 3}},
    {"run":{"timestamp": Date("now-4day"), "value": 4}},
    {"run":{"timestamp": Date("now-3day"), "value": 5}},
    {"run":{"timestamp": Date("now-3day"), "value": 6}},
    {"run":{"timestamp": Date("now-3day"), "value": 7}},
    {"run":{"timestamp": Date("now-2day"), "value": 8}},
    {"run":{"timestamp": Date("now-2day"), "value": 9}},
    {"run":{"timestamp": Date("now-1day"), "value": 0}},
    {"run":{"timestamp": Date("now-5day"), "value": 1}},
    {"run":{"timestamp": Date("now-5day"), "value": 2}},
    {"run":{"timestamp": Date("now-5day"), "value": 3}},
    {"run":{"timestamp": Date("now-5day"), "value": 4}},
    {"run":{"timestamp": Date("now-5day"), "value": 5}},
    {"run":{"timestamp": Date("now-6day"), "value": 6}},
    {"run":{"timestamp": Date("now-6day"), "value": 7}},
    {"run":{"timestamp": Date("now-6day"), "value": 8}},
    {"run":{"timestamp": Date("now-6day"), "value": 9}},
    {"run":{"timestamp": Date("now-6day"), "value": 0}},
    {"run":{"timestamp": Date("now-6day"), "value": 1}},
    {"run":{"timestamp": Date("now-0day"), "value": 2}},
    {"run":{"timestamp": Date("now-0day"), "value": 3}},
    {"run":{"timestamp": Date("now-0day"), "value": 4}},
    {"run":{"timestamp": Date("now-0day"), "value": 5}}
]
class TestEdge1(BaseTestCase):
    """Edge queries over a time domain (one-day partitions of a week)."""
    def test_count_over_time(self):
        """Count docs per day; expects list, table and cube renderings.

        allowNulls=False drops the overflow partition, so the four
        "today" documents do not appear in any expected output.
        """
        test = {
            "data": simple_test_data,
            "query": {
                "from": TEST_TABLE,
                "edges": [
                    {
                        "value": "run.timestamp",
                        "allowNulls": False,
                        "domain": {
                            "type": "time",
                            "min": "today-week",
                            "max": "today",
                            "interval": "day"
                        }
                    }
                ]
            },
            "expecting_list": {
                "meta": {"format": "list"},
                "data": [
                    {"run": {"timestamp": (FROM_DATE + 0 * DAY).unix}, "count": 0},
                    {"run": {"timestamp": (FROM_DATE + 1 * DAY).unix}, "count": 6},
                    {"run": {"timestamp": (FROM_DATE + 2 * DAY).unix}, "count": 5},
                    {"run": {"timestamp": (FROM_DATE + 3 * DAY).unix}, "count": 4},
                    {"run": {"timestamp": (FROM_DATE + 4 * DAY).unix}, "count": 3},
                    {"run": {"timestamp": (FROM_DATE + 5 * DAY).unix}, "count": 2},
                    {"run": {"timestamp": (FROM_DATE + 6 * DAY).unix}, "count": 1}
                ]},
            "expecting_table": {
                "meta": {"format": "table"},
                "header": ["run.timestamp", "count"],
                "data": [
                    [(FROM_DATE + 0 * DAY).unix, 0],
                    [(FROM_DATE + 1 * DAY).unix, 6],
                    [(FROM_DATE + 2 * DAY).unix, 5],
                    [(FROM_DATE + 3 * DAY).unix, 4],
                    [(FROM_DATE + 4 * DAY).unix, 3],
                    [(FROM_DATE + 5 * DAY).unix, 2],
                    [(FROM_DATE + 6 * DAY).unix, 1]
                ]
            },
            "expecting_cube": {
                "meta": {"format": "cube"},
                "edges": [
                    {
                        "name": "run.timestamp",
                        "domain": {
                            "type": "time",
                            "key": "min",
                            # One partition per day, keyed by its start time.
                            "partitions": [{"dataIndex": i, "min": m.unix, "max": (m + DAY).unix} for i, m in enumerate(Date.range(FROM_DATE, TO_DATE, DAY))],
                            "min": FROM_DATE.unix,
                            "max": TO_DATE.unix,
                            "interval": DAY.seconds
                        }
                    }
                ],
                "data": {
                    "count": [0, 6, 5, 4, 3, 2, 1]
                }
            }
        }
        self.utils.execute_tests(test)
| maggienj/ActiveData | tests/test_jx/test_edge_time.py | Python | mpl-2.0 | 4,700 |
#!/usr/bin/env python3
#-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustaebel <lars@gustaebel.de>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""Read from and write to tar format archives.
"""
__version__ = "$Revision$"
version = "0.9.0"
__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)"
__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $"
__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $"
__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend."
#---------
# Imports
#---------
import sys
import os
import shutil
import stat
import errno
import time
import struct
import copy
import re
try:
import grp, pwd
except ImportError:
grp = pwd = None
# os.symlink on Windows prior to 6.0 raises NotImplementedError
symlink_exception = (AttributeError, NotImplementedError)
try:
# WindowsError (1314) will be raised if the caller does not hold the
# SeCreateSymbolicLinkPrivilege privilege
symlink_exception += (WindowsError,)
except NameError:
pass
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
from builtins import open as _open # Since 'open' is TarFile.open
#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = b"\0" # the null character
BLOCKSIZE = 512 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20 # length of records
GNU_MAGIC = b"ustar \0" # magic gnu tar string
POSIX_MAGIC = b"ustar\x0000" # magic posix tar string
LENGTH_NAME = 100 # maximum length of a filename
LENGTH_LINK = 100 # maximum length of a linkname
LENGTH_PREFIX = 155 # maximum length of the prefix field
REGTYPE = b"0" # regular file
AREGTYPE = b"\0" # regular file
LNKTYPE = b"1" # link (inside tarfile)
SYMTYPE = b"2" # symbolic link
CHRTYPE = b"3" # character special device
BLKTYPE = b"4" # block special device
DIRTYPE = b"5" # directory
FIFOTYPE = b"6" # fifo special device
CONTTYPE = b"7" # contiguous file
GNUTYPE_LONGNAME = b"L" # GNU tar longname
GNUTYPE_LONGLINK = b"K" # GNU tar longlink
GNUTYPE_SPARSE = b"S" # GNU tar sparse file
XHDTYPE = b"x" # POSIX.1-2001 extended header
XGLTYPE = b"g" # POSIX.1-2001 global header
SOLARIS_XHDTYPE = b"X" # Solaris extended header
USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1 # GNU tar format
PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT
#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
SYMTYPE, DIRTYPE, FIFOTYPE,
CONTTYPE, CHRTYPE, BLKTYPE,
GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
"uid", "gid", "uname", "gname")
# Fields from a pax header that are affected by hdrcharset.
PAX_NAME_FIELDS = {"path", "linkpath", "uname", "gname"}
# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
"atime": float,
"ctime": float,
"mtime": float,
"uid": int,
"gid": int,
"size": int
}
#---------------------------------------------------------
# Bits used in the mode field, values in octal.
#---------------------------------------------------------
S_IFLNK = 0o120000 # symbolic link
S_IFREG = 0o100000 # regular file
S_IFBLK = 0o060000 # block device
S_IFDIR = 0o040000 # directory
S_IFCHR = 0o020000 # character device
S_IFIFO = 0o010000 # fifo
TSUID = 0o4000 # set UID on execution
TSGID = 0o2000 # set GID on execution
TSVTX = 0o1000 # reserved
TUREAD = 0o400 # read by owner
TUWRITE = 0o200 # write by owner
TUEXEC = 0o100 # execute/search by owner
TGREAD = 0o040 # read by group
TGWRITE = 0o020 # write by group
TGEXEC = 0o010 # execute/search by group
TOREAD = 0o004 # read by other
TOWRITE = 0o002 # write by other
TOEXEC = 0o001 # execute/search by other
#---------------------------------------------------------
# initialization
#---------------------------------------------------------
if os.name in ("nt", "ce"):
ENCODING = "utf-8"
else:
ENCODING = sys.getfilesystemencoding()
#---------------------------------------------------------
# Some useful functions
#---------------------------------------------------------
def stn(s, length, encoding, errors):
    """Encode *s* into a NUL-padded bytes field of exactly *length* bytes.

    Over-long values are truncated; short ones are padded with NULs.
    """
    encoded = s.encode(encoding, errors)
    padding = NUL * (length - len(encoded))
    return encoded[:length] + padding
def nts(s, encoding, errors):
    """Decode a NUL-terminated bytes field into a str.

    Everything from the first NUL byte onward is discarded before
    decoding; a field without a NUL is decoded in full.
    """
    terminator = s.find(b"\0")
    if terminator != -1:
        s = s[:terminator]
    return s.decode(encoding, errors)
def nti(s):
    """Convert a tar number field to a Python number.

    Handles both encodings produced by itn(): plain NUL-terminated octal
    digits, and the GNU base-256 form flagged by a leading 0o200
    (positive) or 0o377 (negative) byte.
    """
    if s[0] in (0o200, 0o377):
        # GNU base-256: big-endian value over the remaining bytes.
        n = 0
        for byte in s[1:]:
            n = (n << 8) + byte
        if s[0] == 0o377:
            # 0o377 marks a negative number stored in two's complement.
            n -= 256 ** (len(s) - 1)
        return n
    try:
        return int(nts(s, "ascii", "strict") or "0", 8)
    except ValueError:
        raise InvalidHeaderError("invalid header")
def itn(n, digits=8, format=DEFAULT_FORMAT):
    """Convert a python number to a number field.

    :param n: the integer to encode
    :param digits: total field width in bytes, including the terminator
    :param format: tar format; GNU_FORMAT enables the base-256 fallback
        for values that do not fit in octal
    :raises ValueError: if *n* does not fit the field in the given format
    """
    # POSIX 1003.1-1988 requires numbers to be encoded as a string of
    # octal digits followed by a null-byte, this allows values up to
    # (8**(digits-1))-1. GNU tar allows storing numbers greater than
    # that if necessary. A leading 0o200 or 0o377 byte indicate this
    # particular encoding, the following digits-1 bytes are a big-endian
    # base-256 representation. This allows values up to (256**(digits-1))-1.
    # A 0o200 byte indicates a positive number, a 0o377 byte a negative
    # number.
    if 0 <= n < 8 ** (digits - 1):
        s = bytes("%0*o" % (digits - 1, n), "ascii") + NUL
    elif format == GNU_FORMAT and -256 ** (digits - 1) <= n < 256 ** (digits - 1):
        if n >= 0:
            s = bytearray([0o200])
        else:
            s = bytearray([0o377])
            # Two's-complement representation of the negative value.
            n = 256 ** digits + n
        # Emit the base-256 digits big-endian after the flag byte.
        for i in range(digits - 1):
            s.insert(1, n & 0o377)
            n >>= 8
    else:
        raise ValueError("overflow in number field")
    return s
def calc_chksums(buf):
    """Calculate the checksum for a member's 512-byte header.

    The 8-byte chksum field (offsets 148-155) is treated as if it were
    filled with spaces, i.e. skipped here and accounted for by the
    constant 256 (eight 0x20 bytes).  Some historical tars (Sun, NeXT)
    summed signed chars, so both an unsigned and a signed checksum are
    returned.
    """
    head = buf[:148]
    tail = buf[156:512]
    def _signed(b):
        # Reinterpret an unsigned byte as a signed char (two's complement).
        return b - 256 if b > 127 else b
    unsigned_chksum = 256 + sum(head) + sum(tail)
    signed_chksum = (256 + sum(_signed(b) for b in head)
                     + sum(_signed(b) for b in tail))
    return unsigned_chksum, signed_chksum
def copyfileobj(src, dst, length=None):
    """Copy *length* bytes from fileobj *src* to fileobj *dst*.

    With length=None the entire remaining content is copied; length=0 is
    a no-op.  Raises IOError when src runs dry before *length* bytes have
    been transferred.
    """
    if length == 0:
        return
    if length is None:
        shutil.copyfileobj(src, dst)
        return
    bufsize = 16 * 1024
    # Full-sized reads first, then one short read for the remainder.
    full_blocks, leftover = divmod(length, bufsize)
    wanted = [bufsize] * full_blocks
    if leftover != 0:
        wanted.append(leftover)
    for want in wanted:
        data = src.read(want)
        if len(data) < want:
            raise IOError("end of file reached")
        dst.write(data)
    return
# Lookup table used by filemode(): the first group selects the file-type
# character, the remaining nine groups each select one permission slot.
# Within a group, the first entry whose bits are all set wins (so the
# combined setuid/setgid/sticky variants are checked before the plain
# execute bit).
filemode_table = (
    ((S_IFLNK,      "l"),
     (S_IFREG,      "-"),
     (S_IFBLK,      "b"),
     (S_IFDIR,      "d"),
     (S_IFCHR,      "c"),
     (S_IFIFO,      "p")),

    ((TUREAD,       "r"),),
    ((TUWRITE,      "w"),),
    ((TUEXEC|TSUID, "s"),
     (TSUID,        "S"),
     (TUEXEC,       "x")),

    ((TGREAD,       "r"),),
    ((TGWRITE,      "w"),),
    ((TGEXEC|TSGID, "s"),
     (TSGID,        "S"),
     (TGEXEC,       "x")),

    ((TOREAD,       "r"),),
    ((TOWRITE,      "w"),),
    ((TOEXEC|TSVTX, "t"),
     (TSVTX,        "T"),
     (TOEXEC,       "x"))
)
def filemode(mode):
    """Render *mode* as an ls-style permission string, e.g. "-rwxr-x---".

    Used by TarFile.list().
    """
    def pick(alternatives):
        # First entry whose bits are all present wins; '-' if none match.
        for bit, char in alternatives:
            if mode & bit == bit:
                return char
        return "-"
    return "".join(pick(group) for group in filemode_table)
# Exception hierarchy: TarError is the root; HeaderError and its
# subclasses classify the ways a 512-byte member header can be bad.
class TarError(Exception):
    """Base exception for all tarfile errors."""
    pass
class ExtractError(TarError):
    """General exception for extract errors."""
    pass
class ReadError(TarError):
    """Exception for unreadable tar archives."""
    pass
class CompressionError(TarError):
    """Exception for unavailable compression methods."""
    pass
class StreamError(TarError):
    """Exception for unsupported operations on stream-like TarFiles."""
    pass
class HeaderError(TarError):
    """Base exception for header errors."""
    pass
class EmptyHeaderError(HeaderError):
    """Exception for empty headers."""
    pass
class TruncatedHeaderError(HeaderError):
    """Exception for truncated headers."""
    pass
class EOFHeaderError(HeaderError):
    """Exception for end of file headers."""
    pass
class InvalidHeaderError(HeaderError):
    """Exception for invalid headers."""
    pass
class SubsequentHeaderError(HeaderError):
    """Exception for missing and invalid extended headers."""
    pass
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile:
"""Low-level file object. Supports reading and writing.
It is used instead of a regular file object for streaming
access.
"""
def __init__(self, name, mode):
mode = {
"r": os.O_RDONLY,
"w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
}[mode]
if hasattr(os, "O_BINARY"):
mode |= os.O_BINARY
self.fd = os.open(name, mode, 0o666)
def close(self):
os.close(self.fd)
def read(self, size):
return os.read(self.fd, size)
def write(self, s):
os.write(self.fd, s)
class _Stream:
"""Class that serves as an adapter between TarFile and
a stream-like object. The stream-like object only
needs to have a read() or write() method and is accessed
blockwise. Use of gzip or bzip2 compression is possible.
A stream-like object could be for example: sys.stdin,
sys.stdout, a socket, a tape device etc.
_Stream is intended to be used only internally.
"""
def __init__(self, name, mode, comptype, fileobj, bufsize):
"""Construct a _Stream object.
"""
self._extfileobj = True
if fileobj is None:
fileobj = _LowLevelFile(name, mode)
self._extfileobj = False
if comptype == '*':
# Enable transparent compression detection for the
# stream interface
fileobj = _StreamProxy(fileobj)
comptype = fileobj.getcomptype()
self.name = name or ""
self.mode = mode
self.comptype = comptype
self.fileobj = fileobj
self.bufsize = bufsize
self.buf = b""
self.pos = 0
self.closed = False
try:
if comptype == "gz":
try:
import zlib
except ImportError:
raise CompressionError("zlib module is not available")
self.zlib = zlib
self.crc = zlib.crc32(b"")
if mode == "r":
self._init_read_gz()
else:
self._init_write_gz()
if comptype == "bz2":
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if mode == "r":
self.dbuf = b""
self.cmp = bz2.BZ2Decompressor()
else:
self.cmp = bz2.BZ2Compressor()
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
def __del__(self):
if hasattr(self, "closed") and not self.closed:
self.close()
def _init_write_gz(self):
"""Initialize for writing with gzip compression.
"""
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-self.zlib.MAX_WBITS,
self.zlib.DEF_MEM_LEVEL,
0)
timestamp = struct.pack("<L", int(time.time()))
self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
if self.name.endswith(".gz"):
self.name = self.name[:-3]
# RFC1952 says we must use ISO-8859-1 for the FNAME field.
self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc)
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s)
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = b""
if self.comptype == "gz":
# The native zlib crc is an unsigned 32-bit integer, but
# the Python wrapper implicitly casts that to a signed C
# long. So, on a 32-bit box self.crc may "look negative",
# while the same crc on a 64-bit box may "look positive".
# To avoid irksome warnings from the `struct` module, force
# it to look positive on all boxes.
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def _init_read_gz(self):
"""Initialize for reading a gzip compressed fileobj.
"""
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = b""
# taken from gzip.GzipFile with some alterations
if self.__read(2) != b"\037\213":
raise ReadError("not a gzip file")
if self.__read(1) != b"\010":
raise CompressionError("unsupported compression method")
flag = ord(self.__read(1))
self.__read(6)
if flag & 4:
xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
self.read(xlen)
if flag & 8:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 16:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 2:
self.__read(2)
def tell(self):
"""Return the stream's file pointer position.
"""
return self.pos
def seek(self, pos=0):
"""Set the stream's file pointer to pos. Negative seeking
is forbidden.
"""
if pos - self.pos >= 0:
blocks, remainder = divmod(pos - self.pos, self.bufsize)
for i in range(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError("seeking backwards is not allowed")
return self.pos
def read(self, size=None):
"""Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
"""
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = "".join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
def _read(self, size):
"""Return size bytes from the stream.
"""
if self.comptype == "tar":
return self.__read(size)
c = len(self.dbuf)
while c < size:
buf = self.__read(self.bufsize)
if not buf:
break
try:
buf = self.cmp.decompress(buf)
except IOError:
raise ReadError("invalid compressed data")
self.dbuf += buf
c += len(buf)
buf = self.dbuf[:size]
self.dbuf = self.dbuf[size:]
return buf
def __read(self, size):
"""Return size bytes from stream. If internal buffer is empty,
read another block from the stream.
"""
c = len(self.buf)
while c < size:
buf = self.fileobj.read(self.bufsize)
if not buf:
break
self.buf += buf
c += len(buf)
buf = self.buf[:size]
self.buf = self.buf[size:]
return buf
# class _Stream
class _StreamProxy(object):
    """Small proxy class that enables transparent compression
    detection for the Stream interface (mode 'r|*').
    """
    def __init__(self, fileobj):
        self.fileobj = fileobj
        # Peek one block ahead so the magic bytes can be inspected.
        self.buf = self.fileobj.read(BLOCKSIZE)
    def read(self, size):
        # Hand the peeked block out exactly once, then rebind read() so
        # every later call goes straight to the underlying file object.
        self.read = self.fileobj.read
        return self.buf
    def getcomptype(self):
        """Guess the compression type from the first block's magic bytes."""
        head = self.buf
        if head.startswith(b"\037\213\010"):
            return "gz"
        if head[0:3] == b"BZh" and head[4:10] == b"1AY&SY":
            return "bz2"
        return "tar"
    def close(self):
        self.fileobj.close()
# class StreamProxy
class _BZ2Proxy(object):
    """Small proxy class that enables external file object
    support for "r:bz2" and "w:bz2" modes. This is actually
    a workaround for a limitation in bz2 module's BZ2File
    class which (unlike gzip.GzipFile) has no support for
    a file object argument.
    """
    # Raw read granularity from the underlying file object.
    blocksize = 16 * 1024
    def __init__(self, fileobj, mode):
        self.fileobj = fileobj
        self.mode = mode
        self.name = getattr(self.fileobj, "name", None)
        self.init()
    def init(self):
        """(Re)create the compressor/decompressor and rewind to offset 0.

        Also used to emulate backward seeks: the stream is re-read from
        the start (see seek()).
        """
        import bz2
        self.pos = 0
        if self.mode == "r":
            self.bz2obj = bz2.BZ2Decompressor()
            self.fileobj.seek(0)
            self.buf = b""
        else:
            self.bz2obj = bz2.BZ2Compressor()
    def read(self, size):
        # self.buf accumulates decompressed data until *size* is covered.
        x = len(self.buf)
        while x < size:
            raw = self.fileobj.read(self.blocksize)
            if not raw:
                break
            data = self.bz2obj.decompress(raw)
            self.buf += data
            x += len(data)
        buf = self.buf[:size]
        self.buf = self.buf[size:]
        self.pos += len(buf)
        return buf
    def seek(self, pos):
        # Backward seek: restart decompression from the beginning, then
        # read forward to the target — O(pos), but correct.
        if pos < self.pos:
            self.init()
        self.read(pos - self.pos)
    def tell(self):
        return self.pos
    def write(self, data):
        # self.pos tracks *uncompressed* bytes written.
        self.pos += len(data)
        raw = self.bz2obj.compress(data)
        self.fileobj.write(raw)
    def close(self):
        if self.mode == "w":
            # Flush whatever the compressor is still holding back.
            raw = self.bz2obj.flush()
            self.fileobj.write(raw)
# class _BZ2Proxy
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
    """A thin wrapper around an existing file object that
    provides a part of its data as an individual file
    object.
    """
    def __init__(self, fileobj, offset, size, blockinfo=None):
        # :param offset: start of the member's data within *fileobj*
        # :param size: logical (uncompressed/expanded) size of the member
        # :param blockinfo: list of (offset, size) data extents for sparse
        #     members; None means one contiguous extent covering everything
        self.fileobj = fileobj
        self.offset = offset
        self.size = size
        self.position = 0
        if blockinfo is None:
            blockinfo = [(0, size)]
        # Construct a map with data and zero blocks.
        # Each entry is (is_data, start, stop, realpos): logical range
        # [start, stop) backed either by file data at *realpos* (is_data
        # True) or by an implicit hole of NUL bytes (is_data False).
        self.map_index = 0
        self.map = []
        lastpos = 0
        realpos = self.offset
        for offset, size in blockinfo:
            if offset > lastpos:
                self.map.append((False, lastpos, offset, None))
            self.map.append((True, offset, offset + size, realpos))
            realpos += size
            lastpos = offset + size
        if lastpos < self.size:
            self.map.append((False, lastpos, self.size, None))
    def seekable(self):
        if not hasattr(self.fileobj, "seekable"):
            # XXX gzip.GzipFile and bz2.BZ2File
            return True
        return self.fileobj.seekable()
    def tell(self):
        """Return the current file position.
        """
        return self.position
    def seek(self, position):
        """Seek to a position in the file.
        """
        self.position = position
    def read(self, size=None):
        """Read data from the file.
        """
        if size is None:
            size = self.size - self.position
        else:
            size = min(size, self.size - self.position)
        buf = b""
        while size > 0:
            # Find the map entry containing the current position; the
            # index wraps around so seeks backwards also resolve.
            while True:
                data, start, stop, offset = self.map[self.map_index]
                if start <= self.position < stop:
                    break
                else:
                    self.map_index += 1
                    if self.map_index == len(self.map):
                        self.map_index = 0
            length = min(size, stop - self.position)
            if data:
                self.fileobj.seek(offset + (self.position - start))
                buf += self.fileobj.read(length)
            else:
                # Sparse hole: materialize NUL bytes without touching disk.
                buf += NUL * length
            size -= length
            self.position += length
        return buf
#class _FileInFile
class ExFileObject(object):
    """File-like object for reading an archive member.
    Is returned by TarFile.extractfile().
    """
    # Chunk size used when scanning for newlines in readline().
    blocksize = 1024

    def __init__(self, tarfile, tarinfo):
        # All reads go through a _FileInFile window onto the archive's
        # underlying file object, restricted to this member's data area.
        self.fileobj = _FileInFile(tarfile.fileobj,
                                   tarinfo.offset_data,
                                   tarinfo.size,
                                   tarinfo.sparse)
        self.name = tarinfo.name
        self.mode = "r"
        self.closed = False
        self.size = tarinfo.size
        self.position = 0       # logical read position within the member
        self.buffer = b""       # look-ahead buffer filled by readline()

    def readable(self):
        return True

    def writable(self):
        return False

    def seekable(self):
        return self.fileobj.seekable()

    def read(self, size=None):
        """Read at most size bytes from the file. If size is not
        present or None, read all data until EOF is reached.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")

        buf = b""
        # Serve data from the readline() look-ahead buffer first.
        if self.buffer:
            if size is None:
                buf = self.buffer
                self.buffer = b""
            else:
                buf = self.buffer[:size]
                self.buffer = self.buffer[size:]

        if size is None:
            buf += self.fileobj.read()
        else:
            buf += self.fileobj.read(size - len(buf))

        self.position += len(buf)
        return buf

    # XXX TextIOWrapper uses the read1() method.
    read1 = read

    def readline(self, size=-1):
        """Read one entire line from the file. If size is present
        and non-negative, return a string with at most that
        size, which may be an incomplete line.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")

        pos = self.buffer.find(b"\n") + 1
        if pos == 0:
            # no newline found.
            # Keep pulling blocksize chunks until a newline (or EOF) shows
            # up in the buffer.
            while True:
                buf = self.fileobj.read(self.blocksize)
                self.buffer += buf
                if not buf or b"\n" in buf:
                    pos = self.buffer.find(b"\n") + 1
                    if pos == 0:
                        # no newline found.
                        pos = len(self.buffer)
                    break

        if size != -1:
            pos = min(size, pos)

        buf = self.buffer[:pos]
        self.buffer = self.buffer[pos:]
        self.position += len(buf)
        return buf

    def readlines(self):
        """Return a list with all remaining lines.
        """
        result = []
        while True:
            line = self.readline()
            if not line: break
            result.append(line)
        return result

    def tell(self):
        """Return the current file position.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")

        return self.position

    def seek(self, pos, whence=os.SEEK_SET):
        """Seek to a position in the file.

        The resulting position is clamped to the range [0, size].
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")

        if whence == os.SEEK_SET:
            self.position = min(max(pos, 0), self.size)
        elif whence == os.SEEK_CUR:
            if pos < 0:
                self.position = max(self.position + pos, 0)
            else:
                self.position = min(self.position + pos, self.size)
        elif whence == os.SEEK_END:
            self.position = max(min(self.size + pos, self.size), 0)
        else:
            raise ValueError("Invalid argument")

        # Any buffered look-ahead no longer matches the new position.
        self.buffer = b""
        self.fileobj.seek(self.position)

    def close(self):
        """Close the file object.
        """
        # Only marks this member file as unusable; the underlying archive
        # file object is not touched.
        self.closed = True

    def __iter__(self):
        """Get an iterator over the file's lines.
        """
        while True:
            line = self.readline()
            if not line:
                break
            yield line
#class ExFileObject
#------------------
# Exported Classes
#------------------
class TarInfo(object):
    """Informational class which holds the details about an
    archive member given by a tar header block.
    TarInfo objects are returned by TarFile.getmember(),
    TarFile.getmembers() and TarFile.gettarinfo() and are
    usually created internally.
    """

    # __slots__ avoids a per-instance __dict__; archives may hold a very
    # large number of members.
    __slots__ = ("name", "mode", "uid", "gid", "size", "mtime",
                 "chksum", "type", "linkname", "uname", "gname",
                 "devmajor", "devminor",
                 "offset", "offset_data", "pax_headers", "sparse",
                 "tarfile", "_sparse_structs", "_link_target")

    def __init__(self, name=""):
        """Construct a TarInfo object. name is the optional name
        of the member.
        """
        self.name = name        # member name
        self.mode = 0o644       # file permissions
        self.uid = 0            # user id
        self.gid = 0            # group id
        self.size = 0           # file size
        self.mtime = 0          # modification time
        self.chksum = 0         # header checksum
        self.type = REGTYPE     # member type
        self.linkname = ""      # link name
        self.uname = ""         # user name
        self.gname = ""         # group name
        self.devmajor = 0       # device major number
        self.devminor = 0       # device minor number

        self.offset = 0         # the tar header starts here
        self.offset_data = 0    # the file's data starts here

        self.sparse = None      # sparse member information
        self.pax_headers = {}   # pax header information

    # In pax headers the "name" and "linkname" field are called
    # "path" and "linkpath".
    def _getpath(self):
        return self.name
    def _setpath(self, name):
        self.name = name
    path = property(_getpath, _setpath)

    def _getlinkpath(self):
        return self.linkname
    def _setlinkpath(self, linkname):
        self.linkname = linkname
    linkpath = property(_getlinkpath, _setlinkpath)

    def __repr__(self):
        return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
def get_info(self):
"""Return the TarInfo's attributes as a dictionary.
"""
info = {
"name": self.name,
"mode": self.mode & 0o7777,
"uid": self.uid,
"gid": self.gid,
"size": self.size,
"mtime": self.mtime,
"chksum": self.chksum,
"type": self.type,
"linkname": self.linkname,
"uname": self.uname,
"gname": self.gname,
"devmajor": self.devmajor,
"devminor": self.devminor
}
if info["type"] == DIRTYPE and not info["name"].endswith("/"):
info["name"] += "/"
return info
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"):
"""Return a tar header as a string of 512 byte blocks.
"""
info = self.get_info()
if format == USTAR_FORMAT:
return self.create_ustar_header(info, encoding, errors)
elif format == GNU_FORMAT:
return self.create_gnu_header(info, encoding, errors)
elif format == PAX_FORMAT:
return self.create_pax_header(info, encoding)
else:
raise ValueError("invalid format")
def create_ustar_header(self, info, encoding, errors):
"""Return the object as a ustar header block.
"""
info["magic"] = POSIX_MAGIC
if len(info["linkname"]) > LENGTH_LINK:
raise ValueError("linkname is too long")
if len(info["name"]) > LENGTH_NAME:
info["prefix"], info["name"] = self._posix_split_name(info["name"])
return self._create_header(info, USTAR_FORMAT, encoding, errors)
def create_gnu_header(self, info, encoding, errors):
"""Return the object as a GNU header block sequence.
"""
info["magic"] = GNU_MAGIC
buf = b""
if len(info["linkname"]) > LENGTH_LINK:
buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors)
if len(info["name"]) > LENGTH_NAME:
buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors)
return buf + self._create_header(info, GNU_FORMAT, encoding, errors)
    def create_pax_header(self, info, encoding):
        """Return the object as a ustar header block. If it cannot be
        represented this way, prepend a pax extended header sequence
        with supplement information.
        """
        info["magic"] = POSIX_MAGIC
        pax_headers = self.pax_headers.copy()

        # Test string fields for values that exceed the field length or cannot
        # be represented in ASCII encoding.
        for name, hname, length in (
                ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
                ("uname", "uname", 32), ("gname", "gname", 32)):

            if hname in pax_headers:
                # The pax header has priority.
                continue

            # Try to encode the string as ASCII.
            try:
                info[name].encode("ascii", "strict")
            except UnicodeEncodeError:
                # Non-ASCII value: move it into a pax record.
                pax_headers[hname] = info[name]
                continue

            if len(info[name]) > length:
                # Too long for the fixed-width ustar field.
                pax_headers[hname] = info[name]

        # Test number fields for values that exceed the field limit or values
        # that like to be stored as float.
        for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
            if name in pax_headers:
                # The pax header has priority. Avoid overflow.
                info[name] = 0
                continue

            val = info[name]
            # An octal field of `digits' bytes holds digits-1 digits plus a
            # terminator, so the largest representable value is
            # 8 ** (digits - 1) - 1.
            if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
                pax_headers[name] = str(val)
                info[name] = 0

        # Create a pax extended header if necessary.
        if pax_headers:
            buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
        else:
            buf = b""

        # "replace" is safe here: any non-representable value has already
        # been moved into the pax records above.
        return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")

    @classmethod
    def create_pax_global_header(cls, pax_headers):
        """Return the object as a pax global header block sequence.
        """
        return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8")
def _posix_split_name(self, name):
"""Split a name longer than 100 chars into a prefix
and a name part.
"""
prefix = name[:LENGTH_PREFIX + 1]
while prefix and prefix[-1] != "/":
prefix = prefix[:-1]
name = name[len(prefix):]
prefix = prefix[:-1]
if not prefix or len(name) > LENGTH_NAME:
raise ValueError("name is too long")
return prefix, name
    @staticmethod
    def _create_header(info, format, encoding, errors):
        """Return a header block. info is a dictionary with file
        information, format must be one of the *_FORMAT constants.
        """
        # Field order and widths follow the ustar header layout; string
        # fields go through stn(), numbers through itn().
        parts = [
            stn(info.get("name", ""), 100, encoding, errors),
            itn(info.get("mode", 0) & 0o7777, 8, format),
            itn(info.get("uid", 0), 8, format),
            itn(info.get("gid", 0), 8, format),
            itn(info.get("size", 0), 12, format),
            itn(info.get("mtime", 0), 12, format),
            b"        ", # checksum field (8 spaces, patched below)
            info.get("type", REGTYPE),
            stn(info.get("linkname", ""), 100, encoding, errors),
            info.get("magic", POSIX_MAGIC),
            stn(info.get("uname", ""), 32, encoding, errors),
            stn(info.get("gname", ""), 32, encoding, errors),
            itn(info.get("devmajor", 0), 8, format),
            itn(info.get("devminor", 0), 8, format),
            stn(info.get("prefix", ""), 155, encoding, errors)
        ]

        # Pad the joined fields to a full BLOCKSIZE block with NULs.
        buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
        chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
        # Patch the 7-byte checksum value into the checksum field at
        # offset 148 (512 - 364 == 148, 512 - 357 == 155).
        buf = buf[:-364] + bytes("%06o\0" % chksum, "ascii") + buf[-357:]
        return buf
@staticmethod
def _create_payload(payload):
"""Return the string payload filled with zero bytes
up to the next 512 byte border.
"""
blocks, remainder = divmod(len(payload), BLOCKSIZE)
if remainder > 0:
payload += (BLOCKSIZE - remainder) * NUL
return payload
@classmethod
def _create_gnu_long_header(cls, name, type, encoding, errors):
"""Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.
"""
name = name.encode(encoding, errors) + NUL
info = {}
info["name"] = "././@LongLink"
info["type"] = type
info["size"] = len(name)
info["magic"] = GNU_MAGIC
# create extended header + name blocks.
return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
cls._create_payload(name)
    @classmethod
    def _create_pax_generic_header(cls, pax_headers, type, encoding):
        """Return a POSIX.1-2008 extended or global header sequence
        that contains a list of keyword, value pairs. The values
        must be strings.
        """
        # Check if one of the fields contains surrogate characters and thereby
        # forces hdrcharset=BINARY, see _proc_pax() for more information.
        binary = False
        for keyword, value in pax_headers.items():
            try:
                value.encode("utf8", "strict")
            except UnicodeEncodeError:
                binary = True
                break

        records = b""
        if binary:
            # Put the hdrcharset field at the beginning of the header.
            records += b"21 hdrcharset=BINARY\n"

        for keyword, value in pax_headers.items():
            keyword = keyword.encode("utf8")
            if binary:
                # Try to restore the original byte representation of `value'.
                # Needless to say, that the encoding must match the string.
                value = value.encode(encoding, "surrogateescape")
            else:
                value = value.encode("utf8")

            # A record is "<length> <keyword>=<value>\n" where <length>
            # counts the complete record including its own digits. Iterate
            # until the digit count of the length stabilizes.
            l = len(keyword) + len(value) + 3   # ' ' + '=' + '\n'
            n = p = 0
            while True:
                n = l + len(str(p))
                if n == p:
                    break
                p = n
            records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"

        # We use a hardcoded "././@PaxHeader" name like star does
        # instead of the one that POSIX recommends.
        info = {}
        info["name"] = "././@PaxHeader"
        info["type"] = type
        info["size"] = len(records)
        info["magic"] = POSIX_MAGIC

        # Create pax header + record blocks.
        return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
                cls._create_payload(records)
    @classmethod
    def frombuf(cls, buf, encoding, errors):
        """Construct a TarInfo object from a 512 byte bytes object.

        Raises EmptyHeaderError, TruncatedHeaderError, EOFHeaderError or
        InvalidHeaderError for the corresponding malformed inputs.
        """
        if len(buf) == 0:
            raise EmptyHeaderError("empty header")
        if len(buf) != BLOCKSIZE:
            raise TruncatedHeaderError("truncated header")
        if buf.count(NUL) == BLOCKSIZE:
            # An all-NUL block marks the end of the archive.
            raise EOFHeaderError("end of file header")

        # The checksum field occupies bytes 148..155 of the block.
        chksum = nti(buf[148:156])
        if chksum not in calc_chksums(buf):
            raise InvalidHeaderError("bad checksum")

        # Slice out the fixed-width ustar header fields.
        obj = cls()
        obj.name = nts(buf[0:100], encoding, errors)
        obj.mode = nti(buf[100:108])
        obj.uid = nti(buf[108:116])
        obj.gid = nti(buf[116:124])
        obj.size = nti(buf[124:136])
        obj.mtime = nti(buf[136:148])
        obj.chksum = chksum
        obj.type = buf[156:157]
        obj.linkname = nts(buf[157:257], encoding, errors)
        obj.uname = nts(buf[265:297], encoding, errors)
        obj.gname = nts(buf[297:329], encoding, errors)
        obj.devmajor = nti(buf[329:337])
        obj.devminor = nti(buf[337:345])
        prefix = nts(buf[345:500], encoding, errors)

        # Old V7 tar format represents a directory as a regular
        # file with a trailing slash.
        if obj.type == AREGTYPE and obj.name.endswith("/"):
            obj.type = DIRTYPE

        # The old GNU sparse format occupies some of the unused
        # space in the buffer for up to 4 sparse structures.
        # Save the them for later processing in _proc_sparse().
        if obj.type == GNUTYPE_SPARSE:
            pos = 386
            structs = []
            # Each structure is a 12-digit offset plus 12-digit size.
            for i in range(4):
                try:
                    offset = nti(buf[pos:pos + 12])
                    numbytes = nti(buf[pos + 12:pos + 24])
                except ValueError:
                    break
                structs.append((offset, numbytes))
                pos += 24
            # Byte 482 flags extra sparse extension blocks; bytes 483..494
            # hold the real (expanded) file size.
            isextended = bool(buf[482])
            origsize = nti(buf[483:495])
            obj._sparse_structs = (structs, isextended, origsize)

        # Remove redundant slashes from directories.
        if obj.isdir():
            obj.name = obj.name.rstrip("/")

        # Reconstruct a ustar longname.
        if prefix and obj.type not in GNU_TYPES:
            obj.name = prefix + "/" + obj.name
        return obj
@classmethod
def fromtarfile(cls, tarfile):
"""Return the next TarInfo object from TarFile object
tarfile.
"""
buf = tarfile.fileobj.read(BLOCKSIZE)
obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
return obj._proc_member(tarfile)
#--------------------------------------------------------------------------
# The following are methods that are called depending on the type of a
# member. The entry point is _proc_member() which can be overridden in a
# subclass to add custom _proc_*() methods. A _proc_*() method MUST
# implement the following
# operations:
# 1. Set self.offset_data to the position where the data blocks begin,
# if there is data that follows.
# 2. Set tarfile.offset to the position where the next member's header will
# begin.
# 3. Return self or another valid TarInfo object.
def _proc_member(self, tarfile):
"""Choose the right processing method depending on
the type and call it.
"""
if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return self._proc_gnulong(tarfile)
elif self.type == GNUTYPE_SPARSE:
return self._proc_sparse(tarfile)
elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile)
    def _proc_builtin(self, tarfile):
        """Process a builtin type or an unknown type which
        will be treated as a regular file.
        """
        self.offset_data = tarfile.fileobj.tell()
        offset = self.offset_data
        if self.isreg() or self.type not in SUPPORTED_TYPES:
            # Skip the following data blocks.
            offset += self._block(self.size)
        tarfile.offset = offset

        # Patch the TarInfo object with saved global
        # header information.
        self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)

        return self

    def _proc_gnulong(self, tarfile):
        """Process the blocks that hold a GNU longname
        or longlink member.
        """
        # The long name is this member's payload, padded to a block border.
        buf = tarfile.fileobj.read(self._block(self.size))

        # Fetch the next header and process it.
        try:
            next = self.fromtarfile(tarfile)
        except HeaderError:
            raise SubsequentHeaderError("missing or bad subsequent header")

        # Patch the TarInfo object from the next header with
        # the longname information.
        next.offset = self.offset
        if self.type == GNUTYPE_LONGNAME:
            next.name = nts(buf, tarfile.encoding, tarfile.errors)
        elif self.type == GNUTYPE_LONGLINK:
            next.linkname = nts(buf, tarfile.encoding, tarfile.errors)

        return next

    def _proc_sparse(self, tarfile):
        """Process a GNU sparse header plus extra headers.
        """
        # We already collected some sparse structures in frombuf().
        structs, isextended, origsize = self._sparse_structs
        del self._sparse_structs

        # Collect sparse structures from extended header blocks.
        while isextended:
            buf = tarfile.fileobj.read(BLOCKSIZE)
            pos = 0
            # Each extension block holds up to 21 (offset, numbytes) pairs
            # of 12 octal digits each.
            for i in range(21):
                try:
                    offset = nti(buf[pos:pos + 12])
                    numbytes = nti(buf[pos + 12:pos + 24])
                except ValueError:
                    break
                if offset and numbytes:
                    structs.append((offset, numbytes))
                pos += 24
            # Byte 504 flags whether yet another extension block follows.
            isextended = bool(buf[504])
        self.sparse = structs

        self.offset_data = tarfile.fileobj.tell()
        tarfile.offset = self.offset_data + self._block(self.size)
        # Up to here self.size was the stored (compacted) size; replace it
        # with the real file size saved by frombuf().
        self.size = origsize
        return self

    def _proc_pax(self, tarfile):
        """Process an extended or global header as described in
        POSIX.1-2008.
        """
        # Read the header information.
        buf = tarfile.fileobj.read(self._block(self.size))

        # A pax header stores supplemental information for either
        # the following file (extended) or all following files
        # (global).
        if self.type == XGLTYPE:
            pax_headers = tarfile.pax_headers
        else:
            pax_headers = tarfile.pax_headers.copy()

        # Check if the pax header contains a hdrcharset field. This tells us
        # the encoding of the path, linkpath, uname and gname fields. Normally,
        # these fields are UTF-8 encoded but since POSIX.1-2008 tar
        # implementations are allowed to store them as raw binary strings if
        # the translation to UTF-8 fails.
        match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
        if match is not None:
            pax_headers["hdrcharset"] = match.group(1).decode("utf8")

        # For the time being, we don't care about anything other than "BINARY".
        # The only other value that is currently allowed by the standard is
        # "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
        hdrcharset = pax_headers.get("hdrcharset")
        if hdrcharset == "BINARY":
            encoding = tarfile.encoding
        else:
            encoding = "utf8"

        # Parse pax header information. A record looks like that:
        # "%d %s=%s\n" % (length, keyword, value). length is the size
        # of the complete record including the length field itself and
        # the newline. keyword and value are both UTF-8 encoded strings.
        regex = re.compile(br"(\d+) ([^=]+)=")
        pos = 0
        while True:
            match = regex.match(buf, pos)
            if not match:
                break

            length, keyword = match.groups()
            length = int(length)
            value = buf[match.end(2) + 1:match.start(1) + length - 1]

            # Normally, we could just use "utf8" as the encoding and "strict"
            # as the error handler, but we better not take the risk. For
            # example, GNU tar <= 1.23 is known to store filenames it cannot
            # translate to UTF-8 as raw strings (unfortunately without a
            # hdrcharset=BINARY header).
            # We first try the strict standard encoding, and if that fails we
            # fall back on the user's encoding and error handler.
            keyword = self._decode_pax_field(keyword, "utf8", "utf8",
                    tarfile.errors)
            if keyword in PAX_NAME_FIELDS:
                value = self._decode_pax_field(value, encoding, tarfile.encoding,
                        tarfile.errors)
            else:
                value = self._decode_pax_field(value, "utf8", "utf8",
                        tarfile.errors)

            pax_headers[keyword] = value
            pos += length

        # Fetch the next header.
        try:
            next = self.fromtarfile(tarfile)
        except HeaderError:
            raise SubsequentHeaderError("missing or bad subsequent header")

        # Process GNU sparse information.
        if "GNU.sparse.map" in pax_headers:
            # GNU extended sparse format version 0.1.
            self._proc_gnusparse_01(next, pax_headers)

        elif "GNU.sparse.size" in pax_headers:
            # GNU extended sparse format version 0.0.
            self._proc_gnusparse_00(next, pax_headers, buf)

        elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
            # GNU extended sparse format version 1.0.
            self._proc_gnusparse_10(next, pax_headers, tarfile)

        if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
            # Patch the TarInfo object with the extended header info.
            next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
            next.offset = self.offset

            if "size" in pax_headers:
                # If the extended header replaces the size field,
                # we need to recalculate the offset where the next
                # header starts.
                offset = next.offset_data
                if next.isreg() or next.type not in SUPPORTED_TYPES:
                    offset += next._block(next.size)
                tarfile.offset = offset

        return next
def _proc_gnusparse_00(self, next, pax_headers, buf):
"""Process a GNU tar extended sparse header, version 0.0.
"""
offsets = []
for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
offsets.append(int(match.group(1)))
numbytes = []
for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
numbytes.append(int(match.group(1)))
next.sparse = list(zip(offsets, numbytes))
def _proc_gnusparse_01(self, next, pax_headers):
"""Process a GNU tar extended sparse header, version 0.1.
"""
sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
next.sparse = list(zip(sparse[::2], sparse[1::2]))
    def _proc_gnusparse_10(self, next, pax_headers, tarfile):
        """Process a GNU tar extended sparse header, version 1.0.
        """
        fields = None
        sparse = []
        # The sparse map sits in the member's data area as newline-separated
        # decimal numbers: first the number of (offset, size) pairs, then
        # the values themselves.
        buf = tarfile.fileobj.read(BLOCKSIZE)
        fields, buf = buf.split(b"\n", 1)
        fields = int(fields)
        while len(sparse) < fields * 2:
            if b"\n" not in buf:
                # A number straddles a block boundary; read the next block.
                buf += tarfile.fileobj.read(BLOCKSIZE)
            number, buf = buf.split(b"\n", 1)
            sparse.append(int(number))
        next.offset_data = tarfile.fileobj.tell()
        next.sparse = list(zip(sparse[::2], sparse[1::2]))

    def _apply_pax_info(self, pax_headers, encoding, errors):
        """Replace fields with supplemental information from a previous
        pax extended or global header.
        """
        for keyword, value in pax_headers.items():
            if keyword == "GNU.sparse.name":
                # Goes through the `path' property, i.e. sets self.name.
                setattr(self, "path", value)
            elif keyword == "GNU.sparse.size":
                setattr(self, "size", int(value))
            elif keyword == "GNU.sparse.realsize":
                setattr(self, "size", int(value))
            elif keyword in PAX_FIELDS:
                # Numeric pax fields fall back to 0 when unparsable.
                if keyword in PAX_NUMBER_FIELDS:
                    try:
                        value = PAX_NUMBER_FIELDS[keyword](value)
                    except ValueError:
                        value = 0
                if keyword == "path":
                    value = value.rstrip("/")
                setattr(self, keyword, value)

        # Keep the complete record set for later re-serialization.
        self.pax_headers = pax_headers.copy()

    def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
        """Decode a single field from a pax record.

        Try the strict primary encoding first, then the caller's more
        permissive fallback.
        """
        try:
            return value.decode(encoding, "strict")
        except UnicodeDecodeError:
            return value.decode(fallback_encoding, fallback_errors)
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE
    def isreg(self):
        # Regular file (any of the flags collected in REGULAR_TYPES).
        return self.type in REGULAR_TYPES
    def isfile(self):
        # Alias for isreg().
        return self.isreg()
    def isdir(self):
        return self.type == DIRTYPE
    def issym(self):
        return self.type == SYMTYPE
    def islnk(self):
        return self.type == LNKTYPE
    def ischr(self):
        return self.type == CHRTYPE
    def isblk(self):
        return self.type == BLKTYPE
    def isfifo(self):
        return self.type == FIFOTYPE
    def issparse(self):
        # Sparse members carry their block map in self.sparse.
        return self.sparse is not None
    def isdev(self):
        # Any device-like member: character/block device or FIFO.
        return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
class TarFile(object):
    """The TarFile Class provides an interface to tar archives.
    """

    debug = 0                   # May be set from 0 (no msgs) to 3 (all msgs)

    dereference = False         # If true, add content of linked file to the
                                # tar file, else the link.

    ignore_zeros = False        # If true, skips empty or invalid blocks and
                                # continues processing.

    errorlevel = 1              # If 0, fatal errors only appear in debug
                                # messages (if debug >= 0). If > 0, errors
                                # are passed to the caller as exceptions.

    format = DEFAULT_FORMAT     # The format to use when creating an archive.

    encoding = ENCODING         # Encoding for 8-bit character strings.

    errors = None               # Error handler for unicode conversion.

    tarinfo = TarInfo           # The default TarInfo class to use.

    fileobject = ExFileObject   # The default ExFileObject class to use.

    def __init__(self, name=None, mode="r", fileobj=None, format=None,
            tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
            errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None):
        """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
        read from an existing archive, 'a' to append data to an existing
        file or 'w' to create a new file overwriting an existing one. `mode'
        defaults to 'r'.
        If `fileobj' is given, it is used for reading or writing data. If it
        can be determined, `mode' is overridden by `fileobj's mode.
        `fileobj' is not closed, when TarFile is closed.
        """
        if len(mode) > 1 or mode not in "raw":
            raise ValueError("mode must be 'r', 'a' or 'w'")
        self.mode = mode
        # Translate the archive mode into the binary file mode.
        self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]

        if not fileobj:
            if self.mode == "a" and not os.path.exists(name):
                # Create nonexistent files in append mode.
                self.mode = "w"
                self._mode = "wb"
            fileobj = bltn_open(name, self._mode)
            self._extfileobj = False
        else:
            if name is None and hasattr(fileobj, "name"):
                name = fileobj.name
            if hasattr(fileobj, "mode"):
                self._mode = fileobj.mode
            # External file objects are never closed by close().
            self._extfileobj = True
        self.name = os.path.abspath(name) if name else None
        self.fileobj = fileobj

        # Init attributes.
        if format is not None:
            self.format = format
        if tarinfo is not None:
            self.tarinfo = tarinfo
        if dereference is not None:
            self.dereference = dereference
        if ignore_zeros is not None:
            self.ignore_zeros = ignore_zeros
        if encoding is not None:
            self.encoding = encoding
        self.errors = errors

        # User-supplied pax_headers only take effect when writing pax format.
        if pax_headers is not None and self.format == PAX_FORMAT:
            self.pax_headers = pax_headers
        else:
            self.pax_headers = {}

        if debug is not None:
            self.debug = debug
        if errorlevel is not None:
            self.errorlevel = errorlevel

        # Init datastructures.
        self.closed = False
        self.members = []       # list of members as TarInfo objects
        self._loaded = False    # flag if all members have been read
        self.offset = self.fileobj.tell()
                                # current position in the archive file
        self.inodes = {}        # dictionary caching the inodes of
                                # archive members already added

        try:
            if self.mode == "r":
                self.firstmember = None
                self.firstmember = self.next()

            if self.mode == "a":
                # Move to the end of the archive,
                # before the first empty block.
                while True:
                    self.fileobj.seek(self.offset)
                    try:
                        tarinfo = self.tarinfo.fromtarfile(self)
                        self.members.append(tarinfo)
                    except EOFHeaderError:
                        self.fileobj.seek(self.offset)
                        break
                    except HeaderError as e:
                        raise ReadError(str(e))

            if self.mode in "aw":
                self._loaded = True

                if self.pax_headers:
                    buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
                    self.fileobj.write(buf)
                    self.offset += len(buf)
        except:
            # On any setup failure release the file we opened ourselves.
            if not self._extfileobj:
                self.fileobj.close()
            self.closed = True
            raise
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
    @classmethod
    def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
        """Open a tar archive for reading, writing or appending. Return
        an appropriate TarFile class.

        mode:
        'r' or 'r:*' open for reading with transparent compression
        'r:'         open for reading exclusively uncompressed
        'r:gz'       open for reading with gzip compression
        'r:bz2'      open for reading with bzip2 compression
        'a' or 'a:'  open for appending, creating the file if necessary
        'w' or 'w:'  open for writing without compression
        'w:gz'       open for writing with gzip compression
        'w:bz2'      open for writing with bzip2 compression

        'r|*'        open a stream of tar blocks with transparent compression
        'r|'         open an uncompressed stream of tar blocks for reading
        'r|gz'       open a gzip compressed stream of tar blocks
        'r|bz2'      open a bzip2 compressed stream of tar blocks
        'w|'         open an uncompressed stream for writing
        'w|gz'       open a gzip compressed stream for writing
        'w|bz2'      open a bzip2 compressed stream for writing
        """

        if not name and not fileobj:
            raise ValueError("nothing to open")

        if mode in ("r", "r:*"):
            # Find out which *open() is appropriate for opening the file.
            for comptype in cls.OPEN_METH:
                func = getattr(cls, cls.OPEN_METH[comptype])
                if fileobj is not None:
                    saved_pos = fileobj.tell()
                try:
                    return func(name, "r", fileobj, **kwargs)
                except (ReadError, CompressionError) as e:
                    # Wrong compression guess: rewind and try the next one.
                    if fileobj is not None:
                        fileobj.seek(saved_pos)
                    continue
            raise ReadError("file could not be opened successfully")

        elif ":" in mode:
            filemode, comptype = mode.split(":", 1)
            filemode = filemode or "r"
            comptype = comptype or "tar"

            # Select the *open() function according to
            # given compression.
            if comptype in cls.OPEN_METH:
                func = getattr(cls, cls.OPEN_METH[comptype])
            else:
                raise CompressionError("unknown compression type %r" % comptype)
            return func(name, filemode, fileobj, **kwargs)

        elif "|" in mode:
            filemode, comptype = mode.split("|", 1)
            filemode = filemode or "r"
            comptype = comptype or "tar"

            if filemode not in "rw":
                raise ValueError("mode must be 'r' or 'w'")

            # Stream mode: wrap everything in a _Stream that performs the
            # (de)compression on the fly.
            stream = _Stream(name, filemode, comptype, fileobj, bufsize)
            try:
                t = cls(name, filemode, stream, **kwargs)
            except:
                stream.close()
                raise
            # The stream is owned by the TarFile and closed with it.
            t._extfileobj = False
            return t

        elif mode in "aw":
            return cls.taropen(name, mode, fileobj, **kwargs)

        raise ValueError("undiscernible mode")
@classmethod
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
"""Open uncompressed tar archive name for reading or writing.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
return cls(name, mode, fileobj, **kwargs)
    @classmethod
    def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
        """Open gzip compressed tar archive name for reading or writing.
        Appending is not allowed.
        """
        if len(mode) > 1 or mode not in "rw":
            raise ValueError("mode must be 'r' or 'w'")

        try:
            import gzip
            gzip.GzipFile
        except (ImportError, AttributeError):
            raise CompressionError("gzip module is not available")

        # Remember whether the caller supplied the file object; externally
        # supplied objects must not be closed on error.
        extfileobj = fileobj is not None
        try:
            fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
            t = cls.taropen(name, mode, fileobj, **kwargs)
        except IOError:
            if not extfileobj and fileobj is not None:
                fileobj.close()
            if fileobj is None:
                raise
            # An IOError from the gzip layer means the data is not gzip.
            raise ReadError("not a gzip file")
        except:
            if not extfileobj and fileobj is not None:
                fileobj.close()
            raise
        t._extfileobj = extfileobj
        return t

    @classmethod
    def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
        """Open bzip2 compressed tar archive name for reading or writing.
        Appending is not allowed.
        """
        if len(mode) > 1 or mode not in "rw":
            raise ValueError("mode must be 'r' or 'w'.")

        try:
            import bz2
        except ImportError:
            raise CompressionError("bz2 module is not available")

        if fileobj is not None:
            # Wrap the supplied file object; this bz2.BZ2File cannot
            # operate on an already-open file object directly.
            fileobj = _BZ2Proxy(fileobj, mode)
        else:
            fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)

        try:
            t = cls.taropen(name, mode, fileobj, **kwargs)
        except (IOError, EOFError):
            fileobj.close()
            raise ReadError("not a bzip2 file")
        t._extfileobj = False
        return t

    # All *open() methods are registered here.
    OPEN_METH = {
        "tar": "taropen",   # uncompressed tar
        "gz":  "gzopen",    # gzip compressed tar
        "bz2": "bz2open"    # bzip2 compressed tar
    }
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
"""Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
"""
if self.closed:
return
if self.mode in "aw":
self.fileobj.write(NUL * (BLOCKSIZE * 2))
self.offset += (BLOCKSIZE * 2)
# fill up the end with zero-blocks
# (like option -b20 for tar does)
blocks, remainder = divmod(self.offset, RECORDSIZE)
if remainder > 0:
self.fileobj.write(NUL * (RECORDSIZE - remainder))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo
def getmembers(self):
"""Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.
"""
self._check()
if not self._loaded: # if we want to obtain a list of
self._load() # all members, we first have to
# scan the whole archive.
return self.members
def getnames(self):
"""Return the members of the archive as a list of their names. It has
the same order as the list returned by getmembers().
"""
return [tarinfo.name for tarinfo in self.getmembers()]
    def gettarinfo(self, name=None, arcname=None, fileobj=None):
        """Create a TarInfo object for either the file `name' or the file
        object `fileobj' (using os.fstat on its file descriptor). You can
        modify some of the TarInfo's attributes before you add it using
        addfile(). If given, `arcname' specifies an alternative name for the
        file in the archive.

        Returns None for file types that cannot be represented in a tar
        archive (e.g. sockets).
        """
        self._check("aw")
        # When fileobj is given, replace name by
        # fileobj's real name.
        if fileobj is not None:
            name = fileobj.name
        # Building the name of the member in the archive.
        # Backward slashes are converted to forward slashes,
        # Absolute paths are turned to relative paths.
        if arcname is None:
            arcname = name
        drv, arcname = os.path.splitdrive(arcname)
        arcname = arcname.replace(os.sep, "/")
        arcname = arcname.lstrip("/")
        # Now, fill the TarInfo object with
        # information specific for the file.
        tarinfo = self.tarinfo()
        tarinfo.tarfile = self
        # Use os.stat or os.lstat, depending on platform
        # and if symlinks shall be resolved.
        if fileobj is None:
            if hasattr(os, "lstat") and not self.dereference:
                statres = os.lstat(name)
            else:
                statres = os.stat(name)
        else:
            statres = os.fstat(fileobj.fileno())
        linkname = ""
        stmd = statres.st_mode
        if stat.S_ISREG(stmd):
            inode = (statres.st_ino, statres.st_dev)
            if not self.dereference and statres.st_nlink > 1 and \
                    inode in self.inodes and arcname != self.inodes[inode]:
                # Is it a hardlink to an already
                # archived file?
                type = LNKTYPE
                linkname = self.inodes[inode]
            else:
                # The inode is added only if its valid.
                # For win32 it is always 0.
                type = REGTYPE
                if inode[0]:
                    self.inodes[inode] = arcname
        elif stat.S_ISDIR(stmd):
            type = DIRTYPE
        elif stat.S_ISFIFO(stmd):
            type = FIFOTYPE
        elif stat.S_ISLNK(stmd):
            type = SYMTYPE
            linkname = os.readlink(name)
        elif stat.S_ISCHR(stmd):
            type = CHRTYPE
        elif stat.S_ISBLK(stmd):
            type = BLKTYPE
        else:
            # Unsupported file type (e.g. socket): signal with None.
            return None
        # Fill the TarInfo object with all
        # information we can get.
        tarinfo.name = arcname
        tarinfo.mode = stmd
        tarinfo.uid = statres.st_uid
        tarinfo.gid = statres.st_gid
        if type == REGTYPE:
            tarinfo.size = statres.st_size
        else:
            # Only regular files carry payload data.
            tarinfo.size = 0
        tarinfo.mtime = statres.st_mtime
        tarinfo.type = type
        tarinfo.linkname = linkname
        if pwd:
            try:
                tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
            except KeyError:
                # Unknown uid: leave the symbolic user name unset.
                pass
        if grp:
            try:
                tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
            except KeyError:
                # Unknown gid: leave the symbolic group name unset.
                pass
        if type in (CHRTYPE, BLKTYPE):
            if hasattr(os, "major") and hasattr(os, "minor"):
                tarinfo.devmajor = os.major(statres.st_rdev)
                tarinfo.devminor = os.minor(statres.st_rdev)
        return tarinfo
    def list(self, verbose=True):
        """Print a table of contents to sys.stdout. If `verbose' is False, only
        the names of the members are printed. If it is True, an `ls -l'-like
        output is produced.
        """
        self._check()
        for tarinfo in self:
            if verbose:
                print(filemode(tarinfo.mode), end=' ')
                # Fall back to numeric ids when symbolic names are absent.
                print("%s/%s" % (tarinfo.uname or tarinfo.uid,
                                 tarinfo.gname or tarinfo.gid), end=' ')
                if tarinfo.ischr() or tarinfo.isblk():
                    # Device files show major,minor instead of a size.
                    print("%10s" % ("%d,%d" \
                                    % (tarinfo.devmajor, tarinfo.devminor)), end=' ')
                else:
                    print("%10d" % tarinfo.size, end=' ')
                print("%d-%02d-%02d %02d:%02d:%02d" \
                      % time.localtime(tarinfo.mtime)[:6], end=' ')
            # Directories are shown with a trailing slash.
            print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ')
            if verbose:
                if tarinfo.issym():
                    print("->", tarinfo.linkname, end=' ')
                if tarinfo.islnk():
                    print("link to", tarinfo.linkname, end=' ')
            print()
def add(self, name, arcname=None, recursive=True, exclude=None, *, filter=None):
"""Add the file `name' to the archive. `name' may be any type of file
(directory, fifo, symbolic link, etc.). If given, `arcname'
specifies an alternative name for the file in the archive.
Directories are added recursively by default. This can be avoided by
setting `recursive' to False. `exclude' is a function that should
return True for each filename to be excluded. `filter' is a function
that expects a TarInfo object argument and returns the changed
TarInfo object, if it returns None the TarInfo object will be
excluded from the archive.
"""
self._check("aw")
if arcname is None:
arcname = name
# Exclude pathnames.
if exclude is not None:
import warnings
warnings.warn("use the filter argument instead",
DeprecationWarning, 2)
if exclude(name):
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Skip if somebody tries to archive the archive...
if self.name is not None and os.path.abspath(name) == self.name:
self._dbg(2, "tarfile: Skipped %r" % name)
return
self._dbg(1, name)
# Create a TarInfo object from the file.
tarinfo = self.gettarinfo(name, arcname)
if tarinfo is None:
self._dbg(1, "tarfile: Unsupported type %r" % name)
return
# Change or exclude the TarInfo object.
if filter is not None:
tarinfo = filter(tarinfo)
if tarinfo is None:
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Append the tar header and data to the archive.
if tarinfo.isreg():
f = bltn_open(name, "rb")
self.addfile(tarinfo, f)
f.close()
elif tarinfo.isdir():
self.addfile(tarinfo)
if recursive:
for f in os.listdir(name):
self.add(os.path.join(name, f), os.path.join(arcname, f),
recursive, exclude, filter=filter)
else:
self.addfile(tarinfo)
    def addfile(self, tarinfo, fileobj=None):
        """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
        given, tarinfo.size bytes are read from it and added to the archive.
        You can create TarInfo objects using gettarinfo().
        On Windows platforms, `fileobj' should always be opened with mode
        'rb' to avoid irritation about the file size.
        """
        self._check("aw")
        # Copy so later mutations by the caller do not affect the stored member.
        tarinfo = copy.copy(tarinfo)
        buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
        self.fileobj.write(buf)
        self.offset += len(buf)
        # If there's data to follow, append it.
        if fileobj is not None:
            copyfileobj(fileobj, self.fileobj, tarinfo.size)
            blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
            if remainder > 0:
                # Pad the payload to a full 512-byte block, as the tar
                # format requires.
                self.fileobj.write(NUL * (BLOCKSIZE - remainder))
                blocks += 1
            self.offset += blocks * BLOCKSIZE
        self.members.append(tarinfo)
    def extractall(self, path=".", members=None):
        """Extract all members from the archive to the current working
        directory and set owner, modification time and permissions on
        directories afterwards. `path' specifies a different directory
        to extract to. `members' is optional and must be a subset of the
        list returned by getmembers().

        NOTE(review): member names are not sanitized here; archives with
        absolute paths or ".." components may write outside `path'. Only
        extract archives from trusted sources.
        """
        directories = []
        if members is None:
            members = self
        for tarinfo in members:
            if tarinfo.isdir():
                # Extract directories with a safe mode.
                directories.append(tarinfo)
                tarinfo = copy.copy(tarinfo)
                tarinfo.mode = 0o700
            # Do not set_attrs directories, as we will do that further down
            self.extract(tarinfo, path, set_attrs=not tarinfo.isdir())
        # Reverse sort directories so attributes are applied bottom-up and
        # a parent's mtime is not clobbered by touching its children later.
        directories.sort(key=lambda a: a.name)
        directories.reverse()
        # Set correct owner, mtime and filemode on directories.
        for tarinfo in directories:
            dirpath = os.path.join(path, tarinfo.name)
            try:
                self.chown(tarinfo, dirpath)
                self.utime(tarinfo, dirpath)
                self.chmod(tarinfo, dirpath)
            except ExtractError as e:
                if self.errorlevel > 1:
                    raise
                else:
                    self._dbg(1, "tarfile: %s" % e)
    def extract(self, member, path="", set_attrs=True):
        """Extract a member from the archive to the current working directory,
        using its full name. Its file information is extracted as accurately
        as possible. `member' may be a filename or a TarInfo object. You can
        specify a different directory using `path'. File attributes (owner,
        mtime, mode) are set unless `set_attrs' is False.

        Errors are raised or merely logged depending on self.errorlevel.
        """
        self._check("r")
        if isinstance(member, str):
            tarinfo = self.getmember(member)
        else:
            tarinfo = member
        # Prepare the link target for makelink().
        if tarinfo.islnk():
            tarinfo._link_target = os.path.join(path, tarinfo.linkname)
        try:
            self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
                                 set_attrs=set_attrs)
        except EnvironmentError as e:
            if self.errorlevel > 0:
                raise
            else:
                # errorlevel 0: log OS errors instead of raising.
                if e.filename is None:
                    self._dbg(1, "tarfile: %s" % e.strerror)
                else:
                    self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
        except ExtractError as e:
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
    def extractfile(self, member):
        """Extract a member from the archive as a file object. `member' may be
        a filename or a TarInfo object. If `member' is a regular file, a
        file-like object is returned. If `member' is a link, a file-like
        object is constructed from the link's target. If `member' is none of
        the above, None is returned.
        The file-like object is read-only and provides the following
        methods: read(), readline(), readlines(), seek() and tell()
        """
        self._check("r")
        if isinstance(member, str):
            tarinfo = self.getmember(member)
        else:
            tarinfo = member
        if tarinfo.isreg():
            return self.fileobject(self, tarinfo)
        elif tarinfo.type not in SUPPORTED_TYPES:
            # If a member's type is unknown, it is treated as a
            # regular file.
            return self.fileobject(self, tarinfo)
        elif tarinfo.islnk() or tarinfo.issym():
            if isinstance(self.fileobj, _Stream):
                # A small but ugly workaround for the case that someone tries
                # to extract a (sym)link as a file-object from a non-seekable
                # stream of tar blocks.
                raise StreamError("cannot extract (sym)link as file object")
            else:
                # A (sym)link's file object is its target's file object.
                return self.extractfile(self._find_link_target(tarinfo))
        else:
            # If there's no data associated with the member (directory, chrdev,
            # blkdev, etc.), return None instead of a file object.
            return None
    def _extract_member(self, tarinfo, targetpath, set_attrs=True):
        """Extract the TarInfo object tarinfo to a physical
        file called targetpath, dispatching on the member's type to the
        appropriate make*() method below.
        """
        # Fetch the TarInfo object for the given name
        # and build the destination pathname, replacing
        # forward slashes to platform specific separators.
        targetpath = targetpath.rstrip("/")
        targetpath = targetpath.replace("/", os.sep)
        # Create all upper directories.
        upperdirs = os.path.dirname(targetpath)
        if upperdirs and not os.path.exists(upperdirs):
            # Create directories that are not part of the archive with
            # default permissions.
            os.makedirs(upperdirs)
        if tarinfo.islnk() or tarinfo.issym():
            self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
        else:
            self._dbg(1, tarinfo.name)
        # Dispatch to the type-specific extraction method.
        if tarinfo.isreg():
            self.makefile(tarinfo, targetpath)
        elif tarinfo.isdir():
            self.makedir(tarinfo, targetpath)
        elif tarinfo.isfifo():
            self.makefifo(tarinfo, targetpath)
        elif tarinfo.ischr() or tarinfo.isblk():
            self.makedev(tarinfo, targetpath)
        elif tarinfo.islnk() or tarinfo.issym():
            self.makelink(tarinfo, targetpath)
        elif tarinfo.type not in SUPPORTED_TYPES:
            self.makeunknown(tarinfo, targetpath)
        else:
            self.makefile(tarinfo, targetpath)
        if set_attrs:
            self.chown(tarinfo, targetpath)
            if not tarinfo.issym():
                # chmod/utime would act on the link target, so skip symlinks.
                self.chmod(tarinfo, targetpath)
                self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
def makedir(self, tarinfo, targetpath):
"""Make a directory called targetpath.
"""
try:
# Use a safe mode for the directory, the real mode is set
# later in _extract_member().
os.mkdir(targetpath, 0o700)
except EnvironmentError as e:
if e.errno != errno.EEXIST:
raise
def makefile(self, tarinfo, targetpath):
"""Make a file called targetpath.
"""
source = self.fileobj
source.seek(tarinfo.offset_data)
target = bltn_open(targetpath, "wb")
if tarinfo.sparse is not None:
for offset, size in tarinfo.sparse:
target.seek(offset)
copyfileobj(source, target, size)
else:
copyfileobj(source, target, tarinfo.size)
target.seek(tarinfo.size)
target.truncate()
target.close()
    def makeunknown(self, tarinfo, targetpath):
        """Make a file from a TarInfo object with an unknown type
        at targetpath. The member is extracted as a regular file and a
        debug warning is emitted.
        """
        self.makefile(tarinfo, targetpath)
        self._dbg(1, "tarfile: Unknown file type %r, " \
                     "extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
"""Make a fifo called targetpath.
"""
if hasattr(os, "mkfifo"):
os.mkfifo(targetpath)
else:
raise ExtractError("fifo not supported by system")
    def makedev(self, tarinfo, targetpath):
        """Make a character or block device called targetpath.

        Raises ExtractError on platforms lacking os.mknod/os.makedev.
        """
        if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
            raise ExtractError("special devices not supported by system")
        mode = tarinfo.mode
        # Add the device-type bit expected by os.mknod.
        if tarinfo.isblk():
            mode |= stat.S_IFBLK
        else:
            mode |= stat.S_IFCHR
        os.mknod(targetpath, mode,
                 os.makedev(tarinfo.devmajor, tarinfo.devminor))
    def makelink(self, tarinfo, targetpath):
        """Make a (symbolic) link called targetpath. If it cannot be created
        (platform limitation), we try to make a copy of the referenced file
        instead of a link.
        """
        try:
            # For systems that support symbolic and hard links.
            if tarinfo.issym():
                os.symlink(tarinfo.linkname, targetpath)
            else:
                # See extract(). _link_target was prepared there.
                if os.path.exists(tarinfo._link_target):
                    os.link(tarinfo._link_target, targetpath)
                else:
                    # Hard link target missing on disk: extract the archived
                    # target's content in its place.
                    self._extract_member(self._find_link_target(tarinfo),
                                         targetpath)
        except symlink_exception:
            # Platform cannot create links: fall back to copying the
            # target member's content.
            try:
                self._extract_member(self._find_link_target(tarinfo),
                                     targetpath)
            except KeyError:
                raise ExtractError("unable to resolve link inside archive")
    def chown(self, tarinfo, targetpath):
        """Set owner of targetpath according to tarinfo.

        Only takes effect when running as root; otherwise it is a no-op.
        Raises ExtractError if changing the owner fails.
        """
        if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
            # We have to be root to do so.
            try:
                g = grp.getgrnam(tarinfo.gname)[2]
            except KeyError:
                # Unknown group name: fall back to the numeric gid.
                g = tarinfo.gid
            try:
                u = pwd.getpwnam(tarinfo.uname)[2]
            except KeyError:
                # Unknown user name: fall back to the numeric uid.
                u = tarinfo.uid
            try:
                if tarinfo.issym() and hasattr(os, "lchown"):
                    # Change the link itself, not its target.
                    os.lchown(targetpath, u, g)
                else:
                    if sys.platform != "os2emx":
                        os.chown(targetpath, u, g)
            except EnvironmentError as e:
                raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
"""Set file permissions of targetpath according to tarinfo.
"""
if hasattr(os, 'chmod'):
try:
os.chmod(targetpath, tarinfo.mode)
except EnvironmentError as e:
raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
"""Set modification time of targetpath according to tarinfo.
"""
if not hasattr(os, 'utime'):
return
try:
os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
except EnvironmentError as e:
raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
    def next(self):
        """Return the next member of the archive as a TarInfo object, when
        TarFile is opened for reading. Return None if there is no more
        available.
        """
        self._check("ra")
        # firstmember is pre-read by the open machinery; hand it out first.
        if self.firstmember is not None:
            m = self.firstmember
            self.firstmember = None
            return m
        # Read the next block.
        self.fileobj.seek(self.offset)
        tarinfo = None
        while True:
            try:
                tarinfo = self.tarinfo.fromtarfile(self)
            except EOFHeaderError as e:
                # With ignore_zeros, skip zero blocks and keep scanning.
                if self.ignore_zeros:
                    self._dbg(2, "0x%X: %s" % (self.offset, e))
                    self.offset += BLOCKSIZE
                    continue
            except InvalidHeaderError as e:
                if self.ignore_zeros:
                    self._dbg(2, "0x%X: %s" % (self.offset, e))
                    self.offset += BLOCKSIZE
                    continue
                elif self.offset == 0:
                    # A bad header at offset 0 means this is not a tar file.
                    raise ReadError(str(e))
            except EmptyHeaderError:
                if self.offset == 0:
                    raise ReadError("empty file")
            except TruncatedHeaderError as e:
                if self.offset == 0:
                    raise ReadError(str(e))
            except SubsequentHeaderError as e:
                raise ReadError(str(e))
            break
        if tarinfo is not None:
            self.members.append(tarinfo)
        else:
            # End of archive reached: remember that everything is loaded.
            self._loaded = True
        return tarinfo
#--------------------------------------------------------------------------
# Little helper methods:
    def _getmember(self, name, tarinfo=None, normalize=False):
        """Find an archive member by name from bottom to top.
        If tarinfo is given, it is used as the starting point.

        Returns None when no member matches. With normalize=True both
        sides are compared after os.path.normpath().
        """
        # Ensure that all members have been loaded.
        members = self.getmembers()
        # Limit the member search list up to tarinfo.
        if tarinfo is not None:
            members = members[:members.index(tarinfo)]
        if normalize:
            name = os.path.normpath(name)
        # Search backwards so the most recently added duplicate wins.
        for member in reversed(members):
            if normalize:
                member_name = os.path.normpath(member.name)
            else:
                member_name = member.name
            if name == member_name:
                return member
def _load(self):
"""Read through the entire archive file and look for readable
members.
"""
while True:
tarinfo = self.next()
if tarinfo is None:
break
self._loaded = True
def _check(self, mode=None):
"""Check if TarFile is still open, and if the operation's mode
corresponds to TarFile's mode.
"""
if self.closed:
raise IOError("%s is closed" % self.__class__.__name__)
if mode is not None and self.mode not in mode:
raise IOError("bad operation for mode %r" % self.mode)
    def _find_link_target(self, tarinfo):
        """Find the target member of a symlink or hardlink member in the
        archive.

        Raises KeyError if the link target is not in the archive.
        """
        if tarinfo.issym():
            # Always search the entire archive.
            linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname
            limit = None
        else:
            # Search the archive before the link, because a hard link is
            # just a reference to an already archived file.
            linkname = tarinfo.linkname
            limit = tarinfo
        member = self._getmember(linkname, tarinfo=limit, normalize=True)
        if member is None:
            raise KeyError("linkname %r not found" % linkname)
        return member
def __iter__(self):
"""Provide an iterator object.
"""
if self._loaded:
return iter(self.members)
else:
return TarIter(self)
def _dbg(self, level, msg):
"""Write debugging output to sys.stderr.
"""
if level <= self.debug:
print(msg, file=sys.stderr)
    def __enter__(self):
        """Context-management protocol: verify the archive is open and
        return self."""
        self._check()
        return self
    def __exit__(self, type, value, traceback):
        """Context-management protocol: close normally on success; on error
        only release the file object without finalizing the archive."""
        if type is None:
            self.close()
        else:
            # An exception occurred. We must not call close() because
            # it would try to write end-of-archive blocks and padding.
            if not self._extfileobj:
                self.fileobj.close()
            self.closed = True
# class TarFile
class TarIter:
    """Iterator Class.

    Lazily yields TarInfo objects from a TarFile that has not been fully
    scanned yet, falling back to the cached member list once it has:

       for tarinfo in TarFile(...):
           suite...
    """
    def __init__(self, tarfile):
        """Construct a TarIter object.
        """
        self.tarfile = tarfile
        # Position within tarfile.members, used once the archive is loaded.
        self.index = 0
    def __iter__(self):
        """Return iterator object.
        """
        return self
    def __next__(self):
        """Return the next item using TarFile's next() method.
        When all members have been read, set TarFile as _loaded.
        """
        # Fix for SF #1100429: Under rare circumstances it can
        # happen that getmembers() is called during iteration,
        # which will cause TarIter to stop prematurely.
        if not self.tarfile._loaded:
            tarinfo = self.tarfile.next()
            if not tarinfo:
                self.tarfile._loaded = True
                raise StopIteration
        else:
            # Archive fully scanned: serve from the cached member list.
            try:
                tarinfo = self.tarfile.members[self.index]
            except IndexError:
                raise StopIteration
        self.index += 1
        return tarinfo
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
    """Return True if name points to a tar archive that we
    are able to handle, else return False.
    """
    try:
        # NOTE: `open` here is the module-level alias for TarFile.open
        # (rebound at the bottom of this module), not the builtin open().
        t = open(name)
        t.close()
        return True
    except TarError:
        return False
# Preserve the builtin open() under a private name, then shadow it with
# TarFile.open so that `tarfile.open(...)` is the module's public entry point.
bltn_open = open
open = TarFile.open
| wdv4758h/ZipPy | lib-python/3/tarfile.py | Python | bsd-3-clause | 92,256 |
import contextlib
import gzip
import hashlib
import io
import mmap
from builtins import (
map as imap,
)
def gzip_compress(data, compresslevel=6):
    """Gzip-compress `data` (bytes) in memory and return the compressed bytes."""
    buffer = io.BytesIO()
    compressor = gzip.GzipFile(fileobj=buffer,
                               mode="wb",
                               compresslevel=compresslevel)
    with compressor:
        compressor.write(data)
    return buffer.getvalue()
def hash_file(fn, hn):
    """Compute the digest of file `fn` with hash algorithm `hn`.

    The file is memory-mapped so large files are hashed without reading
    them fully into memory. Note: mmap requires a non-empty file.
    """
    h = hashlib.new(hn)
    # BUG FIX: open in binary mode ("rb", not "r") — hashing operates on raw
    # bytes and text mode breaks mmap on some platforms. ACCESS_READ is the
    # portable spelling (PROT_READ is POSIX-only).
    with open(fn, "rb") as fh:
        with contextlib.closing(mmap.mmap(fh.fileno(), 0, access=mmap.ACCESS_READ)) as mm:
            h.update(mm)
    return h.digest()
def indent(text, spaces):
    """Prefix every line of `text` with `spaces` space characters.

    The result has no trailing newline (str.splitlines drops it).
    """
    prefix = " " * int(spaces)
    return "\n".join(prefix + line for line in text.splitlines())
| nanshe-org/nanshe_workflow | nanshe_workflow/util.py | Python | apache-2.0 | 733 |
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import os
import sys
import ast
import subprocess
import logging
from collections import defaultdict
import unitem.seq_io as seq_io
from unitem.defaults import *
def run_cmd(cmd, program=None):
    """Run external command, relaying its output through the logger.

    Parameters
    ----------
    cmd : str
        Command line to execute. It is split on whitespace, so individual
        arguments must not contain embedded spaces.
    program : str, optional
        Label prepended to each relayed output line.

    Exits the whole process (sys.exit(1)) if the command fails or cannot
    be executed.
    """
    logger = logging.getLogger('timestamp')
    logger.info(f"Executing: {cmd}")
    try:
        proc = subprocess.Popen(cmd.split(),
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                encoding='utf-8')
        progress_strs = ['Finished processing',
                         'Finished parsing']
        # Stream the child's combined stdout/stderr line by line until it exits.
        while True:
            out = proc.stdout.readline()
            if not out and proc.poll() is not None:
                break
            if out.rstrip():
                # HACK: if output is logging information for a dependency, remove
                # the logging information so it isn't shown twice
                if ' INFO: ' in out:
                    out = out.split(' INFO: ')[1]
                if any([p in out for p in progress_strs]):
                    # HACK: CheckM progress bars so write to stdout without newline
                    print(f'{out.rstrip()}\r', end='')
                elif program:
                    logger.info(f'[{program}] {out.rstrip()}')
                else:
                    logger.info(out.rstrip())
        if proc.returncode != 0:
            logger.error(f'Return code: {proc.returncode}')
            sys.exit(1)
    except Exception as e:
        print(e)
        logger.error('Failed to execute command.')
        sys.exit(1)
def calculateN50L50M50(seqs):
    """Calculate N50, L50, and M50 assembly statistics for `seqs`.

    N50: length of the sequence at which half the total length is reached
    when summing sequences from longest to shortest. L50: number of
    sequences needed to reach that point. M50: remaining sequence count
    (plus one unless the half-total was hit exactly).
    """
    lengths = sorted((len(s) for s in seqs), reverse=True)
    half_total = sum(lengths) / 2
    running = 0
    N50 = 0
    L50 = 0
    for length in lengths:
        L50 += 1
        running += length
        if running >= half_total:
            N50 = length
            break
    M50 = len(seqs) - L50
    if running != half_total:
        M50 += 1
    return N50, L50, M50
def parse_checkm_bin_stats(checkm_dir):
    """Read CheckM bin statistics from file.

    Parameters
    ----------
    checkm_dir : str
        CheckM output directory containing 'storage/<BIN_STATS_EXT_OUT>'.

    Returns
    -------
    dict
        Maps bin id to the literal-eval'ed statistics dict for that bin.
    """
    bin_stats_file = os.path.join(
        checkm_dir, 'storage', BIN_STATS_EXT_OUT)

    bin_stats = {}
    with open(bin_stats_file, 'r') as f:
        for line in f:
            # BUG FIX: split only on the first tab; a plain split would
            # truncate the value if it ever contained a tab character.
            bin_id, stats_repr = line.split('\t', 1)
            bin_stats[bin_id] = ast.literal_eval(stats_repr)

    return bin_stats
def parse_bin_stats(profile_dir):
    """Parse genomic and assembly statistics for bins.

    Returns a dict mapping each binning method name to its parsed CheckM
    bin statistics.
    """
    methods_dir = os.path.join(profile_dir, BINNING_METHOD_DIR)
    return {
        bm: parse_checkm_bin_stats(os.path.join(methods_dir, bm, CHECKM_BAC_DIR))
        for bm in os.listdir(methods_dir)
    }
def read_bins(bin_dirs):
    """Read sequences in bins.

    Parameters
    ----------
    bin_dirs : dict
        Maps a binning method id to a (bin directory, fasta extension) pair.

    Returns
    -------
    tuple
        (bins, contigs, contigs_in_bins) where `bins` maps method -> bin id
        -> set of sequence ids, `contigs` maps sequence id -> sequence, and
        `contigs_in_bins` maps sequence id -> method -> bin id.
    """
    bins = defaultdict(lambda: defaultdict(set))
    contigs = {}
    contigs_in_bins = defaultdict(lambda: {})
    for method_id, (bin_dir, bin_ext) in bin_dirs.items():
        for bf in os.listdir(bin_dir):
            if not bf.endswith(bin_ext):
                continue

            # Bin id is the filename without the extension (and without a
            # trailing dot separator, if present).
            bin_id = bf[0:bf.rfind(bin_ext)]
            if bin_id[-1] == '.':
                bin_id = bin_id[0:-1]

            bf_path = os.path.join(bin_dir, bf)
            for seq_id, seq in seq_io.read_seq(bf_path):
                bins[method_id][bin_id].add(seq_id)
                contigs[seq_id] = seq
                contigs_in_bins[seq_id][method_id] = bin_id

            if len(bins[method_id][bin_id]) == 0:
                logger = logging.getLogger('timestamp')
                # BUG FIX: this message was a plain string containing brace
                # placeholders; it must be an f-string to interpolate them.
                logger.warning(f'Bin {bf} from {method_id} is empty.')

    return bins, contigs, contigs_in_bins
def count_nt(seq):
    """Count occurrences of each nucleotide in a sequence.

    Only the bases A, C, G, and T(U) are counted; ambiguous bases are
    ignored. Comparison is case-insensitive.

    Parameters
    ----------
    seq : str
        Nucleotide sequence.

    Returns
    -------
    tuple
        Number of A, C, G, and T(U) in the sequence.
    """
    upper = seq.upper()
    return (upper.count('A'),
            upper.count('C'),
            upper.count('G'),
            upper.count('T') + upper.count('U'))
def bin_gc(bin):
    """Calculate the GC-content of a bin.

    `bin` maps sequence ids to sequences; ambiguous bases are excluded
    from the total (see count_nt).
    """
    totals = [0, 0, 0, 0]
    for seq in bin.values():
        for idx, count in enumerate(count_nt(seq)):
            totals[idx] += count
    a_count, c_count, g_count, t_count = totals
    denominator = a_count + c_count + g_count + t_count
    return (g_count + c_count) / denominator
| dparks1134/UniteM | unitem/common.py | Python | gpl-3.0 | 6,131 |
"""Tests for the Nest integration API glue library.
There are two interesting cases to exercise that have different strategies
for token refresh and for testing:
- API based requests, tested using aioclient_mock
- Pub/sub subcriber initialization, intercepted with patch()
The tests below exercise both cases during integration setup.
"""
import time
from unittest.mock import patch
from homeassistant.components.nest import DOMAIN
from homeassistant.components.nest.const import API_URL, OAUTH2_TOKEN, SDM_SCOPES
from homeassistant.setup import async_setup_component
from homeassistant.util import dt
from .common import (
CLIENT_ID,
CLIENT_SECRET,
CONFIG,
FAKE_REFRESH_TOKEN,
FAKE_TOKEN,
PROJECT_ID,
create_config_entry,
)
FAKE_UPDATED_TOKEN = "fake-updated-token"
async def async_setup_sdm(hass):
    """Set up the nest integration from CONFIG and wait for setup to complete."""
    assert await async_setup_component(hass, DOMAIN, CONFIG)
    await hass.async_block_till_done()
async def test_auth(hass, aioclient_mock):
    """Exercise authentication library creates valid credentials.

    Verifies that a non-expired stored token is passed unchanged both to
    the SDM API requests and to the pub/sub subscriber.
    """
    expiration_time = time.time() + 86400
    create_config_entry(expiration_time).add_to_hass(hass)

    # Prepare to capture credentials in API request. Empty payloads just mean
    # no devices or structures are loaded.
    aioclient_mock.get(f"{API_URL}/enterprises/{PROJECT_ID}/structures", json={})
    aioclient_mock.get(f"{API_URL}/enterprises/{PROJECT_ID}/devices", json={})

    # Prepare to capture credentials for Subscriber
    captured_creds = None

    async def async_new_subscriber(creds, subscription_name, loop, async_callback):
        """Capture credentials for tests."""
        nonlocal captured_creds
        captured_creds = creds
        return None  # GoogleNestSubscriber

    with patch(
        "google_nest_sdm.google_nest_subscriber.DefaultSubscriberFactory.async_new_subscriber",
        side_effect=async_new_subscriber,
    ) as new_subscriber_mock:
        await async_setup_sdm(hass)

    # Verify API requests are made with the correct credentials
    calls = aioclient_mock.mock_calls
    assert len(calls) == 2
    (method, url, data, headers) = calls[0]
    assert headers == {"Authorization": f"Bearer {FAKE_TOKEN}"}
    (method, url, data, headers) = calls[1]
    assert headers == {"Authorization": f"Bearer {FAKE_TOKEN}"}

    # Verify the susbcriber was created with the correct credentials
    assert len(new_subscriber_mock.mock_calls) == 1
    assert captured_creds
    creds = captured_creds
    assert creds.token == FAKE_TOKEN
    assert creds.refresh_token == FAKE_REFRESH_TOKEN
    assert int(dt.as_timestamp(creds.expiry)) == int(expiration_time)
    assert creds.valid
    assert not creds.expired
    assert creds.token_uri == OAUTH2_TOKEN
    assert creds.client_id == CLIENT_ID
    assert creds.client_secret == CLIENT_SECRET
    assert creds.scopes == SDM_SCOPES
async def test_auth_expired_token(hass, aioclient_mock):
    """Verify behavior of an expired token.

    The API layer should refresh the token before issuing requests, while
    the subscriber receives the stored (expired) credential and is expected
    to refresh it itself.
    """
    expiration_time = time.time() - 86400
    create_config_entry(expiration_time).add_to_hass(hass)

    # Prepare a token refresh response
    aioclient_mock.post(
        OAUTH2_TOKEN,
        json={
            "access_token": FAKE_UPDATED_TOKEN,
            "expires_at": time.time() + 86400,
            "expires_in": 86400,
        },
    )
    # Prepare to capture credentials in API request. Empty payloads just mean
    # no devices or structures are loaded.
    aioclient_mock.get(f"{API_URL}/enterprises/{PROJECT_ID}/structures", json={})
    aioclient_mock.get(f"{API_URL}/enterprises/{PROJECT_ID}/devices", json={})

    # Prepare to capture credentials for Subscriber
    captured_creds = None

    async def async_new_subscriber(creds, subscription_name, loop, async_callback):
        """Capture credentials for tests."""
        nonlocal captured_creds
        captured_creds = creds
        return None  # GoogleNestSubscriber

    with patch(
        "google_nest_sdm.google_nest_subscriber.DefaultSubscriberFactory.async_new_subscriber",
        side_effect=async_new_subscriber,
    ) as new_subscriber_mock:
        await async_setup_sdm(hass)

    calls = aioclient_mock.mock_calls
    assert len(calls) == 3
    # Verify refresh token call to get an updated token
    (method, url, data, headers) = calls[0]
    assert data == {
        "client_id": CLIENT_ID,
        "client_secret": CLIENT_SECRET,
        "grant_type": "refresh_token",
        "refresh_token": FAKE_REFRESH_TOKEN,
    }
    # Verify API requests are made with the new token
    (method, url, data, headers) = calls[1]
    assert headers == {"Authorization": f"Bearer {FAKE_UPDATED_TOKEN}"}
    (method, url, data, headers) = calls[2]
    assert headers == {"Authorization": f"Bearer {FAKE_UPDATED_TOKEN}"}

    # The subscriber is created with a token that is expired. Verify that the
    # credential is expired so the subscriber knows it needs to refresh it.
    assert len(new_subscriber_mock.mock_calls) == 1
    assert captured_creds
    creds = captured_creds
    assert creds.token == FAKE_TOKEN
    assert creds.refresh_token == FAKE_REFRESH_TOKEN
    assert int(dt.as_timestamp(creds.expiry)) == int(expiration_time)
    assert not creds.valid
    assert creds.expired
    assert creds.token_uri == OAUTH2_TOKEN
    assert creds.client_id == CLIENT_ID
    assert creds.client_secret == CLIENT_SECRET
    assert creds.scopes == SDM_SCOPES
| rohitranjan1991/home-assistant | tests/components/nest/test_api.py | Python | mit | 5,461 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Initial no-op Yoga contract migration.
Revision ID: e25ffa003242
Revises: 27e647c0fad4
Create Date: 2022-01-21 00:00:00.000000
"""
# revision identifiers, used by Alembic.
revision = 'e25ffa003242'
down_revision = '27e647c0fad4'
branch_labels = ('contract',)
def upgrade():
    # Intentionally empty: this migration only anchors the Yoga contract
    # branch and performs no schema changes.
    pass
| openstack/keystone | keystone/common/sql/migrations/versions/yoga/contract/e25ffa003242_initial.py | Python | apache-2.0 | 835 |
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.contrib.auth.models import Permission
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.db import backend
from django.db.models import Q
from geonode.security.models import UserObjectRoleMapping, GenericObjectRoleMapping
from geonode.security.enumerations import ANONYMOUS_USERS, AUTHENTICATED_USERS
from geonode.maps.models import Map
from geonode.documents.models import Document
from geonode.layers.models import Layer
from geonode.people.models import Profile
from geonode.base.models import TopicCategory, ResourceBase
from geonode.search import extension
from geonode.search.models import filter_by_period
from geonode.search.models import filter_by_extent
from geonode.search.models import using_geodjango
import operator
def _rank_rules(model, *rules):
# prefix field names with model's db table to avoid ambiguity
return [('"%s"."%s"' % (model._meta.db_table, r[0]), r[1], r[2])
for r in rules]
def _filter_results(l):
    """Return True if the layer's name matches none of the exclusion
    patterns (i.e. it should appear in the results)."""
    for pattern in extension.exclude_regex:
        if pattern.search(l['name']):
            return False
    return True
def _filter_security(q, user, model, permission):
    '''apply filters to the query that remove those model objects that are
    not viewable by the given user based on row-level permissions

    q          -- queryset to restrict
    user       -- requesting user (may be None / anonymous)
    model      -- model class the queryset selects from
    permission -- permission codename required to view an object (e.g. 'view_map')
    '''
    # superusers see everything
    if user and user.is_superuser: return q
    # resolve the model permission
    ct = ContentType.objects.get_for_model(model)
    p = Permission.objects.get(content_type=ct, codename=permission)
    # apply generic role filters: everyone is in ANONYMOUS_USERS; logged-in
    # users are additionally in AUTHENTICATED_USERS
    generic_roles = [ANONYMOUS_USERS]
    if user and not user.is_anonymous():
        generic_roles.append(AUTHENTICATED_USERS)
    # ids of objects whose generic-role grants carry this permission
    grm = GenericObjectRoleMapping.objects.filter(object_ct=ct, role__permissions__in=[p], subject__in=generic_roles).values('object_id')
    security = Q(id__in=grm)
    # apply specific user filters: per-user role grants on individual objects
    if user and not user.is_anonymous():
        urm = UserObjectRoleMapping.objects.filter(object_ct=ct, role__permissions__in=[p], user=user).values('object_id')
        security = security | Q(id__in=urm)
        # if the user is the owner, make sure these are included
        security = security | Q(owner=user)
    return q.filter(security)
def _filter_category(q, categories):
    """Restrict the queryset to objects whose category identifier appears in
    ``categories``; identifiers that resolve to no TopicCategory are skipped."""
    resolved = []
    for identifier in categories:
        try:
            topic = TopicCategory.objects.get(identifier=identifier)
        except TopicCategory.DoesNotExist:
            # FIXME Do something here - unknown identifiers are silently ignored
            continue
        resolved.append(topic)
    return q.filter(category__in=resolved)
def _add_relevance(query, rank_rules):
eq = """CASE WHEN %s = '%s' THEN %s ELSE 0 END"""
frag = """CASE WHEN position(lower('%s') in lower(%s)) >= 1 THEN %s ELSE 0 END"""
preds = []
preds.extend( [ eq % (r[0],query.query,r[1]) for r in rank_rules] )
preds.extend( [ frag % (query.query,r[0],r[2]) for r in rank_rules] )
words = query.split_query
if len(words) > 1:
for w in words:
preds.extend( [ frag % (w,r[0],r[2] / 2) for r in rank_rules] )
sql = " + ".join(preds)
return sql
def _safely_add_relevance(q, query, rank_rules):
    """Attach a computed 'relevance' column to the queryset via .extra().

    Skipped entirely on sqlite (i.e. during unittests): the generated
    expression is postgres-specific SQL, so it is verified directly from a
    query's SQL rather than executed there.
    """
    if 'sqlite' in backend.__name__:
        return q
    relevance_sql = _add_relevance(query, rank_rules)
    # ugh - work around bug: reset deferred-field state before .extra()
    cleared = q.defer(None)
    return cleared.extra(select={'relevance': relevance_sql})
def _build_map_layer_text_query(q, query, query_keywords=False):
    '''Filter the queryset by matching the search text against title and
    abstract (and, when query_keywords is set, against keywords), combining
    all clauses with OR.
    '''
    # title or abstract contains the entire phrase
    clauses = [Q(title__icontains=query.query), Q(abstract__icontains=query.query)]
    words = query.split_query
    # title or abstract contains individual words of the phrase
    if len(words) > 1:
        clauses.extend(Q(title__icontains=word) for word in words)
        clauses.extend(Q(abstract__icontains=word) for word in words)
    # or keywords match any word of the phrase
    if query_keywords and words:
        clauses.append(_build_kw_only_query(words))
    # OR the accumulated clauses together and filter
    if clauses:
        combined = clauses[0]
        for clause in clauses[1:]:
            combined = combined | clause
        q = q.filter(combined)
    return q
def _build_kw_only_query(keywords):
    """OR together a keyword-slug containment clause for each keyword.

    ``keywords`` must be non-empty.
    """
    clauses = [Q(keywords__slug__contains=kw) for kw in keywords]
    combined = clauses[0]
    for clause in clauses[1:]:
        combined = combined | clause
    return combined
def _get_owner_results(query):
    """Search owner/user profiles matching *query*.

    Returns a distinct, relevance-annotated Profile queryset, an empty
    queryset when keyword search was requested (unsupported), or None when
    the extension vetoes owner search entirely.
    """
    # make sure all contacts have a user attached
    q = extension.owner_query(query)
    if q is None: return None
    if query.kw:
        # hard to handle - not supporting at the moment
        return Profile.objects.none()
    if query.owner:
        q = q.filter(user__username__icontains = query.owner)
    if query.extent:
        # an owner matches when any of their maps OR layers fall in the extent
        q = filter_by_extent(Map, q, query.extent, True) | \
            filter_by_extent(Layer, q, query.extent, True)
    if query.period:
        q = filter_by_period(Map, q, *query.period, user=True) | \
            filter_by_period(Layer, q, *query.period, user=True)
    if query.added:
        q = q.filter(user__date_joined__gt = query.added)
    if query.query:
        # free-text search across username, first/last name, plus any
        # extension-declared profile fields
        qs = Q(user__username__icontains=query.query) | \
             Q(user__first_name__icontains=query.query) | \
             Q(user__last_name__icontains=query.query)
        for field in extension.owner_query_fields:
            qs = qs | Q(**{'%s__icontains' % field: query.query})
        q = q.filter(qs)
    # relevance ranking on username/organization plus extension-added rules
    rules = _rank_rules(User,['username', 10, 5]) + \
            _rank_rules(Profile,['organization', 5, 2])
    added = extension.owner_rank_rules()
    if added:
        rules = rules + _rank_rules(*added)
    q = _safely_add_relevance(q, query, rules)
    return q.distinct()
def _get_map_results(query):
    """Search Map objects matching *query*.

    Returns a distinct, relevance-annotated queryset restricted (via
    row-level security) to maps the requesting user may view.
    """
    q = extension.map_query(query)
    q = _filter_security(q, query.user, Map, 'view_map')
    if query.owner:
        q = q.filter(owner__username=query.owner)
    if query.extent:
        q = filter_by_extent(Map, q, query.extent)
    if query.added:
        q = q.filter(last_modified__gte=query.added)
    if query.period:
        q = filter_by_period(Map, q, *query.period)
    if query.kw:
        q = q.filter(_build_kw_only_query(query.kw))
    if query.exclude:
        # drop maps whose title contains any excluded term
        q = q.exclude(reduce(operator.or_, [Q(title__contains=ex) for ex in query.exclude]))
    if query.categories:
        q = _filter_category(q, query.categories)
    if query.query:
        q = _build_map_layer_text_query(q, query, query_keywords=True)
    # rank on the ResourceBase title/abstract columns
    rules = _rank_rules(ResourceBase,
                        ['title',10, 5],
                        ['abstract',5, 2],
    )
    q = _safely_add_relevance(q, query, rules)
    return q.distinct()
def _get_layer_results(query):
    """Search Layer objects matching *query*.

    Returns a distinct, relevance-annotated queryset restricted (via
    row-level security) to layers the requesting user may view, with
    extension-configured layer-name patterns excluded.
    """
    q = extension.layer_query(query)
    q = _filter_security(q, query.user, Layer, 'view_layer')
    if extension.exclude_patterns:
        # drop layers whose name matches any configured exclusion regex
        name_filter = reduce(operator.or_,[ Q(name__regex=f) for f in extension.exclude_patterns])
        q = q.exclude(name_filter)
    if query.kw:
        q = q.filter(_build_kw_only_query(query.kw))
    if query.exclude:
        q = q.exclude(reduce(operator.or_, [Q(title__contains=ex) for ex in query.exclude]))
    if query.owner:
        q = q.filter(owner__username=query.owner)
    if query.extent:
        q = filter_by_extent(Layer, q, query.extent)
    if query.added:
        q = q.filter(date__gte=query.added)
    if query.period:
        q = filter_by_period(Layer, q, *query.period)
    if query.categories:
        q = _filter_category(q, query.categories)
    # this is a special optimization for pre-fetching results when requesting
    # all records via search
    # keywords and thumbnails cannot be pre-fetched at the moment due to
    # the way the contenttypes are implemented
    if query.limit == 0 and using_geodjango:
        q = q.defer(None).prefetch_related("owner","spatial_temporal_index")
    if query.query:
        # layers additionally match on their name field
        q = _build_map_layer_text_query(q, query, query_keywords=True) |\
            q.filter(name__icontains=query.query) # map doesn't have name
    rules = _rank_rules(ResourceBase,
                        ['title',10, 5],
                        ['abstract',5, 2],
    )
    q = _safely_add_relevance(q, query, rules)
    return q.distinct()
def _get_document_results(query):
    """Search Document objects matching *query*.

    Mirrors _get_layer_results but over Document, with 'view_document'
    row-level security. Returns a distinct, relevance-annotated queryset.
    """
    q = extension.document_query(query)
    q = _filter_security(q, query.user, Document, 'view_document')
    if extension.exclude_patterns:
        name_filter = reduce(operator.or_,[ Q(name__regex=f) for f in extension.exclude_patterns])
        q = q.exclude(name_filter)
    if query.kw:
        q = q.filter(_build_kw_only_query(query.kw))
    if query.exclude:
        q = q.exclude(reduce(operator.or_, [Q(title__contains=ex) for ex in query.exclude]))
    if query.owner:
        q = q.filter(owner__username=query.owner)
    if query.extent:
        q = filter_by_extent(Document, q, query.extent)
    if query.added:
        q = q.filter(date__gte=query.added)
    if query.period:
        q = filter_by_period(Document, q, *query.period)
    if query.categories:
        q = _filter_category(q, query.categories)
    # this is a special optimization for pre-fetching results when requesting
    # all records via search
    # keywords and thumbnails cannot be pre-fetched at the moment due to
    # the way the contenttypes are implemented
    if query.limit == 0 and using_geodjango:
        q = q.defer(None).prefetch_related("owner","spatial_temporal_index")
    if query.query:
        q = _build_map_layer_text_query(q, query, query_keywords=True)
    rules = _rank_rules(ResourceBase,
                        ['title',10, 5],
                        ['abstract',5, 2],
    )
    q = _safely_add_relevance(q, query, rules)
    return q.distinct()
def combined_search_results(query):
    """Run the per-type searches selected by ``query.type`` and assemble one
    result dict containing a 'facets' count summary plus a queryset per type.
    """
    facets = {'map': 0, 'layer': 0, 'vector': 0, 'raster': 0, 'document': 0, 'user': 0}
    results = {'facets': facets}
    # u'all' collapses to the sentinel (None,), meaning every type
    bytype = (None,) if u'all' in query.type else query.type
    query.type = bytype

    def wanted(*names):
        return None in bytype or any(name in bytype for name in names)

    if wanted(u'map'):
        qs = _get_map_results(query)
        facets['map'] = qs.count()
        results['maps'] = qs
    if wanted(u'layer', u'raster', u'vector'):
        qs = _get_layer_results(query)
        # narrow to one store type only when exactly one of raster/vector was asked for
        if u'raster' in bytype and u'vector' not in bytype:
            qs = qs.filter(storeType='coverageStore')
        if u'vector' in bytype and u'raster' not in bytype:
            qs = qs.filter(storeType='dataStore')
        facets['layer'] = qs.count()
        facets['raster'] = qs.filter(storeType='coverageStore').count()
        facets['vector'] = qs.filter(storeType='dataStore').count()
        results['layers'] = qs
    if wanted(u'document'):
        qs = _get_document_results(query)
        facets['document'] = qs.count()
        results['documents'] = qs
    # user search only applies when no effective category filter is active:
    # either none requested, or every known category requested
    all_categories = not query.categories or \
        len(query.categories) == TopicCategory.objects.count()
    if all_categories and wanted(u'user'):
        qs = _get_owner_results(query)
        facets['user'] = qs.count()
        results['users'] = qs
    return results
| AnnalisaS/migration_geonode | geonode/search/search.py | Python | gpl-3.0 | 12,257 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
import sys,os
# Make the directory one level above this file importable so the sibling
# ``Audio`` package can be resolved when this module is run as a script.
sys.path.append(__file__[:1+__file__.rfind(os.sep)] + (".."+os.sep)*1)
from Audio.PyMedia.Input import Input as PyMediaInput
| sparkslabs/kamaelia_ | Sketches/MH/pymedia/Audio/Input.py | Python | apache-2.0 | 1,047 |
#!/usr/bin/env python
#
# Copyright 2013 Hannes Juutilainen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from autopkglib import Processor, ProcessorError, URLGetter
try:
    # plistlib.loads is the modern API; readPlistFromString is the legacy
    # name used as a fallback on old interpreters where loads is missing.
    from plistlib import loads as plist_from_string
except ImportError:
    from plistlib import readPlistFromString as plist_from_string
__all__ = ["AlfredURLProvider"]
# Update URLs for Alfred versions
# Found in "Alfred (version).app/Contents/Frameworks/Alfred
# Framework.framework/Versions/A/Alfred Framework"
# Maps major version -> release channel -> update feed URL.
UPDATE_URLS = {
    "2": {
        "stable": "https://cachefly.alfredapp.com/updater/info.plist",
        "prerelease": "https://cachefly.alfredapp.com/updater/prerelease.plist",
    },
    "3": {
        "stable": "https://www.alfredapp.com/app/update/general.xml",
        "prerelease": "https://www.alfredapp.com/app/update/prerelease.xml",
    },
    "4": {
        "stable": "https://www.alfredapp.com/app/update4/general.xml",
        "prerelease": "https://www.alfredapp.com/app/update4/prerelease.xml",
    },
}
# Defaults used when the recipe supplies no major_version / release_type.
DEFAULT_MAJOR_VERSION = "2"
DEFAULT_RELEASE_TYPE = "stable"
class AlfredURLProvider(URLGetter):
    """Provides a download URL for the latest Alfred."""

    input_variables = {
        "base_url": {
            "required": False,
            "description": "The Alfred update info property list URL",
        },
        "major_version": {
            "required": False,
            "description": "The Alfred major version to get. "
            "The default value is %s. Possible values are: "
            "'%s'" % (DEFAULT_MAJOR_VERSION, "', '".join(UPDATE_URLS)),
        },
        "release_type": {
            "required": False,
            "description": "The Alfred release type to get. "
            "Possible values are 'stable' or 'prerelease'",
        },
    }
    output_variables = {
        "url": {"description": "URL to the latest Alfred release.",},
        "version": {"description": "Version of the latest Alfred release.",},
    }
    description = __doc__

    def download_info_plist(self, base_url):
        """Downloads the info.plist file and returns a plist object."""
        raw = self.download(base_url)
        return plist_from_string(raw)

    def get_alfred_url(self, base_url):
        """Find and return a download URL for Alfred 2."""
        # Alfred 2, 3, and 4 update check uses a standard plist file.
        # If this changes in the future, we'll need to copy/adjust this method.
        info = self.download_info_plist(base_url)
        version = info.get("version", None)
        self.env["version"] = version
        self.output("Found version %s" % version)
        return info.get("location", None)

    def main(self):
        """Main process: resolve the feed URL for the requested major version
        and channel, then extract the download URL and version from it."""
        # Acquire input variables
        major = self.env.get("major_version", DEFAULT_MAJOR_VERSION)
        self.output("Major version is set to %s" % major)
        channel = self.env.get("release_type", DEFAULT_RELEASE_TYPE)
        self.output("Release type is set to %s" % channel)
        # Validate inputs
        if major not in UPDATE_URLS:
            raise ProcessorError(
                "Unsupported value for major_version: %s" % major
            )
        if channel not in ("stable", "prerelease"):
            raise ProcessorError(
                "Unsupported value for release_type: %s" % channel
            )
        # Get base URL depending on major version and release type; a recipe
        # may override it entirely via base_url.
        feed_url = self.env.get("base_url", UPDATE_URLS[major][channel])
        self.output("Using URL %s" % feed_url)
        # Get download URL by parsing content of base URL
        self.env["url"] = self.get_alfred_url(feed_url)
        self.output("Found URL %s" % self.env["url"])
if __name__ == "__main__":
    # Allow the processor to be exercised stand-alone from the command line.
    PROCESSOR = AlfredURLProvider()
    PROCESSOR.execute_shell()
| autopkg/hjuutilainen-recipes | AlfredApp/AlfredURLProvider.py | Python | mit | 4,480 |
# -*- coding: utf-8 -*-
# © 2016 Ainara Galdona - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from . import test_stock_valuation_extension
| esthermm/odoo-addons | stock_valuation_extension/tests/__init__.py | Python | agpl-3.0 | 171 |
#!/usr/bin/env python
'''
This module contais some common routines used by other samples.
'''
import numpy as np
import cv2
import os
from contextlib import contextmanager
import itertools as it
# File extensions treated as images by the sample utilities.
image_extensions = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.pbm', '.pgm', '.ppm']
class Bunch(object):
    """A tiny attribute bag: ``Bunch(a=1).a == 1``."""
    def __init__(self, **kw):
        for name in kw:
            setattr(self, name, kw[name])
    def __str__(self):
        return str(self.__dict__)
def splitfn(fn):
    """Split a path into (directory, basename-without-extension, extension)."""
    directory, basename = os.path.split(fn)
    stem, extension = os.path.splitext(basename)
    return directory, stem, extension
def anorm2(a):
    """Return the squared Euclidean norm of *a* along its last axis."""
    return np.sum(a * a, axis=-1)

def anorm(a):
    """Return the Euclidean norm of *a* along its last axis."""
    return np.sqrt(anorm2(a))
def homotrans(H, x, y):
    """Apply the 3x3 homography *H* to point (x, y) and dehomogenize."""
    x_new = H[0, 0] * x + H[0, 1] * y + H[0, 2]
    y_new = H[1, 0] * x + H[1, 1] * y + H[1, 2]
    w = H[2, 0] * x + H[2, 1] * y + H[2, 2]
    return x_new / w, y_new / w
def to_rect(a):
    """Normalize *a* into a 2x2 float64 rectangle [[x0, y0], [x1, y1]].

    A 2-element input is treated as a size, i.e. (0, 0, w, h).
    """
    flat = np.ravel(a)
    if len(flat) == 2:
        flat = (0, 0, flat[0], flat[1])
    return np.array(flat, np.float64).reshape(2, 2)

def rect2rect_mtx(src, dst):
    """Return the 3x3 affine matrix mapping rectangle *src* onto *dst*."""
    src_rect = to_rect(src)
    dst_rect = to_rect(dst)
    cx, cy = (dst_rect[1] - dst_rect[0]) / (src_rect[1] - src_rect[0])
    tx, ty = dst_rect[0] - src_rect[0] * (cx, cy)
    return np.float64([[cx,  0, tx],
                       [ 0, cy, ty],
                       [ 0,  0,  1]])
def lookat(eye, target, up = (0, 0, 1)):
    """Build the rotation R and translation tvec of a camera placed at *eye*
    looking toward *target*, with *up* fixing the roll."""
    forward = np.asarray(target, np.float64) - eye
    forward /= anorm(forward)
    right = np.cross(forward, up)
    right /= anorm(right)
    down = np.cross(forward, right)
    R = np.float64([right, down, forward])
    tvec = -np.dot(R, eye)
    return R, tvec
def mtx2rvec(R):
    """Convert a rotation matrix into a Rodrigues vector (axis * angle)."""
    w, u, vt = cv2.SVDecomp(R - np.eye(3))
    p = vt[0] + u[:,0]*w[0] # same as np.dot(R, vt[0])
    cos_a = np.dot(vt[0], p)
    sin_a = np.dot(vt[1], p)
    axis = np.cross(vt[0], vt[1])
    return axis * np.arctan2(sin_a, cos_a)
def draw_str(dst, target, s):
    """Draw string *s* on image *dst* at *target* = (x, y) with a dark
    drop shadow for legibility.

    The original signature used a Python-2-only tuple parameter
    ``(dst, (x, y), s)``; unpacking inside the body keeps all positional
    callers working while remaining valid syntax under Python 3.
    """
    x, y = target
    cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv2.CV_AA)
    cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.CV_AA)
class Sketcher:
    """Free-hand drawing helper: dragging the left mouse button paints
    strokes onto each destination image with its paired color."""
    def __init__(self, windowname, dests, colors_func):
        self.prev_pt = None  # last mouse position while a drag is active
        self.windowname = windowname
        self.dests = dests  # images painted in parallel (e.g. visible image + mask)
        self.colors_func = colors_func  # () -> iterable of colors, one per dest
        self.dirty = False  # set once anything has been drawn
        self.show()
        cv2.setMouseCallback(self.windowname, self.on_mouse)
    def show(self):
        # refresh the window with the first destination image
        cv2.imshow(self.windowname, self.dests[0])
    def on_mouse(self, event, x, y, flags, param):
        # while the left button is held, connect successive positions with lines
        pt = (x, y)
        if event == cv2.EVENT_LBUTTONDOWN:
            self.prev_pt = pt
        if self.prev_pt and flags & cv2.EVENT_FLAG_LBUTTON:
            for dst, color in zip(self.dests, self.colors_func()):
                cv2.line(dst, self.prev_pt, pt, color, 5)
            self.dirty = True
            self.prev_pt = pt
            self.show()
        else:
            self.prev_pt = None
# palette data from matplotlib/_cm.py
# Each channel is a list of (x, y_below, y_above) breakpoints of a
# piecewise-linear segment map over x in [0, 1].
_jet_data =   {'red':   ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),
                         (1, 0.5, 0.5)),
               'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),
                         (0.91,0,0), (1, 0, 0)),
               'blue':  ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),
                         (1, 0, 0))}

cmap_data = { 'jet' : _jet_data }

def make_cmap(name, n=256):
    """Build an n-entry BGR lookup table (uint8, shape (n, 3)) for the
    named palette in ``cmap_data``."""
    segments = cmap_data[name]
    sample_points = np.linspace(0.0, 1.0, n)
    eps = 1e-6
    channels = []
    for channel_name in ['blue', 'green', 'red']:
        xp = []
        yp = []
        for x, y_below, y_above in segments[channel_name]:
            # duplicate each breakpoint eps apart so np.interp reproduces the
            # left/right values of the piecewise definition
            xp.extend([x, x + eps])
            yp.extend([y_below, y_above])
        channels.append(np.interp(sample_points, xp, yp))
    return np.uint8(np.array(channels).T * 255)
def nothing(*arg, **kw):
    """No-op callback placeholder (e.g. for trackbar/mouse handlers)."""
    return None
def clock():
    # wall-clock seconds derived from OpenCV's high-resolution tick counter
    return cv2.getTickCount() / cv2.getTickFrequency()
@contextmanager
def Timer(msg):
    # Context manager that prints *msg*, times the wrapped block with
    # clock() above, and prints the elapsed wall time in milliseconds.
    # (Uses Python 2 print statements, matching the rest of this module.)
    print msg, '...',
    start = clock()
    try:
        yield
    finally:
        print "%.2f ms" % ((clock()-start)*1000)
class StatValue:
    """Exponentially smoothed running value.

    ``smooth_coef`` is the weight kept for the previous estimate; the first
    update initializes the estimate directly.
    """
    def __init__(self, smooth_coef = 0.5):
        self.value = None
        self.smooth_coef = smooth_coef
    def update(self, v):
        """Fold a new measurement *v* into the smoothed value."""
        previous = self.value
        if previous is None:
            self.value = v
            return
        c = self.smooth_coef
        self.value = c * previous + (1.0 - c) * v
class RectSelector:
    """Mouse-driven rectangle selection for an OpenCV window.

    While the left button is dragged, the current rectangle is tracked and
    can be rendered via draw(); on release, *callback* is invoked with the
    final (x0, y0, x1, y1) rectangle.
    """
    def __init__(self, win, callback):
        self.win = win
        self.callback = callback  # called with the rect on mouse-up
        cv2.setMouseCallback(win, self.onmouse)
        self.drag_start = None  # anchor corner while dragging, else None
        self.drag_rect = None   # current normalized rect, else None
    def onmouse(self, event, x, y, flags, param):
        x, y = np.int16([x, y]) # BUG
        if event == cv2.EVENT_LBUTTONDOWN:
            self.drag_start = (x, y)
        elif event == cv2.EVENT_LBUTTONUP:
            # finalize: report the rect (if any) and reset state
            rect = self.drag_rect
            self.drag_start = None
            self.drag_rect = None
            if rect:
                self.callback(rect)
        elif event == 0:
            pass
        else:
            print(event)
        if self.drag_start:
            # normalize so (x0, y0) is the top-left and (x1, y1) bottom-right
            xo, yo = self.drag_start
            x0, y0 = np.minimum([xo, yo], [x, y])
            x1, y1 = np.maximum([xo, yo], [x, y])
            self.drag_rect = None
            # only keep rectangles with non-zero area
            if x1-x0 > 0 and y1-y0 > 0:
                self.drag_rect = (x0, y0, x1, y1)
    def draw(self, vis):
        # render the in-progress rectangle; returns True if one was drawn
        if not self.drag_rect:
            return False
        x0, y0, x1, y1 = self.drag_rect
        cv2.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 1)
        return True
    @property
    def dragging(self):
        # True while a selection is in progress
        return self.drag_rect is not None
def grouper(n, iterable, fillvalue=None):
    '''grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx'''
    # The same iterator is repeated n times, so consecutive items land in the
    # same group. (Python 2: it.izip_longest; py3 renamed it zip_longest.)
    args = [iter(iterable)] * n
    return it.izip_longest(fillvalue=fillvalue, *args)
def mosaic(w, imgs):
    '''Make a grid from images.
    w    -- number of grid columns
    imgs -- images (must have same size and format)
    '''
    imgs = iter(imgs)
    img0 = imgs.next()  # Python 2 iterator protocol (next(imgs) in py3)
    pad = np.zeros_like(img0)  # blank tile used to fill the last, partial row
    imgs = it.chain([img0], imgs)  # put the first image back in front
    rows = grouper(w, imgs, pad)
    return np.vstack(map(np.hstack, rows))
def getsize(img):
    """Return (width, height) of an image array."""
    height, width = img.shape[:2]
    return width, height
def mdot(*args):
    # chained matrix product: mdot(A, B, C) == A.dot(B).dot(C)
    # (relies on the Python 2 builtin ``reduce``)
    return reduce(np.dot, args)
def draw_keypoints(vis, keypoints, color = (0, 255, 255)):
    """Draw each keypoint as a small circle onto *vis* (modified in place)."""
    for keypoint in keypoints:
        px, py = keypoint.pt
        cv2.circle(vis, (int(px), int(py)), 2, color)
| dbadb/2016-Stronghold | src/org/usfirst/frc/team4915/stronghold/vision/jetson/imgExplore2/common.py | Python | mit | 6,318 |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import importlib.resources
import itertools
import json
import logging
import os
from collections import defaultdict
from dataclasses import dataclass
from itertools import chain
from typing import TYPE_CHECKING, Any, FrozenSet, Iterable, Iterator, List, Tuple
import toml
from pants.base.glob_match_error_behavior import GlobMatchErrorBehavior
from pants.core.goals.generate_lockfiles import DEFAULT_TOOL_LOCKFILE, GenerateLockfilesSubsystem
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.addresses import UnparsedAddressInputs
from pants.engine.collection import Collection
from pants.engine.fs import (
AddPrefix,
CreateDigest,
Digest,
DigestContents,
DigestSubset,
FileContent,
FileDigest,
MergeDigests,
PathGlobs,
RemovePrefix,
Snapshot,
)
from pants.engine.internals.native_engine import EMPTY_DIGEST
from pants.engine.process import ProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import CoarsenedTargets, Target, Targets
from pants.engine.unions import UnionRule
from pants.jvm.compile import (
ClasspathEntry,
ClasspathEntryRequest,
CompileResult,
FallibleClasspathEntry,
)
from pants.jvm.resolve import coursier_setup
from pants.jvm.resolve.common import (
ArtifactRequirement,
ArtifactRequirements,
Coordinate,
Coordinates,
GatherJvmCoordinatesRequest,
)
from pants.jvm.resolve.coursier_setup import Coursier, CoursierFetchProcess
from pants.jvm.resolve.key import CoursierResolveKey
from pants.jvm.resolve.lockfile_metadata import JVMLockfileMetadata, LockfileContext
from pants.jvm.subsystems import JvmSubsystem
from pants.jvm.target_types import (
JvmArtifactFieldSet,
JvmArtifactJarSourceField,
JvmArtifactTarget,
JvmResolveField,
)
from pants.jvm.util_rules import ExtractFileDigest
from pants.util.docutil import bin_name, doc_url
from pants.util.logging import LogLevel
from pants.util.strutil import bullet_list, pluralize
if TYPE_CHECKING:
    from pants.jvm.resolve.jvm_tool import GenerateJvmLockfileFromTool
logger = logging.getLogger(__name__)


class CoursierFetchRequest(ClasspathEntryRequest):
    # Classpath entry requests satisfied by fetching a Maven artifact via Coursier.
    field_sets = (JvmArtifactFieldSet,)


class CoursierError(Exception):
    """An exception relating to invoking Coursier or processing its output."""
class NoCompatibleResolve(Exception):
    """No compatible resolve could be found for a set of targets."""

    def __init__(self, jvm: JvmSubsystem, msg_prefix: str, relevant_targets: Iterable[Target]):
        # Group the offending targets' addresses by the resolve each one uses.
        by_resolve = defaultdict(list)
        for target in relevant_targets:
            if not target.has_field(JvmResolveField):
                continue
            resolve_name = target[JvmResolveField].normalized_value(jvm)
            by_resolve[resolve_name].append(target.address.spec)
        sections = []
        for resolve_name, addresses in sorted(by_resolve.items()):
            sections.append(f"{resolve_name}:\n{bullet_list(sorted(addresses))}")
        formatted_resolve_lists = "\n\n".join(sections)
        super().__init__(
            f"{msg_prefix}:\n\n"
            f"{formatted_resolve_lists}\n\n"
            "Targets which will be merged onto the same classpath must share a resolve (from the "
            f"[resolve]({doc_url('reference-deploy_jar#coderesolvecode')}) field)."
        )
@dataclass(frozen=True)
class CoursierLockfileEntry:
    """A single artifact entry from a Coursier-resolved lockfile.

    These fields are nearly identical to the JSON objects from the
    "dependencies" entries in Coursier's --json-output-file format.
    But unlike Coursier's JSON report, a CoursierLockfileEntry
    includes the content-address of the artifact fetched by Coursier
    and ingested by Pants.

    For example, a Coursier JSON report dependency entry might look like this:
    ```
    {
      "coord": "com.chuusai:shapeless_2.13:2.3.3",
      "file": "/home/USER/.cache/coursier/v1/https/repo1.maven.org/maven2/com/chuusai/shapeless_2.13/2.3.3/shapeless_2.13-2.3.3.jar",
      "directDependencies": [
        "org.scala-lang:scala-library:2.13.0"
      ],
      "dependencies": [
        "org.scala-lang:scala-library:2.13.0"
      ]
    }
    ```

    The equivalent CoursierLockfileEntry would look like this:
    ```
    CoursierLockfileEntry(
        coord="com.chuusai:shapeless_2.13:2.3.3", # identical
        file_name="shapeless_2.13-2.3.3.jar" # PurePath(entry["file"].name)
        direct_dependencies=(Coordinate.from_coord_str("org.scala-lang:scala-library:2.13.0"),),
        dependencies=(Coordinate.from_coord_str("org.scala-lang:scala-library:2.13.0"),),
        file_digest=FileDigest(fingerprint=<sha256 of the jar>, ...),
    )
    ```

    The fields `remote_url` and `pants_address` are set by Pants if the `coord` field matches a
    `jvm_artifact` that had either the `url` or `jar` fields set.
    """

    coord: Coordinate
    file_name: str
    direct_dependencies: Coordinates
    dependencies: Coordinates
    file_digest: FileDigest
    remote_url: str | None = None
    pants_address: str | None = None

    @classmethod
    def from_json_dict(cls, entry) -> CoursierLockfileEntry:
        """Construct a CoursierLockfileEntry from its JSON dictionary representation."""
        # Note the key-name asymmetry: dependency lists use Coursier's
        # camelCase names, while Pants-added keys are snake_case.
        return cls(
            coord=Coordinate.from_json_dict(entry["coord"]),
            file_name=entry["file_name"],
            direct_dependencies=Coordinates(
                Coordinate.from_json_dict(d) for d in entry["directDependencies"]
            ),
            dependencies=Coordinates(Coordinate.from_json_dict(d) for d in entry["dependencies"]),
            file_digest=FileDigest(
                fingerprint=entry["file_digest"]["fingerprint"],
                serialized_bytes_length=entry["file_digest"]["serialized_bytes_length"],
            ),
            # optional keys: absent for artifacts that came from a plain resolve
            remote_url=entry.get("remote_url"),
            pants_address=entry.get("pants_address"),
        )

    def to_json_dict(self) -> dict[str, Any]:
        """Export this CoursierLockfileEntry to a JSON object."""
        # Inverse of from_json_dict; key names must stay in sync with it.
        return dict(
            coord=self.coord.to_json_dict(),
            directDependencies=[coord.to_json_dict() for coord in self.direct_dependencies],
            dependencies=[coord.to_json_dict() for coord in self.dependencies],
            file_name=self.file_name,
            file_digest=dict(
                fingerprint=self.file_digest.fingerprint,
                serialized_bytes_length=self.file_digest.serialized_bytes_length,
            ),
            remote_url=self.remote_url,
            pants_address=self.pants_address,
        )
@dataclass(frozen=True)
class CoursierResolvedLockfile:
    """An in-memory representation of Pants' Coursier lockfile format.

    All coordinates in the resolved lockfile will be compatible, so we do not need to do version
    testing when looking up coordinates.
    """

    entries: tuple[CoursierLockfileEntry, ...]
    metadata: JVMLockfileMetadata | None = None

    @classmethod
    def _coordinate_not_found(cls, key: CoursierResolveKey, coord: Coordinate) -> CoursierError:
        # TODO: After fixing https://github.com/pantsbuild/pants/issues/13496, coordinate matches
        # should become exact, and this error message will capture all cases of stale lockfiles.
        return CoursierError(
            f"{coord} was not present in resolve `{key.name}` at `{key.path}`.\n"
            f"If you have recently added new `{JvmArtifactTarget.alias}` targets, you might "
            f"need to update your lockfile by running `coursier-resolve --names={key.name}`."
        )

    def direct_dependencies(
        self, key: CoursierResolveKey, coord: Coordinate
    ) -> tuple[CoursierLockfileEntry, tuple[CoursierLockfileEntry, ...]]:
        """Return the entry for the given Coordinate, and for its direct dependencies."""
        # Entries are keyed on (group, artifact) only — the version is ignored,
        # consistent with the class docstring's compatibility guarantee.
        entries = {(i.coord.group, i.coord.artifact): i for i in self.entries}
        entry = entries.get((coord.group, coord.artifact))
        if entry is None:
            raise self._coordinate_not_found(key, coord)
        return (entry, tuple(entries[(i.group, i.artifact)] for i in entry.direct_dependencies))

    def dependencies(
        self, key: CoursierResolveKey, coord: Coordinate
    ) -> tuple[CoursierLockfileEntry, tuple[CoursierLockfileEntry, ...]]:
        """Return the entry for the given Coordinate, and for its transitive dependencies."""
        # Same (group, artifact) keying as direct_dependencies, but walks the
        # precomputed transitive `dependencies` list of the entry.
        entries = {(i.coord.group, i.coord.artifact): i for i in self.entries}
        entry = entries.get((coord.group, coord.artifact))
        if entry is None:
            raise self._coordinate_not_found(key, coord)
        return (entry, tuple(entries[(i.group, i.artifact)] for i in entry.dependencies))

    @classmethod
    def from_toml(cls, lockfile: str | bytes) -> CoursierResolvedLockfile:
        """Constructs a CoursierResolvedLockfile from it's TOML + metadata comment representation.

        The toml file should consist of an `[entries]` block, followed by several entries.
        """
        lockfile_str: str
        lockfile_bytes: bytes
        # Metadata parsing needs bytes; TOML parsing needs str — keep both forms.
        if isinstance(lockfile, str):
            lockfile_str = lockfile
            lockfile_bytes = lockfile.encode("utf-8")
        else:
            lockfile_str = lockfile.decode("utf-8")
            lockfile_bytes = lockfile
        contents = toml.loads(lockfile_str)
        entries = tuple(
            CoursierLockfileEntry.from_json_dict(entry) for entry in (contents["entries"])
        )
        # Metadata lives in a header comment, not in the TOML structure itself.
        metadata = JVMLockfileMetadata.from_lockfile(lockfile_bytes)
        return cls(
            entries=entries,
            metadata=metadata,
        )

    @classmethod
    def from_serialized(cls, lockfile: str | bytes) -> CoursierResolvedLockfile:
        """Construct a CoursierResolvedLockfile from its serialized representation (either TOML with
        attached metadata, or old-style JSON.)."""
        return cls.from_toml(lockfile)

    def to_serialized(self) -> bytes:
        """Export this CoursierResolvedLockfile to a human-readable serialized form.

        This serialized form is intended to be checked in to the user's repo as a hermetic snapshot
        of a Coursier resolved JVM classpath.
        """
        # NOTE: metadata is not written here; only the entries are serialized.
        lockfile = {
            "entries": [entry.to_json_dict() for entry in self.entries],
        }
        return toml.dumps(lockfile).encode("utf-8")
def classpath_dest_filename(coord: str, src_filename: str) -> str:
    """Calculates the destination filename on the classpath for the given source filename and coord.

    TODO: This is duplicated in `COURSIER_POST_PROCESSING_SCRIPT`.
    """
    base = coord.replace(":", "_")
    extension = os.path.splitext(src_filename)[1]
    return f"{base}{extension}"
@dataclass(frozen=True)
class CoursierResolveInfo:
    coord_arg_strings: FrozenSet[str]
    extra_args: tuple[str, ...]
    digest: Digest

    @property
    def argv(self) -> Iterable[str]:
        """Return coursier arguments that can be used to compute or fetch this resolve.

        Must be used in concert with `digest`.
        """
        yield from self.coord_arg_strings
        yield from self.extra_args
@rule
async def prepare_coursier_resolve_info(
    artifact_requirements: ArtifactRequirements,
) -> CoursierResolveInfo:
    """Convert artifact requirements into the coursier argv + input digest
    needed to compute or fetch the resolve."""
    # Transform requirements that correspond to local JAR files into coordinates with `file:/`
    # URLs, and put the files in the place specified by the URLs.
    no_jars: List[ArtifactRequirement] = []
    jars: List[Tuple[ArtifactRequirement, JvmArtifactJarSourceField]] = []
    extra_args: List[str] = []
    LOCAL_EXCLUDE_FILE = "PANTS_RESOLVE_EXCLUDES"
    # Partition requirements into plain coordinates and local-jar-backed ones.
    for req in artifact_requirements:
        jar = req.jar
        if not jar:
            no_jars.append(req)
        else:
            jars.append((req, jar))
    # Collect per-requirement exclusions into a side file passed to coursier
    # via `--local-exclude-file`.
    excludes = [
        (req.coordinate, exclude)
        for req in artifact_requirements
        for exclude in (req.excludes or [])
    ]
    excludes_digest = EMPTY_DIGEST
    if excludes:
        excludes_file_content = FileContent(
            LOCAL_EXCLUDE_FILE,
            "\n".join(
                f"{coord.group}:{coord.artifact}--{exclude}" for (coord, exclude) in excludes
            ).encode("utf-8"),
        )
        excludes_digest = await Get(Digest, CreateDigest([excludes_file_content]))
        extra_args += ["--local-exclude-file", LOCAL_EXCLUDE_FILE]
    jar_files = await Get(SourceFiles, SourceFilesRequest(i[1] for i in jars))
    jar_file_paths = jar_files.snapshot.files
    # NOTE(review): assumes jar_file_paths aligns one-to-one with `jars` in
    # order, so each requirement is rewritten to its own file URL — confirm.
    resolvable_jar_requirements = [
        dataclasses.replace(
            req, jar=None, url=f"file:{Coursier.working_directory_placeholder}/{path}"
        )
        for req, path in zip((i[0] for i in jars), jar_file_paths)
    ]
    to_resolve = chain(no_jars, resolvable_jar_requirements)
    digest = await Get(Digest, MergeDigests([jar_files.snapshot.digest, excludes_digest]))
    return CoursierResolveInfo(
        coord_arg_strings=frozenset(req.to_coord_arg_str() for req in to_resolve),
        digest=digest,
        extra_args=tuple(extra_args),
    )
@rule(level=LogLevel.DEBUG)
async def coursier_resolve_lockfile(
    artifact_requirements: ArtifactRequirements,
) -> CoursierResolvedLockfile:
    """Run `coursier fetch ...` against a list of Maven coordinates and capture the result.

    This rule does two things in a single Process invocation:

        * Runs `coursier fetch` to let Coursier do the heavy lifting of resolving
          dependencies and downloading resolved artifacts (jars, etc).
        * Copies the resolved artifacts into the Process output directory, capturing
          the artifacts as content-addressed `Digest`s.

    It's important that this happens in the same process, since the process isn't
    guaranteed to run on the same machine as the rule, nor is a subsequent process
    invocation. This guarantees that whatever Coursier resolved, it was fully
    captured into Pants' content addressed artifact storage.

    Note however that we still get the benefit of Coursier's "global" cache if it
    had already been run on the machine where the `coursier fetch` runs, so rerunning
    `coursier fetch` tends to be fast in practice.

    Finally, this rule bundles up the result into a `CoursierResolvedLockfile`. This
    data structure encapsulates everything necessary to either materialize the
    resolved dependencies to a classpath for Java invocations, or to write the
    lockfile out to the workspace to hermetically freeze the result of the resolve.
    """
    if len(artifact_requirements) == 0:
        return CoursierResolvedLockfile(entries=())
    coursier_resolve_info = await Get(
        CoursierResolveInfo, ArtifactRequirements, artifact_requirements
    )
    coursier_report_file_name = "coursier_report.json"
    process_result = await Get(
        ProcessResult,
        CoursierFetchProcess(
            args=(
                coursier_report_file_name,
                *coursier_resolve_info.argv,
            ),
            input_digest=coursier_resolve_info.digest,
            output_directories=("classpath",),
            output_files=(coursier_report_file_name,),
            description=(
                "Running `coursier fetch` against "
                f"{pluralize(len(artifact_requirements), 'requirement')}: "
                f"{', '.join(req.to_coord_arg_str() for req in artifact_requirements)}"
            ),
        ),
    )
    # The JSON report is the authoritative record of what Coursier resolved.
    report_digest = await Get(
        Digest, DigestSubset(process_result.output_digest, PathGlobs([coursier_report_file_name]))
    )
    report_contents = await Get(DigestContents, Digest, report_digest)
    report = json.loads(report_contents[0].content)
    # Each resolved dependency was copied under `classpath/` by the fetch process;
    # subset each artifact out individually so it is content-addressed on its own.
    artifact_file_names = tuple(
        classpath_dest_filename(dep["coord"], dep["file"]) for dep in report["dependencies"]
    )
    artifact_output_paths = tuple(f"classpath/{file_name}" for file_name in artifact_file_names)
    artifact_digests = await MultiGet(
        Get(Digest, DigestSubset(process_result.output_digest, PathGlobs([output_path])))
        for output_path in artifact_output_paths
    )
    stripped_artifact_digests = await MultiGet(
        Get(Digest, RemovePrefix(artifact_digest, "classpath"))
        for artifact_digest in artifact_digests
    )
    artifact_file_digests = await MultiGet(
        Get(FileDigest, ExtractFileDigest(stripped_artifact_digest, file_name))
        for stripped_artifact_digest, file_name in zip(
            stripped_artifact_digests, artifact_file_names
        )
    )
    # First pass: one lockfile entry per report dependency, carrying its file digest.
    first_pass_lockfile = CoursierResolvedLockfile(
        entries=tuple(
            CoursierLockfileEntry(
                coord=Coordinate.from_coord_str(dep["coord"]),
                direct_dependencies=Coordinates(
                    Coordinate.from_coord_str(dd) for dd in dep["directDependencies"]
                ),
                dependencies=Coordinates(Coordinate.from_coord_str(d) for d in dep["dependencies"]),
                file_name=file_name,
                file_digest=artifact_file_digest,
            )
            for dep, file_name, artifact_file_digest in zip(
                report["dependencies"], artifact_file_names, artifact_file_digests
            )
        )
    )
    # Second pass: re-attach requirement-level metadata (remote URL, owning
    # `jvm_artifact` address) to the entries for first-party requirements.
    inverted_artifacts = {req.coordinate: req for req in artifact_requirements}
    new_entries = []
    for entry in first_pass_lockfile.entries:
        req = inverted_artifacts.get(entry.coord)
        if req:
            address = req.jar.address if req.jar else None
            address_spec = address.spec if address else None
            entry = dataclasses.replace(entry, remote_url=req.url, pants_address=address_spec)
        new_entries.append(entry)
    return CoursierResolvedLockfile(entries=tuple(new_entries))
@rule(desc="Fetch with coursier")
async def fetch_with_coursier(request: CoursierFetchRequest) -> FallibleClasspathEntry:
    """Fetch a single `jvm_artifact` component's classpath entry from its resolve's lockfile."""
    # TODO: Loading this per JvmArtifact.
    lockfile = await Get(CoursierResolvedLockfile, CoursierResolveKey, request.resolve)
    requirement = ArtifactRequirement.from_jvm_artifact_target(request.component.representative)
    # Fail loudly if the lockfile predates a change to this artifact's requirement.
    if lockfile.metadata and not lockfile.metadata.is_valid_for(
        [requirement], LockfileContext.USER
    ):
        raise ValueError(
            f"Requirement `{requirement.to_coord_arg_str()}` has changed since the lockfile "
            f"for {request.resolve.path} was generated. Run `{bin_name()} generate-lockfiles` to update your "
            "lockfile based on the new requirements."
        )
    # All of the transitive dependencies are exported.
    # TODO: Expose an option to control whether this exports only the root, direct dependencies,
    # transitive dependencies, etc.
    assert len(request.component.members) == 1, "JvmArtifact does not have dependencies."
    root_entry, transitive_entries = lockfile.dependencies(
        request.resolve,
        requirement.coordinate,
    )
    classpath_entries = await MultiGet(
        Get(ClasspathEntry, CoursierLockfileEntry, entry)
        for entry in (root_entry, *transitive_entries)
    )
    exported_digest = await Get(Digest, MergeDigests(cpe.digest for cpe in classpath_entries))
    return FallibleClasspathEntry(
        description=str(request.component),
        result=CompileResult.SUCCEEDED,
        output=ClasspathEntry.merge(exported_digest, classpath_entries),
        exit_code=0,
    )
class ResolvedClasspathEntries(Collection[ClasspathEntry]):
    """A collection of resolved classpath entries (one per fetched lockfile entry)."""
@rule
async def coursier_fetch_one_coord(
    request: CoursierLockfileEntry,
) -> ClasspathEntry:
    """Run `coursier fetch --intransitive` to fetch a single artifact.

    This rule exists to permit efficient subsetting of a "global" classpath
    in the form of a lockfile. Callers can determine what subset of dependencies
    from the lockfile are needed for a given target, then request those
    lockfile entries individually.

    By fetching only one entry at a time, we maximize our cache efficiency. If instead
    we fetched the entire subset that the caller wanted, there would be a different cache
    key for every possible subset.

    This rule also guarantees exact reproducibility. If all caches have been
    removed, `coursier fetch` will re-download the artifact, and this rule will
    confirm that what was downloaded matches exactly (by content digest) what
    was specified in the lockfile (what Coursier originally downloaded).
    """
    # Prepare any URL- or JAR-specifying entries for use with Coursier
    req: ArtifactRequirement
    if request.pants_address:
        # The lockfile entry points at a local `jvm_artifact` target that supplies the jar.
        targets = await Get(
            Targets, UnparsedAddressInputs([request.pants_address], owning_address=None)
        )
        req = ArtifactRequirement(request.coord, jar=targets[0][JvmArtifactJarSourceField])
    else:
        req = ArtifactRequirement(request.coord, url=request.remote_url)
    coursier_resolve_info = await Get(
        CoursierResolveInfo,
        ArtifactRequirements([req]),
    )
    coursier_report_file_name = "coursier_report.json"
    process_result = await Get(
        ProcessResult,
        CoursierFetchProcess(
            args=(
                coursier_report_file_name,
                "--intransitive",
                *coursier_resolve_info.argv,
            ),
            input_digest=coursier_resolve_info.digest,
            output_directories=("classpath",),
            output_files=(coursier_report_file_name,),
            description=f"Fetching with coursier: {request.coord.to_coord_str()}",
        ),
    )
    report_digest = await Get(
        Digest, DigestSubset(process_result.output_digest, PathGlobs([coursier_report_file_name]))
    )
    report_contents = await Get(DigestContents, Digest, report_digest)
    report = json.loads(report_contents[0].content)
    # An intransitive fetch should name exactly the one artifact we asked for.
    report_deps = report["dependencies"]
    if len(report_deps) == 0:
        raise CoursierError("Coursier fetch report has no dependencies (i.e. nothing was fetched).")
    elif len(report_deps) > 1:
        raise CoursierError(
            "Coursier fetch report has multiple dependencies, but exactly 1 was expected."
        )
    dep = report_deps[0]
    resolved_coord = Coordinate.from_coord_str(dep["coord"])
    if resolved_coord != request.coord:
        raise CoursierError(
            f'Coursier resolved coord "{resolved_coord.to_coord_str()}" does not match requested coord "{request.coord.to_coord_str()}".'
        )
    classpath_dest_name = classpath_dest_filename(dep["coord"], dep["file"])
    classpath_dest = f"classpath/{classpath_dest_name}"
    resolved_file_digest = await Get(
        Digest, DigestSubset(process_result.output_digest, PathGlobs([classpath_dest]))
    )
    stripped_digest = await Get(Digest, RemovePrefix(resolved_file_digest, "classpath"))
    file_digest = await Get(
        FileDigest,
        ExtractFileDigest(stripped_digest, classpath_dest_name),
    )
    # Reproducibility check: the fetched bytes must match the digest recorded in the lockfile.
    if file_digest != request.file_digest:
        raise CoursierError(
            f"Coursier fetch for '{resolved_coord}' succeeded, but fetched artifact {file_digest} did not match the expected artifact: {request.file_digest}."
        )
    return ClasspathEntry(digest=stripped_digest, filenames=(classpath_dest_name,))
@rule(level=LogLevel.DEBUG)
async def coursier_fetch_lockfile(lockfile: CoursierResolvedLockfile) -> ResolvedClasspathEntries:
    """Fetch every artifact named by a lockfile, issuing one `Get` per entry."""
    fetched = await MultiGet(
        Get(ClasspathEntry, CoursierLockfileEntry, lockfile_entry)
        for lockfile_entry in lockfile.entries
    )
    return ResolvedClasspathEntries(fetched)
@rule
async def select_coursier_resolve_for_targets(
    coarsened_targets: CoarsenedTargets, jvm: JvmSubsystem
) -> CoursierResolveKey:
    """Selects and validates (transitively) a single resolve for a set of roots in a compile graph.

    In most cases, a `CoursierResolveKey` should be requested for a single `CoarsenedTarget` root,
    which avoids coupling un-related roots unnecessarily. But in other cases, a single compatible
    resolve is required for multiple roots (such as when running a `repl` over unrelated code), and
    in that case there might be multiple CoarsenedTargets.
    """
    targets = [t for ct in coarsened_targets.closure() for t in ct.members]

    # Gather the distinct resolves used across the closure: the roots are only
    # compatible when exactly one (non-empty) resolve name is in use.
    resolves_in_use = {
        tgt[JvmResolveField].normalized_value(jvm)
        for tgt in targets
        if tgt.has_field(JvmResolveField)
    }
    resolve = next(iter(resolves_in_use)) if len(resolves_in_use) == 1 else None
    if not resolve:
        raise NoCompatibleResolve(
            jvm, "The selected targets did not have a resolve in common", targets
        )

    # Load the resolve.
    resolve_path = jvm.resolves[resolve]
    lockfile_source = PathGlobs(
        [resolve_path],
        glob_match_error_behavior=GlobMatchErrorBehavior.error,
        description_of_origin=f"The resolve `{resolve}` from `[jvm].resolves`",
    )
    resolve_digest = await Get(Digest, PathGlobs, lockfile_source)
    return CoursierResolveKey(resolve, resolve_path, resolve_digest)
@rule
async def get_coursier_lockfile_for_resolve(
    coursier_resolve: CoursierResolveKey,
) -> CoursierResolvedLockfile:
    """Hydrate and deserialize the lockfile named by a `CoursierResolveKey`."""
    digest_contents = await Get(DigestContents, Digest, coursier_resolve.digest)
    # The resolve digest captures exactly one file: the serialized lockfile.
    return CoursierResolvedLockfile.from_serialized(digest_contents[0].content)
@dataclass(frozen=True)
class ToolClasspathRequest:
    """A request to set up the classpath for a JVM tool by fetching artifacts and merging the
    classpath.

    Exactly one of `lockfile` or `artifact_requirements` must be provided.

    :param prefix: if set, should be a relative directory that will
        be prepended to every classpath element. This is useful for
        keeping all classpath elements isolated under a single directory
        in a process invocation, where other inputs on the process's
        root directory might interfere with un-prefixed classpath
        entries (or vice versa).
    """

    prefix: str | None = None
    lockfile: GenerateJvmLockfileFromTool | None = None
    artifact_requirements: ArtifactRequirements = ArtifactRequirements()

    def __post_init__(self) -> None:
        # The two resolution sources are mutually exclusive: reject "both set"
        # and "neither set" alike.
        if bool(self.lockfile) == bool(self.artifact_requirements):
            raise AssertionError(
                f"Exactly one of `lockfile` or `artifact_requirements` must be provided: {self}"
            )
@dataclass(frozen=True)
class ToolClasspath:
    """A fully fetched and merged classpath for running a JVM tool."""

    # The merged snapshot of every resolved classpath entry.
    content: Snapshot

    @property
    def digest(self) -> Digest:
        return self.content.digest

    def classpath_entries(self, root: str | None = None) -> Iterator[str]:
        """Returns optionally prefixed classpath entry filenames.

        :param root: if set, will be prepended to all entries. This is useful
            if the process working directory is not the same as the root
            directory for the process input `Digest`.
        """
        if root is None:
            yield from self.content.files
            return
        for file_name in self.content.files:
            yield os.path.join(root, file_name)
@rule(level=LogLevel.DEBUG)
async def materialize_classpath_for_tool(request: ToolClasspathRequest) -> ToolClasspath:
    """Resolve, fetch, and merge the classpath for a JVM tool.

    Resolves either from ad hoc `artifact_requirements`, or from the tool's lockfile:
    the built-in default lockfile shipped as a package resource, or a user-provided
    lockfile on disk (which is validated against the tool's current requirements).
    """
    if request.artifact_requirements:
        resolution = await Get(
            CoursierResolvedLockfile, ArtifactRequirements, request.artifact_requirements
        )
    else:
        lockfile_req = request.lockfile
        assert lockfile_req is not None
        regen_command = f"`{GenerateLockfilesSubsystem.name} --resolve={lockfile_req.resolve_name}`"
        if lockfile_req.lockfile_dest == DEFAULT_TOOL_LOCKFILE:
            # The default lockfile ships as a package resource.
            lockfile_bytes = importlib.resources.read_binary(
                *lockfile_req.default_lockfile_resource
            )
            resolution = CoursierResolvedLockfile.from_serialized(lockfile_bytes)
        else:
            lockfile_snapshot = await Get(Snapshot, PathGlobs([lockfile_req.lockfile_dest]))
            if not lockfile_snapshot.files:
                # Fix: the original message concatenated "...option_name}." and
                # "Run ..." with no separating space.
                raise ValueError(
                    f"No lockfile found at {lockfile_req.lockfile_dest}, which is configured "
                    f"by the option {lockfile_req.lockfile_option_name}. "
                    f"Run {regen_command} to generate it."
                )
            resolution = await Get(
                CoursierResolvedLockfile,
                CoursierResolveKey(
                    name=lockfile_req.resolve_name,
                    path=lockfile_req.lockfile_dest,
                    digest=lockfile_snapshot.digest,
                ),
            )

            # Validate that the lockfile is correct.
            lockfile_inputs = await Get(
                ArtifactRequirements,
                GatherJvmCoordinatesRequest(
                    lockfile_req.artifact_inputs, lockfile_req.artifact_option_name
                ),
            )
            if resolution.metadata and not resolution.metadata.is_valid_for(
                lockfile_inputs, LockfileContext.TOOL
            ):
                raise ValueError(
                    f"The lockfile {lockfile_req.lockfile_dest} (configured by the option "
                    f"{lockfile_req.lockfile_option_name}) was generated with different requirements "
                    f"than are currently set via {lockfile_req.artifact_option_name}. Run "
                    f"{regen_command} to regenerate the lockfile."
                )

    # Fetch every entry, then merge the per-entry digests into one snapshot.
    classpath_entries = await Get(ResolvedClasspathEntries, CoursierResolvedLockfile, resolution)
    merged_snapshot = await Get(
        Snapshot, MergeDigests(classpath_entry.digest for classpath_entry in classpath_entries)
    )
    if request.prefix is not None:
        merged_snapshot = await Get(Snapshot, AddPrefix(merged_snapshot.digest, request.prefix))
    return ToolClasspath(merged_snapshot)
def rules():
    """Return the rules and union registrations contributed by this module."""
    registrations = list(collect_rules())
    registrations.extend(coursier_setup.rules())
    registrations.append(UnionRule(ClasspathEntryRequest, CoursierFetchRequest))
    return registrations
| pantsbuild/pants | src/python/pants/jvm/resolve/coursier_fetch.py | Python | apache-2.0 | 30,567 |
#
# CORE
# Copyright (c)2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# authors: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
#
# Copyright (c) 2014 Benocs GmbH
#
# author: Robert Wuttke <robert@benocs.com>
#
# See the LICENSE file included in this distribution.
#
'''
conf.py: common support for configurable objects
'''
import string
from core.api import coreapi
from functools import reduce
class ConfigurableManager(object):
    ''' A generic class for managing Configurables. This class can register
        with a session to receive Config Messages for setting some parameters
        for itself or for the Configurables that it manages.
    '''
    # name corresponds to configuration object field
    _name = ""
    # type corresponds with register message types
    _type = None

    def __init__(self, session=None):
        ''' Register with the session so Config Messages addressed to
            self._name are dispatched to self.configure().
        '''
        self.session = session
        self.session.addconfobj(self._name, self._type, self.configure)
        # Configurable (conftype, values) tuples, indexed by node number
        self.configs = {}

    def configure(self, session, msg):
        ''' Handle configure messages. The configuration message sent to a
            ConfigurableManager usually is used to:
            1. Request a list of Configurables (request flag)
            2. Reset manager and clear configs (reset flag)
            3. Send values that configure the manager or one of its
               Configurables

            Returns any reply messages.
        '''
        objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ)
        conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE)
        if conftype == coreapi.CONF_TYPE_FLAGS_REQUEST:
            return self.configure_request(msg)
        elif conftype == coreapi.CONF_TYPE_FLAGS_RESET:
            # a reset addressed to "all" or to this manager clears stored configs
            if objname == "all" or objname == self._name:
                return self.configure_reset(msg)
        else:
            return self.configure_values(msg,
                                         msg.gettlv(coreapi.CORE_TLV_CONF_VALUES))

    def configure_request(self, msg):
        ''' Request configuration data. Subclasses override to reply with
            their Configurables; the base implementation replies with nothing.
        '''
        return None

    def configure_reset(self, msg):
        ''' By default, resets this manager to clear configs.
        '''
        return self.reset()

    def configure_values(self, msg, values):
        ''' Values have been sent to this manager. Subclasses override to
            consume them; the base implementation ignores them.
        '''
        return None

    def configure_values_keyvalues(self, msg, values, target, keys):
        ''' Helper that can be used for configure_values for parsing in
            'key=value' strings from a values field. The key name must be
            in the keys list, and target.key=value is set.
        '''
        if values is None:
            return None
        kvs = values.split('|')
        for i, kv in enumerate(kvs):
            try:
                # key=value
                (key, value) = kv.split('=', 1)
            except ValueError:
                # value only: map positionally onto the keys list.
                # (Bug fix: use the enumeration index rather than
                # kvs.index(kv), which mapped duplicate values onto the
                # first matching position, silently dropping later keys.)
                key = keys[i]
                value = kv
            if key not in keys:
                raise ValueError("invalid key: %s" % key)
            setattr(target, key, value)
        return None

    def reset(self):
        return None

    def setconfig(self, nodenum, conftype, values):
        ''' add configuration values for a node to a dictionary; values are
            usually received from a Configuration Message, and may refer to a
            node for which no object exists yet
        '''
        conflist = []
        if nodenum in self.configs:
            oldlist = self.configs[nodenum]
            found = False
            for (t, v) in oldlist:
                if (t == conftype):
                    # replace existing config
                    found = True
                    conflist.append((conftype, values))
                else:
                    conflist.append((t, v))
            if not found:
                conflist.append((conftype, values))
        else:
            conflist.append((conftype, values))
        self.configs[nodenum] = conflist

    def getconfig(self, nodenum, conftype, defaultvalues):
        ''' get configuration values for a node; if the values don't exist in
            our dictionary then return the default values supplied
        '''
        if nodenum in self.configs:
            # return configured values
            conflist = self.configs[nodenum]
            for (t, v) in conflist:
                # conftype of None matches the first stored config
                if (conftype is None) or (t == conftype):
                    return (t, v)
        # return default values provided (may be None)
        return (conftype, defaultvalues)

    def getallconfigs(self, use_clsmap=True):
        ''' Return (nodenum, conftype, values) tuples for all stored configs.
            Used when reconnecting to a session.
        '''
        # NOTE(review): self._modelclsmap is expected to be supplied by
        # subclasses when use_clsmap is True -- confirm before calling on
        # the base class.
        r = []
        for nodenum in self.configs:
            for (t, v) in self.configs[nodenum]:
                if use_clsmap:
                    t = self._modelclsmap[t]
                r.append((nodenum, t, v))
        return r

    def clearconfig(self, nodenum):
        ''' remove configuration values for the specified node;
            when nodenum is None, remove all configuration values
        '''
        if nodenum is None:
            self.configs = {}
            return
        if nodenum in self.configs:
            self.configs.pop(nodenum)

    def setconfig_keyvalues(self, nodenum, conftype, keyvalues):
        ''' keyvalues list of (key, value) tuples; unknown keys are skipped
            with a warning and unsupplied values keep the model defaults
        '''
        if conftype not in self._modelclsmap:
            self.warn("Unknown model type '%s'" % (conftype))
            return
        model = self._modelclsmap[conftype]
        keys = model.getnames()
        # defaults are merged with supplied values here
        values = list(model.getdefaultvalues())
        for key, value in keyvalues:
            if key not in keys:
                self.warn("Skipping unknown configuration key for %s: '%s'" %
                          (conftype, key))
                continue
            i = keys.index(key)
            values[i] = value
        self.setconfig(nodenum, conftype, values)

    def getmodels(self, n):
        ''' Return a list of model classes and values for a net if one has been
            configured. This is invoked when exporting a session to XML.
            This assumes self.configs contains an iterable of (model-names, values)
            and a self._modelclsmapdict exists.
        '''
        r = []
        if n.objid in self.configs:
            v = self.configs[n.objid]
            for model in v:
                cls = self._modelclsmap[model[0]]
                vals = model[1]
                r.append((cls, vals))
        return r

    def info(self, msg):
        ''' Convenience wrapper for the session's info logger. '''
        self.session.info(msg)

    def warn(self, msg):
        ''' Convenience wrapper for the session's warn logger. '''
        self.session.warn(msg)
class Configurable(object):
    ''' A generic class for managing configuration parameters.
        Parameters are sent via Configuration Messages, which allow the GUI
        to build dynamic dialogs depending on what is being configured.
    '''
    _name = ""
    # Configuration items:
    # ('name', 'type', 'default', 'possible-value-list', 'caption')
    _confmatrix = []
    _confgroups = None
    _bitmap = None

    def __init__(self, session=None, objid=None):
        self.session = session
        self.objid = objid

    def reset(self):
        # subclasses may override to restore default state
        pass

    def register(self):
        # subclasses may override to register with the session
        pass

    @classmethod
    def getdefaultvalues(cls):
        ''' Return the tuple of default values, one per _confmatrix row. '''
        return tuple( [x[2] for x in cls._confmatrix] )

    @classmethod
    def getnames(cls):
        ''' Return the tuple of parameter names, one per _confmatrix row. '''
        return tuple( [x[0] for x in cls._confmatrix] )

    @classmethod
    def configure(cls, mgr, msg):
        ''' Handle configuration messages for this object.
        '''
        reply = None
        nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE)
        objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ)
        conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE)
        if mgr.verbose:
            mgr.info("received configure message for %s" % cls._name)
        if conftype == coreapi.CONF_TYPE_FLAGS_REQUEST:
            if mgr.verbose:
                mgr.info("replying to configure request for %s model" %
                    cls._name)
            # when object name is "all", the reply to this request may be None
            # if this node has not been configured for this model; otherwise we
            # reply with the defaults for this model
            if objname == "all":
                defaults = None
                typeflags = coreapi.CONF_TYPE_FLAGS_UPDATE
            else:
                defaults = cls.getdefaultvalues()
                typeflags = coreapi.CONF_TYPE_FLAGS_NONE
            values = mgr.getconfig(nodenum, cls._name, defaults)[1]
            if values is None:
                # node has no active config for this model (don't send defaults)
                return None
            # reply with config options
            reply = cls.toconfmsg(0, nodenum, typeflags, values)
        elif conftype == coreapi.CONF_TYPE_FLAGS_RESET:
            if objname == "all":
                mgr.clearconfig(nodenum)
        #elif conftype == coreapi.CONF_TYPE_FLAGS_UPDATE:
        else:
            # store the configuration values for later use, when the node
            # object has been created
            if objname is None:
                mgr.info("no configuration object for node %s" % nodenum)
                return None
            values_str = msg.gettlv(coreapi.CORE_TLV_CONF_VALUES)
            defaults = cls.getdefaultvalues()
            if values_str is None:
                # use default or preconfigured values
                values = mgr.getconfig(nodenum, cls._name, defaults)[1]
            else:
                # use new values supplied from the conf message
                values = values_str.split('|')
                # determine new or old style config
                new = cls.haskeyvalues(values)
                if new:
                    # new style: merge key=value pairs over the defaults
                    new_values = list(defaults)
                    keys = cls.getnames()
                    for v in values:
                        key, value = v.split('=', 1)
                        try:
                            new_values[keys.index(key)] = value
                        except ValueError:
                            mgr.info("warning: ignoring invalid key '%s'" % key)
                    values = new_values
            mgr.setconfig(nodenum, objname, values)
        return reply

    @classmethod
    def toconfmsg(cls, flags, nodenum, typeflags, values):
        ''' Convert this class to a Config API message. Some TLVs are defined
            by the class, but node number, conf type flags, and values must
            be passed in.
        '''
        keys = cls.getnames()
        # NOTE(review): py3 map() stops at the shortest iterable, so a values
        # tuple shorter than keys silently truncates the message -- confirm
        # callers always pass one value per _confmatrix row.
        keyvalues = list(map(lambda a,b: "%s=%s" % (a,b), keys, values))
        values_str = '|'.join(keyvalues)
        tlvdata = b""
        if nodenum is not None:
            tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_NODE,
                                                nodenum)
        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OBJ,
                                            cls._name)
        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_TYPE,
                                            typeflags)
        datatypes = tuple( [x[1] for x in cls._confmatrix] )
        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_DATA_TYPES,
                                            datatypes)
        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_VALUES,
                                            values_str)
        captions = reduce( lambda a,b: a + '|' + b, \
                           [x[4] for x in cls._confmatrix])
        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_CAPTIONS,
                                            captions)
        possiblevals = reduce( lambda a,b: a + '|' + b, \
                               [x[3] for x in cls._confmatrix])
        tlvdata += coreapi.CoreConfTlv.pack(
            coreapi.CORE_TLV_CONF_POSSIBLE_VALUES, possiblevals)
        if cls._bitmap is not None:
            tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_BITMAP,
                                                cls._bitmap)
        if cls._confgroups is not None:
            tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_GROUPS,
                                                cls._confgroups)
        msg = coreapi.CoreConfMessage.pack(flags, tlvdata)
        return msg

    @staticmethod
    def booltooffon(value):
        ''' Convenience helper turns bool into on (True) or off (False) string.
        '''
        # NOTE(review): the comparison is string-based and case-sensitive;
        # an actual bool True, or "True"/"On", yields "off".
        if value == "1" or value == "true" or value == "on":
            return "on"
        else:
            return "off"

    @staticmethod
    def offontobool(value):
        ''' Convert an "on"/"off" string (case-insensitive) to 1/0; any other
            value is returned unchanged.
        '''
        if type(value) == str:
            if value.lower() == "on":
                return 1
            elif value.lower() == "off":
                return 0
        return value

    def valueof(self, name, values):
        ''' Helper to return a value by the name defined in confmatrix.
            Checks if it is boolean'''
        i = self.getnames().index(name)
        if self._confmatrix[i][1] == coreapi.CONF_DATA_TYPE_BOOL and \
           values[i] != "":
            return self.booltooffon( values[i] )
        else:
            return values[i]

    @staticmethod
    def haskeyvalues(values):
        ''' Helper to check for list of key=value pairs versus a plain old
            list of values. Returns True if all elements are "key=value".
        '''
        if len(values) == 0:
            return False
        for v in values:
            if "=" not in v:
                return False
        return True

    def getkeyvaluelist(self):
        ''' Helper to return a list of (key, value) tuples. Keys come from
            self._confmatrix and values are instance attributes.
        '''
        r = []
        for k in self.getnames():
            if hasattr(self, k):
                r.append((k, getattr(self, k)))
        return r
| Benocs/core | src/daemon/core/conf.py | Python | bsd-3-clause | 14,235 |
import workflow
"""
PublishPerfectArticle workflow
"""
class workflow_PublishPerfectArticle(workflow.workflow):
    """Workflow that processes a JATS zip article into Drupal nodes.

    The workflow definition is a linear pipeline of activities. Every step
    has the same shape and differs only in its activity name and execution
    timeout, so the steps are built from a table by `_make_step` rather than
    repeating nine near-identical dict literals.
    """

    def __init__(self, settings, logger, conn=None, token=None, decision=None,
                 maximum_page_size=100):
        workflow.workflow.__init__(self, settings, logger, conn, token, decision, maximum_page_size)

        # SWF Defaults
        self.name = "PublishPerfectArticle"
        self.version = "1"
        self.description = "Process JATS zip article to Drupal nodes workflow"
        self.default_execution_start_to_close_timeout = 60 * 5
        self.default_task_start_to_close_timeout = 30

        # Get the input from the JSON decision response
        data = self.get_input()

        # (activity name, execution timeout in seconds), in pipeline order
        step_timeouts = [
            ("PingWorker", 300),
            ("ExpandArticle", 60 * 15),
            ("ApplyVersionNumber", 60 * 10),
            ("ScheduleCrossref", 60 * 5),
            ("ConvertJATS", 60 * 5),
            ("SetPublicationStatus", 60 * 5),
            ("ResizeImages", 60 * 30),
            ("DepositAssets", 60 * 5),
            ("PreparePostEIF", 60 * 5),
        ]

        # JSON format workflow definition, for now - may be from common YAML definition
        workflow_definition = {
            "name": self.name,
            "version": self.version,
            "task_list": self.settings.default_task_list,
            "input": data,

            "start":
            {
                "requirements": None
            },

            "steps": [self._make_step(activity_name, data, timeout)
                      for (activity_name, timeout) in step_timeouts],

            "finish":
            {
                "requirements": None
            }
        }

        self.load_definition(workflow_definition)

    @staticmethod
    def _make_step(activity_name, data, timeout):
        """Return one workflow step definition for `activity_name`.

        `timeout` (seconds) is applied to the heartbeat, schedule-to-close,
        and start-to-close timeouts; the schedule-to-start timeout is a fixed
        300 seconds for every step, matching the original definitions.
        """
        return {
            "activity_type": activity_name,
            "activity_id": activity_name,
            "version": "1",
            "input": data,
            "control": None,
            "heartbeat_timeout": timeout,
            "schedule_to_close_timeout": timeout,
            "schedule_to_start_timeout": 300,
            "start_to_close_timeout": timeout
        }
| jhroot/elife-bot | workflow/workflow_PublishPerfectArticle.py | Python | mit | 5,903 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Models that have physical origins.
"""
import warnings
import numpy as np
from .core import Fittable1DModel
from .parameters import Parameter, InputParameterError
from astropy import constants as const
from astropy import units as u
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ["BlackBody", "Drude1D"]
class BlackBody(Fittable1DModel):
    """
    Blackbody model using the Planck function.

    Parameters
    ----------
    temperature : :class:`~astropy.units.Quantity`
        Blackbody temperature.
    scale : float or :class:`~astropy.units.Quantity`
        Scale factor

    Notes
    -----
    Model formula:

        .. math:: B_{\\nu}(T) = A \\frac{2 h \\nu^{3} / c^{2}}{exp(h \\nu / k T) - 1}

    Examples
    --------
    >>> from astropy.modeling import models
    >>> from astropy import units as u
    >>> bb = models.BlackBody(temperature=5000*u.K)
    >>> bb(6000 * u.AA)  # doctest: +FLOAT_CMP
    <Quantity 1.53254685e-05 erg / (cm2 Hz s sr)>

    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt

        from astropy.modeling.models import BlackBody
        from astropy import units as u
        from astropy.visualization import quantity_support

        bb = BlackBody(temperature=5778*u.K)
        wav = np.arange(1000, 110000) * u.AA
        flux = bb(wav)

        with quantity_support():
            plt.figure()
            plt.semilogx(wav, flux)
            plt.axvline(bb.nu_max.to(u.AA, equivalencies=u.spectral()).value, ls='--')
            plt.show()
    """

    # We parametrize this model with a temperature and a scale.
    temperature = Parameter(default=5000.0, min=0, unit=u.K)
    scale = Parameter(default=1.0, min=0)

    # We allow values without units to be passed when evaluating the model, and
    # in this case the input x values are assumed to be frequencies in Hz.
    _input_units_allow_dimensionless = True

    # We enable the spectral equivalency by default for the spectral axis
    input_units_equivalencies = {"x": u.spectral()}

    def evaluate(self, x, temperature, scale):
        """Evaluate the model.

        Parameters
        ----------
        x : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
            Frequency at which to compute the blackbody. If no units are given,
            this defaults to Hz.
        temperature : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
            Temperature of the blackbody. If no units are given, this defaults
            to Kelvin.
        scale : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
            Desired scale for the blackbody.

        Returns
        -------
        y : number or ndarray
            Blackbody spectrum. The units are determined from the units of
            ``scale``.

        .. note::

            Use `numpy.errstate` to suppress Numpy warnings, if desired.

        .. warning::

            Output values might contain ``nan`` and ``inf``.

        Raises
        ------
        ValueError
            Invalid temperature.

        ZeroDivisionError
            Wavelength is zero (when converting to frequency).
        """
        if not isinstance(temperature, u.Quantity):
            in_temp = u.Quantity(temperature, u.K)
        else:
            in_temp = temperature

        # Convert to units for calculations, also force double precision
        with u.add_enabled_equivalencies(u.spectral() + u.temperature()):
            freq = u.Quantity(x, u.Hz, dtype=np.float64)
            temp = u.Quantity(in_temp, u.K)

        # check the units of scale and setup the output units
        bb_unit = u.erg / (u.cm ** 2 * u.s * u.Hz * u.sr)  # default unit
        # use the scale that was used at initialization for determining the units to return
        # to support returning the right units when fitting where units are stripped
        if hasattr(self.scale, "unit") and self.scale.unit is not None:
            # check that the units on scale are convertible to surface brightness units
            if not self.scale.unit.is_equivalent(bb_unit, u.spectral_density(x)):
                raise ValueError(
                    f"scale units not surface brightness: {self.scale.unit}"
                )
            # use the scale passed to get the value for scaling
            if hasattr(scale, "unit"):
                mult_scale = scale.value
            else:
                mult_scale = scale
            bb_unit = self.scale.unit
        else:
            mult_scale = scale

        # Check if input values are physically possible
        if np.any(temp < 0):
            raise ValueError(f"Temperature should be positive: {temp}")
        if not np.all(np.isfinite(freq)) or np.any(freq <= 0):
            warnings.warn(
                "Input contains invalid wavelength/frequency value(s)",
                AstropyUserWarning,
            )

        log_boltz = const.h * freq / (const.k_B * temp)
        # expm1 keeps numerical accuracy when h*nu << k*T (exp(x) - 1 for small x)
        boltzm1 = np.expm1(log_boltz)

        # Calculate blackbody flux
        bb_nu = 2.0 * const.h * freq ** 3 / (const.c ** 2 * boltzm1) / u.sr
        y = mult_scale * bb_nu.to(bb_unit, u.spectral_density(freq))

        # If the temperature parameter has no unit, we should return a unitless
        # value. This occurs for instance during fitting, since we drop the
        # units temporarily.
        if hasattr(temperature, "unit"):
            return y
        else:
            return y.value

    @property
    def input_units(self):
        # The input units are those of the 'x' value, which should always be
        # Hz. Because we do this, and because input_units_allow_dimensionless
        # is set to True, dimensionless values are assumed to be in Hz.
        return {"x": u.Hz}

    def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
        # Only the temperature carries fixed units; scale units are resolved
        # dynamically in `evaluate`.
        return {"temperature": u.K}

    @property
    def bolometric_flux(self):
        """Bolometric flux."""
        # bolometric flux in the native units of the planck function
        native_bolflux = (
            self.scale.value * const.sigma_sb * self.temperature ** 4 / np.pi
        )
        # return in more "astro" units
        return native_bolflux.to(u.erg / (u.cm ** 2 * u.s))

    @property
    def lambda_max(self):
        """Peak wavelength when the curve is expressed as power density."""
        # Wien displacement law: lambda_max = b_wien / T
        return const.b_wien / self.temperature

    @property
    def nu_max(self):
        """Peak frequency when the curve is expressed as power density."""
        return 2.8214391 * const.k_B * self.temperature / const.h
class Drude1D(Fittable1DModel):
    """
    Drude model based on the behavior of electrons in materials (esp. metals).

    Parameters
    ----------
    amplitude : float
        Peak value
    x_0 : float
        Position of the peak
    fwhm : float
        Full width at half maximum

    Model formula:

    .. math:: f(x) = A \\frac{(fwhm/x_0)^2}{((x/x_0 - x_0/x)^2 + (fwhm/x_0)^2)}

    Examples
    --------
    .. plot::
        :include-source:

        import numpy as np
        import matplotlib.pyplot as plt
        from astropy.modeling.models import Drude1D

        fig, ax = plt.subplots()

        # generate the curves and plot them
        x = np.arange(7.5 , 12.5 , 0.1)
        dmodel = Drude1D(amplitude=1.0, fwhm=1.0, x_0=10.0)
        ax.plot(x, dmodel(x))

        ax.set_xlabel('x')
        ax.set_ylabel('F(x)')
        plt.show()
    """
    # Fittable parameters: peak value, peak position and full width at
    # half maximum of the profile.
    amplitude = Parameter(default=1.0)
    x_0 = Parameter(default=1.0)
    fwhm = Parameter(default=1.0)
    @staticmethod
    def evaluate(x, amplitude, x_0, fwhm):
        """
        One dimensional Drude model function.

        Parameters
        ----------
        x : array-like
            Evaluation points; must be nonzero since the profile contains
            a 1/x term.
        amplitude, x_0, fwhm : float
            Peak value, peak position and full width at half maximum.
        """
        return (
            amplitude
            * ((fwhm / x_0) ** 2)
            / ((x / x_0 - x_0 / x) ** 2 + (fwhm / x_0) ** 2)
        )
    @staticmethod
    def fit_deriv(x, amplitude, x_0, fwhm):
        """
        Drude1D model function derivatives.

        Returns
        -------
        list
            Partial derivatives [d/d(amplitude), d/d(x_0), d/d(fwhm)]
            evaluated at ``x``.
        """
        # d_amplitude equals the unit-amplitude profile value; the other
        # two derivatives reuse it as a common factor.
        d_amplitude = (fwhm / x_0) ** 2 / ((x / x_0 - x_0 / x) ** 2 + (fwhm / x_0) ** 2)
        d_x_0 = (
            -2
            * amplitude
            * d_amplitude
            * (
                (1 / x_0)
                + d_amplitude
                * (x_0 ** 2 / fwhm ** 2)
                * (
                    (-x / x_0 - 1 / x) * (x / x_0 - x_0 / x)
                    - (2 * fwhm ** 2 / x_0 ** 3)
                )
            )
        )
        d_fwhm = (2 * amplitude * d_amplitude / fwhm) * (1 - d_amplitude)
        return [d_amplitude, d_x_0, d_fwhm]
    @property
    def input_units(self):
        # Units follow the peak-position parameter; None means the model
        # is dimensionless.
        if self.x_0.unit is None:
            return None
        else:
            return {"x": self.x_0.unit}
    def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
        # Map parameter units from the units of the fitted data.
        return {
            "x_0": inputs_unit["x"],
            "fwhm": inputs_unit["x"],
            "amplitude": outputs_unit["y"],
        }
    @property
    def return_units(self):
        # Output units follow the amplitude parameter.
        if self.amplitude.unit is None:
            return None
        else:
            return {'y': self.amplitude.unit}
    @x_0.validator
    def x_0(self, val):
        """Reject x_0 == 0, which would divide by zero in evaluate()."""
        if val == 0:
            raise InputParameterError("0 is not an allowed value for x_0")
    def bounding_box(self, factor=50):
        """Tuple defining the default ``bounding_box`` limits,
        ``(x_low, x_high)``.

        Parameters
        ----------
        factor : float
            The multiple of FWHM used to define the limits.
        """
        x0 = self.x_0
        dx = factor * self.fwhm
        return (x0 - dx, x0 + dx)
| stargaser/astropy | astropy/modeling/physical_models.py | Python | bsd-3-clause | 9,673 |
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast

from oslo_log import log as logging
import six

from cinder import context
from cinder.i18n import _LW
from cinder.volume import driver
from cinder.volume.drivers.emc import emc_vmax_common
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
class EMCVMAXFCDriver(driver.FibreChannelDriver):
    """EMC FC Drivers for VMAX using SMI-S.

    Version history:
        1.0.0 - Initial driver
        1.1.0 - Multiple pools and thick/thin provisioning,
                performance enhancement.
        2.0.0 - Add driver requirement functions
        2.1.0 - Add consistency group functions
        2.1.1 - Fixed issue with mismatched config (bug #1442376)
        2.1.2 - Clean up failed clones (bug #1440154)
        2.1.3 - Fixed a problem with FAST support (bug #1435069)
        2.2.0 - Add manage/unmanage
        2.2.1 - Support for SE 8.0.3
        2.2.2 - Update Consistency Group
    """

    VERSION = "2.2.2"

    def __init__(self, *args, **kwargs):
        super(EMCVMAXFCDriver, self).__init__(*args, **kwargs)
        # All array interaction is delegated to the protocol-agnostic
        # common module, parameterized here for Fibre Channel.
        self.common = emc_vmax_common.EMCVMAXCommon(
            'FC',
            configuration=self.configuration)
        self.zonemanager_lookup_service = fczm_utils.create_lookup_service()

    @staticmethod
    def _build_provider_location_update(volume, volpath):
        """Record the backend path on *volume* and return the model update.

        Shared by the three volume-creation entry points, which all persist
        the backend volume path in ``provider_location``.
        """
        volume['provider_location'] = six.text_type(volpath)
        return {'provider_location': volume['provider_location']}

    def _get_snapshot_source_volume(self, snapshot):
        """Fetch the source volume of *snapshot* from the DB.

        The snapshot's ``volume_name`` has the form ``volume-<id>``; the id
        is everything after the first '-'.
        """
        ctxt = context.get_admin_context()
        volumename = snapshot['volume_name']
        index = volumename.index('-')
        volumeid = volumename[index + 1:]
        return self.db.volume_get(ctxt, volumeid)

    def check_for_setup_error(self):
        pass

    def create_volume(self, volume):
        """Creates a EMC(VMAX/VNX) volume."""
        volpath = self.common.create_volume(volume)
        return self._build_provider_location_update(volume, volpath)

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        volpath = self.common.create_volume_from_snapshot(volume, snapshot)
        return self._build_provider_location_update(volume, volpath)

    def create_cloned_volume(self, volume, src_vref):
        """Creates a cloned volume."""
        volpath = self.common.create_cloned_volume(volume, src_vref)
        return self._build_provider_location_update(volume, volpath)

    def delete_volume(self, volume):
        """Deletes an EMC volume."""
        self.common.delete_volume(volume)

    def create_snapshot(self, snapshot):
        """Creates a snapshot."""
        volume = self._get_snapshot_source_volume(snapshot)
        volpath = self.common.create_snapshot(snapshot, volume)
        snapshot['provider_location'] = six.text_type(volpath)
        return {'provider_location': snapshot['provider_location']}

    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        volume = self._get_snapshot_source_volume(snapshot)
        self.common.delete_snapshot(snapshot, volume)

    def ensure_export(self, context, volume):
        """Driver entry point to get the export info for an existing volume."""
        pass

    def create_export(self, context, volume):
        """Driver entry point to get the export info for a new volume."""
        pass

    def remove_export(self, context, volume):
        """Driver entry point to remove an export for a volume."""
        pass

    def check_for_export(self, context, volume_id):
        """Make sure volume is exported."""
        pass

    @fczm_utils.AddFCZone
    def initialize_connection(self, volume, connector):
        """Initializes the connection and returns connection info.

        Assign any created volume to a compute node/host so that it can be
        used from that host.

        The driver returns a driver_volume_type of 'fibre_channel'.
        The target_wwn can be a single entry or a list of wwns that
        correspond to the list of remote wwn(s) that will export the volume.
        Example return values:

            {
                'driver_volume_type': 'fibre_channel'
                'data': {
                    'target_discovered': True,
                    'target_lun': 1,
                    'target_wwn': '1234567890123',
                }
            }

            or

            {
                'driver_volume_type': 'fibre_channel'
                'data': {
                    'target_discovered': True,
                    'target_lun': 1,
                    'target_wwn': ['1234567890123', '0987654321321'],
                }
            }
        """
        device_info = self.common.initialize_connection(
            volume, connector)
        device_number = device_info['hostlunid']
        storage_system = device_info['storagesystem']
        target_wwns, init_targ_map = self._build_initiator_target_map(
            storage_system, volume, connector)
        data = {'driver_volume_type': 'fibre_channel',
                'data': {'target_lun': device_number,
                         'target_discovered': True,
                         'target_wwn': target_wwns,
                         'initiator_target_map': init_targ_map}}
        LOG.debug("Return FC data for zone addition: %(data)s.",
                  {'data': data})
        return data

    @fczm_utils.RemoveFCZone
    def terminate_connection(self, volume, connector, **kwargs):
        """Disallow connection from connector.

        Return empty data if other volumes are in the same zone.
        The FibreChannel ZoneManager doesn't remove zones
        if there isn't an initiator_target_map in the
        return of terminate_connection.

        :param volume: the volume object
        :param connector: the connector object
        :returns: dict -- the target_wwns and initiator_target_map if the
            zone is to be removed, otherwise empty
        """
        data = {'driver_volume_type': 'fibre_channel',
                'data': {}}
        loc = volume['provider_location']
        # provider_location holds the repr of a dict; parse it with
        # ast.literal_eval rather than eval(), which would execute
        # arbitrary code stored in the database.
        name = ast.literal_eval(loc)
        storage_system = name['keybindings']['SystemName']
        LOG.debug("Start FC detach process for volume: %(volume)s.",
                  {'volume': volume['name']})
        mvInstanceName = self.common.get_masking_view_by_volume(
            volume, connector)
        if mvInstanceName is not None:
            portGroupInstanceName = (
                self.common.get_port_group_from_masking_view(
                    mvInstanceName))
            LOG.debug("Found port group: %(portGroup)s "
                      "in masking view %(maskingView)s.",
                      {'portGroup': portGroupInstanceName,
                       'maskingView': mvInstanceName})
            self.common.terminate_connection(volume, connector)
            # Only tear down the zone when no masking view still uses the
            # port group; otherwise other attached volumes would lose
            # connectivity.
            LOG.debug("Looking for masking views still associated with "
                      "Port Group %s.", portGroupInstanceName)
            mvInstances = self.common.get_masking_views_by_port_group(
                portGroupInstanceName)
            if len(mvInstances) > 0:
                LOG.debug("Found %(numViews)lu MaskingViews.",
                          {'numViews': len(mvInstances)})
            else:  # No views found.
                target_wwns, init_targ_map = self._build_initiator_target_map(
                    storage_system, volume, connector)
                LOG.debug("No MaskingViews were found. Deleting zone.")
                data = {'driver_volume_type': 'fibre_channel',
                        'data': {'target_wwn': target_wwns,
                                 'initiator_target_map': init_targ_map}}
                LOG.debug("Return FC data for zone removal: %(data)s.",
                          {'data': data})
        else:
            LOG.warning(_LW("Volume %(volume)s is not in any masking view."),
                        {'volume': volume['name']})
        return data

    def _build_initiator_target_map(self, storage_system, volume, connector):
        """Build the target_wwns and the initiator target map."""
        target_wwns = []
        init_targ_map = {}
        initiator_wwns = connector['wwpns']
        if self.zonemanager_lookup_service:
            # Fabric-assisted zoning: ask the lookup service which targets
            # are reachable from each initiator.
            fc_targets = self.common.get_target_wwns_from_masking_view(
                storage_system, volume, connector)
            mapping = (
                self.zonemanager_lookup_service.
                get_device_mapping_from_network(initiator_wwns, fc_targets))
            for entry in mapping:
                map_d = mapping[entry]
                target_wwns.extend(map_d['target_port_wwn_list'])
                for initiator in map_d['initiator_port_wwn_list']:
                    init_targ_map[initiator] = map_d['target_port_wwn_list']
        else:  # No lookup service, pre-zoned case.
            target_wwns = self.common.get_target_wwns(storage_system,
                                                      connector)
            for initiator in initiator_wwns:
                init_targ_map[initiator] = target_wwns
        return list(set(target_wwns)), init_targ_map

    def extend_volume(self, volume, new_size):
        """Extend an existing volume."""
        self.common.extend_volume(volume, new_size)

    def get_volume_stats(self, refresh=False):
        """Get volume stats.

        :param refresh: boolean -- If True, run update the stats first.
        :returns: dict -- the stats dict
        """
        if refresh:
            self.update_volume_stats()
        return self._stats

    def update_volume_stats(self):
        """Retrieve stats info from volume group."""
        LOG.debug("Updating volume stats")
        data = self.common.update_volume_stats()
        data['storage_protocol'] = 'FC'
        data['driver_version'] = self.VERSION
        self._stats = data

    def migrate_volume(self, ctxt, volume, host):
        """Migrate a volume from one Volume Backend to another.

        :param ctxt: context
        :param volume: the volume object including the volume_type_id
        :param host: the host dict holding the relevant target(destination)
            information
        :returns: boolean -- Always returns True
        :returns: dict -- Empty dict {}
        """
        return self.common.migrate_volume(ctxt, volume, host)

    def retype(self, ctxt, volume, new_type, diff, host):
        """Migrate volume to another host using retype.

        :param ctxt: context
        :param volume: the volume object including the volume_type_id
        :param new_type: the new volume type.
        :param diff: Unused parameter.
        :param host: the host dict holding the relevant
            target(destination) information
        :returns: boolean -- True if retype succeeded, False if error
        """
        return self.common.retype(ctxt, volume, new_type, diff, host)

    def create_consistencygroup(self, context, group):
        """Creates a consistencygroup."""
        self.common.create_consistencygroup(context, group)

    def delete_consistencygroup(self, context, group):
        """Deletes a consistency group."""
        volumes = self.db.volume_get_all_by_group(context, group['id'])
        return self.common.delete_consistencygroup(
            context, group, volumes)

    def create_cgsnapshot(self, context, cgsnapshot):
        """Creates a cgsnapshot."""
        return self.common.create_cgsnapshot(context, cgsnapshot, self.db)

    def delete_cgsnapshot(self, context, cgsnapshot):
        """Deletes a cgsnapshot."""
        return self.common.delete_cgsnapshot(context, cgsnapshot, self.db)

    def manage_existing(self, volume, external_ref):
        """Manages an existing VMAX Volume (import to Cinder).

        Renames the Volume to match the expected name for the volume.
        Also need to consider things like QoS, Emulation, account/tenant.
        """
        return self.common.manage_existing(volume, external_ref)

    def manage_existing_get_size(self, volume, external_ref):
        """Return size of an existing VMAX volume to manage_existing.

        :param self: reference to class
        :param volume: the volume object including the volume_type_id
        :param external_ref: reference to the existing volume
        :returns: size of the volume in GB
        """
        return self.common.manage_existing_get_size(volume, external_ref)

    def unmanage(self, volume):
        """Export VMAX volume from Cinder.

        Leave the volume intact on the backend array.
        """
        return self.common.unmanage(volume)

    def update_consistencygroup(self, context, group,
                                add_volumes, remove_volumes):
        """Updates LUNs in consistency group."""
        return self.common.update_consistencygroup(group, add_volumes,
                                                   remove_volumes)
| JioCloud/cinder | cinder/volume/drivers/emc/emc_vmax_fc.py | Python | apache-2.0 | 13,846 |
#-*- coding:utf-8 -*-
import httplib2
import urllib
import json
import traceback
import bs4
from bs4 import BeautifulSoup
from bussiness import *
from orm import *
# Yahoo News endpoints used by the scraper.
mainpage_url = 'http://news.yahoo.com/us/most-popular/'  # most-popular listing
comment_base_url = 'http://news.yahoo.com/_xhr/contentcomments/get_all/?'  # XHR: all comments for an article
reply_base_url = 'http://news.yahoo.com/_xhr/contentcomments/get_replies/?'  # XHR: replies to one comment
t_news_url = 'http://news.yahoo.com/ebola-victims-sister-says-hospital-denied-request-025725064.html'  # sample article used by __main__
# DB access object from the project's bussiness module; used to persist comments.
ch = CommentHandler()
def urlencode(base, param):
    """Return *base* with *param* appended as a URL-encoded query string."""
    return base + urllib.urlencode(param)
def get_response(url):
    """Fetch *url* and return the (headers, body) pair from httplib2."""
    response = httplib2.Http().request(url)
    return response
def get_news_mainpage():
    """Download the Yahoo most-popular landing page; return (head, content)."""
    return get_response(mainpage_url)
def parse_news_from_mainpage(mainpage_content):
    """Extract [absolute_url, summary] pairs for article links on the
    most-popular page.

    External links and video/photo/blog entries are skipped; only relative
    article links are kept.
    """
    soup = BeautifulSoup(mainpage_content, from_encoding='utf-8')
    l_news = soup.find_all('div', {'class': 'body-wrap'})
    r_list = []
    for n in l_news:
        # Entries without a summary paragraph are not articles.
        if n.p:
            s_url = n.h3.a['href']
            # str.startswith accepts a tuple of prefixes: reject absolute
            # URLs and video/photo/blog sections in one test.
            if not s_url.startswith(('http', '/video', '/photos', '/blogs')):
                r_list.append(['http://news.yahoo.com' + s_url, n.p.string])
    return r_list
def parse_news_title_and_content(news_url):
    """Fetch one article page and scrape its metadata.

    Returns a dict with keys: title, content, comment_num, press_name,
    content_id (Yahoo UUID used by the comment XHR endpoints) and time.
    All text values are UTF-8 encoded byte strings (Python 2 code).
    """
    head, content = httplib2.Http().request(news_url)
    soup = BeautifulSoup(content.decode('utf-8'))
    # Headline, publication timestamp and total comment count.
    title = soup.find('h1', {'class':'headline'}).string
    news_time = soup.find('abbr').string
    # Body paragraphs have no class attribute on this page layout.
    l_p = soup.find_all('p', {'class':False})
    c_num = soup.find('span', {'id':'total-comment-count'}).string
    press_name = soup.find('img', {'class':'provider-img'})
    content_id = soup.find('section', {'id':'mediacontentstory'})
    c_id = content_id['data-uuid']
    # The provider is shown either as an image (alt text) or a span.
    p_name = ''
    if press_name:
        p_name = press_name['alt']
    else:
        p_span = soup.find('span', {'class':'provider-name'})
        if p_span:
            p_name = p_span.string
    # Keep only paragraphs consisting of a single text node; nested markup
    # makes p.string None and is skipped.
    l_content = []
    for p in l_p:
        if len(p.contents) > 0:
            if p.string:
                l_content.append(p.string)
    # NOTE: rebinds 'content' (previously the raw HTTP body) to the
    # joined article text.
    content = ''
    content = '\n'.join(l_content).encode('utf-8')
    news_dict = {}
    news_dict['title'] = title.encode('utf-8')
    news_dict['content'] = content
    news_dict['comment_num'] = int(c_num)
    news_dict['press_name'] = p_name.encode('utf-8')
    news_dict['content_id'] = c_id
    news_dict['time'] = news_time
    return news_dict
def parse_comment_num(news_url):
    """Return the total comment count shown on an article page (0 if absent)."""
    head, content = httplib2.Http().request(news_url)
    soup = BeautifulSoup(content.decode('utf-8'))
    count_text = soup.find('span', {'id':'total-comment-count'}).string
    if not count_text:
        return 0
    return int(count_text.strip())
def parse_comments(session, content_url, content_id, current_index, news_id):
    """Scrape one page of top-level comments from the Yahoo comment XHR.

    Stores each comment via ``ch.insert_comment``, recurses into replies
    for comments that have them, and tail-recurses onto the next page
    while the JSON response advertises a 'more' link.  Python 2 code.
    """
    print content_url
    try:
        # The endpoint returns JSON whose 'commentList' field is an HTML
        # fragment; parse it with BeautifulSoup.
        head, content = httplib2.Http().request(content_url)
        j_data = json.loads(content)
        more_url = j_data['more']
        soup = BeautifulSoup(j_data['commentList'])
        comment_list = soup.find_all('li', {'data-uid':True})
        for comment in comment_list:
            # Yahoo's own comment id, needed to fetch replies.
            if not comment.has_key('data-cmt'):
                comment_id=''
            else:
                comment_id = comment['data-cmt']
            span_nickname = comment.find('span', {'class':'int profile-link'})
            span_timestamp = comment.find('span', {'class':'comment-timestamp'})
            p_comment_content = comment.find('p', {'class': 'comment-content'})
            div_thumb_up = comment.find('div', {'id':'up-vote-box'})
            div_thumb_down = comment.find('div', {'id':'down-vote-box'})
            nickname = span_nickname.string
            timestamp = ''
            if span_timestamp:
                timestamp = span_timestamp.string
            # NOTE: rebinds 'content' (the HTTP body) to the comment text.
            content = '\n'.join([x.string.strip() for x in p_comment_content.contents if x.string])
            thumb_up_count = int(div_thumb_up.span.string)
            thumb_down_count = int(div_thumb_down.span.string)
            span_reply = comment.find('span', {'class':'replies int'})
            has_reply = 0
            if span_reply:
                has_reply = 1
            #print nickname, timestamp, thumb_up_count, thumb_down_count, content.encode('utf-8')
            try:
                # Top-level comment: is_reply=0, parent id -1.
                comment_id_db = ch.insert_comment(session, nickname.encode('utf-8'), thumb_up_count, thumb_down_count, content.encode('utf-8'), 0, has_reply, -1, news_id)
                session.flush()
                if span_reply and comment_id_db != -1:
                    reply_url = urlencode(reply_base_url, {'content_id':content_id, 'comment_id':comment_id})
                    parse_reply_comment(session, reply_url, content_id, comment_id, comment_id_db, 0, news_id)
            except:
                traceback.print_exc()
            finally:
                # NOTE(review): closes the session after every comment —
                # presumably the session survives close(); verify against
                # the bussiness/orm implementation.
                session.close()
        if more_url:
            # 'more' carries the query parameters for the next page.
            m_soup = BeautifulSoup(more_url)
            nextpage_url = urlencode(comment_base_url, {'content_id':content_id}) + '&'+ m_soup.li.span['data-query']
            current_index = current_index + len(comment_list)
            print current_index
            parse_comments(session, nextpage_url, content_id, current_index, news_id)
        else:
            return
    except:
        traceback.print_exc()
def parse_reply_comment(session, content_url, content_id, comment_id, comment_id_db, current_index, news_id):
    """Scrape one page of replies to a single comment and store them.

    ``comment_id`` is Yahoo's id of the parent comment; ``comment_id_db``
    is the parent's row id in our DB.  Recurses while a 'more' link is
    present.  Python 2 code.
    """
    print content_url
    head, content = httplib2.Http().request(content_url)
    j_data = json.loads(content)
    more_url = j_data['more']
    soup = BeautifulSoup(j_data['commentList'])
    reply_comment_list = soup.find_all('li', {'data-uid':True})
    for comment in reply_comment_list:
        span_nickname = comment.find('span', {'class':'int profile-link'})
        span_timestamp = comment.find('span', {'class':'comment-timestamp'})
        p_comment_content = comment.find('p', {'class': 'comment-content'})
        div_thumb_up = comment.find('div', {'id':'up-vote-box'})
        div_thumb_down = comment.find('div', {'id':'down-vote-box'})
        nickname = span_nickname.string
        timestamp = span_timestamp.string
        # NOTE: rebinds 'content' (the HTTP body) to the reply text.
        content = '\n'.join([x.string.strip() for x in p_comment_content.contents if x.string])
        thumb_up_count = int(div_thumb_up.span.string)
        thumb_down_count = int(div_thumb_down.span.string)
        try:
            # Reply row: is_reply=1, no nested replies, parent=comment_id_db.
            ch.insert_comment(session, nickname.encode('utf-8'), thumb_up_count, thumb_down_count, content.encode('utf-8'), 1, 0, comment_id_db, news_id)
            session.flush()
        except:
            traceback.print_exc()
        finally:
            session.close()
    if more_url:
        m_soup = BeautifulSoup(more_url)
        nextpage_url = urlencode(reply_base_url, {'content_id':content_id, 'comment_id':comment_id}) + '&'+ m_soup.li.span['data-query']
        current_index = current_index + len(reply_comment_list)
        parse_reply_comment(session, nextpage_url, content_id, comment_id, comment_id_db, current_index, news_id)
    else:
        return
if __name__=='__main__':
    # Demo driver: fetch the main page, then scrape one hard-coded article
    # and its comments sorted by rating.
    yahoo_mainpage_head, yahoo_mainpage_content = get_news_mainpage()
    parse_news_from_mainpage(yahoo_mainpage_content)
    news_dict = parse_news_title_and_content(t_news_url)
    news_c_id = news_dict['content_id']
    param_dict = {'content_id':news_c_id, 'sortBy':'highestRated'}
    rurl = urlencode(comment_base_url, param_dict)
    # NOTE(review): parse_comments takes (session, content_url, content_id,
    # current_index, news_id) — this call passes only 3 positional args and
    # would raise TypeError; a DB session and news_id must be supplied.
    parse_comments(rurl, news_c_id, 0)
from .integrator_template import IntegratorTemplate
from .. import backend as D
# Public integrator base classes exported by this module.
__all__ = [
    'ExplicitRungeKuttaIntegrator',
    'ExplicitSymplecticIntegrator'
]
class ExplicitRungeKuttaIntegrator(IntegratorTemplate):
    """
    A base class for all explicit Runge-Kutta methods with a lower triangular Butcher Tableau.
    An ExplicitRungeKuttaIntegrator derived object corresponds to a
    numerical integrator tailored to a particular dynamical system
    with an integration scheme defined by the Butcher tableau of the child
    class.
    A child class that defines two sets of coefficients for final_state
    is considered an adaptive method and uses the adaptive stepping
    based on the local error estimate derived from the two sets of
    final_state coefficients. Furthermore, local extrapolation is used.
    Attributes
    ----------
    tableau : numpy array, shape (N, N+1)
        A numpy array with N stages and N+1 entries per stage where the first column
        is the timestep fraction and the remaining columns are the stage coefficients.
    final_state : numpy array, shape (k, N)
        A numpy array with N+1 coefficients defining the final stage coefficients.
        If k == 2, then the method is considered adaptive and the first row is
        the lower order method and the second row is the higher order method
        whose difference gives the local error of the numerical integration.
    __symplectic__ : bool
        True if the method is symplectic.
    """
    tableau = None
    final_state = None
    order = 1
    __symplectic__ = False
    def __init__(self, sys_dim, dtype=None, rtol=None, atol=None, device=None):
        # Convert the class-level coefficient tables to backend arrays,
        # optionally cast to the requested dtype.
        if dtype is None:
            self.tableau = D.array(self.tableau)
            self.final_state = D.array(self.final_state)
        else:
            self.tableau = D.to_type(self.tableau, dtype)
            self.final_state = D.to_type(self.final_state, dtype)
        self.dim = sys_dim
        self.rtol = rtol
        self.atol = atol
        # Two rows of final-state coefficients -> embedded (adaptive) pair.
        self.adaptive = D.shape(self.final_state)[0] == 2
        self.num_stages = D.shape(self.tableau)[0]
        # Per-stage storage for the scaled RHS evaluations (k_i * dt).
        self.aux = D.zeros((self.num_stages, ) + self.dim)
        if dtype is not None:
            if D.backend() == 'torch':
                self.aux = self.aux.to(dtype)
            else:
                self.aux = self.aux.astype(dtype)
        if D.backend() == 'torch':
            # Move all working arrays onto the requested torch device.
            self.aux = self.aux.to(device)
            self.tableau = self.tableau.to(device)
            self.final_state = self.final_state.to(device)
    def forward(self, rhs, initial_time, initial_state, constants, timestep):
        """Take one (possibly adaptive) RK step; returns
        (timestep, (dTime, dState))."""
        if self.tableau is None:
            raise NotImplementedError("In order to use the fixed step integrator, subclass this class and populate the butcher tableau")
        else:
            aux = self.aux
            # Index that selects the stage coefficients (columns 1..N) and
            # broadcasts them over the state dimensions.
            tableau_idx_expand = tuple([slice(1, None, None)] + [None] * (aux.ndim - 1))
            for stage in range(self.num_stages):
                # Lower-triangular tableau: only already-computed stages
                # contribute to the current stage state.
                current_state = initial_state + D.sum(self.tableau[stage][tableau_idx_expand] * aux, axis=0)
                aux[stage] = rhs(initial_time + self.tableau[stage, 0]*timestep, current_state, **constants) * timestep
            self.dState = D.sum(self.final_state[0][tableau_idx_expand] * aux, axis=0)
            self.dTime = timestep
            if self.adaptive:
                diff = self.get_error_estimate(self.dState, self.dTime, aux, tableau_idx_expand)
                timestep, redo_step = self.update_timestep(initial_state, self.dState, diff, initial_time, timestep)
                if redo_step:
                    # Retry the step recursively with the reduced timestep.
                    timestep, (self.dTime, self.dState) = self(rhs, initial_time, initial_state, constants, timestep)
            return timestep, (self.dTime, self.dState)
    def get_error_estimate(self, dState, dTime, aux, tableau_idx_expand):
        # Local error = difference between the two embedded solutions.
        return dState - D.sum(self.final_state[1][tableau_idx_expand] * aux, axis=0)
    def dense_output(self, rhs, initial_time, initial_state):
        # Cubic Hermite interpolant over the last accepted step.
        # NOTE(review): CubicHermiteInterp is not imported in the visible
        # code, and unlike the symplectic variant this does not forward
        # `constants` to rhs — confirm against the rest of the module.
        return CubicHermiteInterp(
            initial_time,
            initial_time + self.dTime,
            initial_state,
            initial_state + self.dState,
            rhs(initial_time, initial_state),
            rhs(initial_time + self.dTime, initial_state + self.dState)
        )
    __call__ = forward
class ExplicitSymplecticIntegrator(IntegratorTemplate):
    """
    A base class for all symplectic numerical integration methods.
    A ExplicitSymplecticIntegrator derived object corresponds to a
    numerical integrator tailored to a particular dynamical system
    with an integration scheme defined by the sequence of drift-kick
    coefficients in tableau.
    An explicit symplectic integrator may be considered as a sequence of carefully
    picked drift and kick stages that build off the previous stage which is
    the implementation considered here. A masking array of indices indicates
    the drift and kick variables that are updated at each stage.
    In a system defined by a Hamiltonian of q and p (generalised position and
    generalised momentum respectively), the drift stages update q and the kick
    stages update p. For a conservative Hamiltonian, a symplectic method will
    minimise the drift in the Hamiltonian during the integration.
    Attributes
    ----------
    tableau : numpy array, shape (N, N+1)
        A numpy array with N stages and N+1 entries per stage where the first column
        is the timestep fraction and the remaining columns are the stage coefficients.
    __symplectic__ : bool
        True if the method is symplectic.
    """
    tableau = None
    __symplectic__ = True
    def __init__(self, sys_dim, dtype=None, staggered_mask=None, rtol=None, atol=None, device=None):
        # Default staggered mask: the second half of the state vector is
        # treated as the "kick" (momentum) partition.
        if staggered_mask is None:
            staggered_mask = D.arange(sys_dim[0]//2, sys_dim[0], dtype=D.int64)
            self.staggered_mask = D.zeros(sys_dim, dtype=D.bool)
            self.staggered_mask[staggered_mask] = 1
        else:
            self.staggered_mask = D.to_type(staggered_mask, D.bool)
        if dtype is None:
            self.tableau = D.array(self.tableau)
        else:
            self.tableau = D.to_type(self.tableau, dtype)
        self.dim = sys_dim
        self.rtol = rtol
        self.atol = atol
        # Symplectic schemes here are fixed-step only.
        self.adaptive = False
        self.num_stages = D.shape(self.tableau)[0]
        # msk selects the kick variables, nmsk the drift variables.
        self.msk = self.staggered_mask
        self.nmsk = D.logical_not(self.staggered_mask)
        if D.backend() == 'torch':
            self.tableau = self.tableau.to(device)
            self.msk = self.msk.to(self.tableau)
            self.nmsk = self.nmsk.to(self.tableau)
    def forward(self, rhs, initial_time, initial_state, constants, timestep):
        """Take one fixed drift-kick step; returns (timestep, (dTime, dState))."""
        if self.tableau is None:
            raise NotImplementedError("In order to use the fixed step integrator, subclass this class and populate the butcher tableau")
        else:
            msk = self.msk
            nmsk = self.nmsk
            current_time = D.copy(initial_time)
            current_state = D.copy(initial_state)
            self.dState = D.zeros_like(current_state)
            for stage in range(self.num_stages):
                # Each stage evaluates the RHS at the partially-updated
                # state, then applies its kick (col 1) and drift (col 2)
                # coefficients to the respective partitions.
                aux = rhs(current_time, initial_state + self.dState, **constants) * timestep
                current_time = current_time + timestep * self.tableau[stage, 0]
                self.dState += aux * self.tableau[stage, 1] * msk + aux * self.tableau[stage, 2] * nmsk
            self.dTime = timestep
            return timestep, (self.dTime, self.dState)
    def dense_output(self, rhs, initial_time, initial_state, constants):
        # Cubic Hermite interpolant over the last accepted step.
        # NOTE(review): CubicHermiteInterp is not imported in the visible
        # code — confirm it is defined elsewhere in this module.
        return CubicHermiteInterp(
            initial_time,
            initial_time + self.dTime,
            initial_state,
            initial_state + self.dState,
            rhs(initial_time, initial_state, **constants),
            rhs(initial_time + self.dTime, initial_state + self.dState, **constants)
        )
    __call__ = forward
| Microno95/DESolver | desolver/integrators/integrator_types.py | Python | mit | 8,340 |
import operator
import numpy as np
import pytest
from pandas.compat.numpy import _np_version_under1p20
import pandas as pd
import pandas._testing as tm
from pandas.core import ops
from pandas.core.arrays.sparse import SparseArray, SparseDtype
@pytest.fixture(params=["integer", "block"])
def kind(request):
"""kind kwarg to pass to SparseArray/SparseSeries"""
return request.param
@pytest.fixture(params=[True, False])
def mix(request):
    # Whether to operate op(sparse, dense) instead of op(sparse, sparse).
    flag = request.param
    return flag
class TestSparseArrayArithmetics:
    # Concrete constructors for the dense and sparse containers under test;
    # subclasses can override these to reuse the same checks.
    _base = np.array
    _klass = SparseArray
    def _assert(self, a, b):
        """Compare two dense numpy arrays for exact equality."""
        tm.assert_numpy_array_equal(a, b)
    def _check_numeric_ops(self, a, b, a_dense, b_dense, mix, op):
        """Check op on sparse operands against op on their dense equivalents.

        When ``mix`` is True the right operand is passed densely
        (op(sparse, dense)); otherwise both operands are sparse.
        """
        with np.errstate(invalid="ignore", divide="ignore"):
            if mix:
                result = op(a, b_dense).to_dense()
            else:
                result = op(a, b).to_dense()
            if op in [operator.truediv, ops.rtruediv]:
                # pandas uses future division
                expected = op(a_dense * 1.0, b_dense)
            else:
                expected = op(a_dense, b_dense)
            if op in [operator.floordiv, ops.rfloordiv]:
                # Series sets 1//0 to np.inf, which SparseArray does not do (yet)
                mask = np.isinf(expected)
                if mask.any():
                    expected[mask] = np.nan
            self._assert(result, expected)
    def _check_bool_result(self, res):
        """Check that a comparison/logical result is a boolean sparse array."""
        assert isinstance(res, self._klass)
        assert isinstance(res.dtype, SparseDtype)
        assert res.dtype.subtype == np.bool_
        assert isinstance(res.fill_value, bool)
    def _check_comparison_ops(self, a, b, a_dense, b_dense):
        """Check all six comparison ops for sparse/sparse and sparse/dense.

        Each result must be a boolean sparse array whose densified values
        match the comparison of the dense equivalents.
        """
        with np.errstate(invalid="ignore"):
            # Unfortunately, trying to wrap the computation of each expected
            # value is with np.errstate() is too tedious.
            #
            # sparse & sparse
            self._check_bool_result(a == b)
            self._assert((a == b).to_dense(), a_dense == b_dense)
            self._check_bool_result(a != b)
            self._assert((a != b).to_dense(), a_dense != b_dense)
            self._check_bool_result(a >= b)
            self._assert((a >= b).to_dense(), a_dense >= b_dense)
            self._check_bool_result(a <= b)
            self._assert((a <= b).to_dense(), a_dense <= b_dense)
            self._check_bool_result(a > b)
            self._assert((a > b).to_dense(), a_dense > b_dense)
            self._check_bool_result(a < b)
            self._assert((a < b).to_dense(), a_dense < b_dense)
            # sparse & dense
            self._check_bool_result(a == b_dense)
            self._assert((a == b_dense).to_dense(), a_dense == b_dense)
            self._check_bool_result(a != b_dense)
            self._assert((a != b_dense).to_dense(), a_dense != b_dense)
            self._check_bool_result(a >= b_dense)
            self._assert((a >= b_dense).to_dense(), a_dense >= b_dense)
            self._check_bool_result(a <= b_dense)
            self._assert((a <= b_dense).to_dense(), a_dense <= b_dense)
            self._check_bool_result(a > b_dense)
            self._assert((a > b_dense).to_dense(), a_dense > b_dense)
            self._check_bool_result(a < b_dense)
            self._assert((a < b_dense).to_dense(), a_dense < b_dense)
    def _check_logical_ops(self, a, b, a_dense, b_dense):
        """Check & and | for sparse/sparse and sparse/dense operand pairs."""
        # sparse & sparse
        self._check_bool_result(a & b)
        self._assert((a & b).to_dense(), a_dense & b_dense)
        self._check_bool_result(a | b)
        self._assert((a | b).to_dense(), a_dense | b_dense)
        # sparse & dense
        self._check_bool_result(a & b_dense)
        self._assert((a & b_dense).to_dense(), a_dense & b_dense)
        self._check_bool_result(a | b_dense)
        self._assert((a | b_dense).to_dense(), a_dense | b_dense)
@pytest.mark.parametrize("scalar", [0, 1, 3])
@pytest.mark.parametrize("fill_value", [None, 0, 2])
def test_float_scalar(
self, kind, mix, all_arithmetic_functions, fill_value, scalar, request
):
op = all_arithmetic_functions
if not _np_version_under1p20:
if op in [operator.floordiv, ops.rfloordiv]:
mark = pytest.mark.xfail(strict=False, reason="GH#38172")
request.node.add_marker(mark)
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
a = self._klass(values, kind=kind, fill_value=fill_value)
self._check_numeric_ops(a, scalar, values, scalar, mix, op)
def test_float_scalar_comparison(self, kind):
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
a = self._klass(values, kind=kind)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
a = self._klass(values, kind=kind, fill_value=0)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
a = self._klass(values, kind=kind, fill_value=2)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
def test_float_same_index_without_nans(
    self, kind, mix, all_arithmetic_functions, request
):
    """Arithmetic when both operands share the same sp_index and hold no NaNs."""
    op = all_arithmetic_functions
    lvals = self._base([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0])
    rvals = self._base([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0])
    left = self._klass(lvals, kind=kind, fill_value=0)
    right = self._klass(rvals, kind=kind, fill_value=0)
    self._check_numeric_ops(left, right, lvals, rvals, mix, op)
def test_float_same_index_with_nans(
    self, kind, mix, all_arithmetic_functions, request
):
    """Arithmetic when both operands share the same sp_index and contain NaNs."""
    # when sp_index are the same
    op = all_arithmetic_functions
    # NumPy >= 1.20 floordiv on NaN-bearing sparse operands: known failure
    # (GH#38172); non-strict since not every op/kind combination fails.
    if not _np_version_under1p20:
        if op in [operator.floordiv, ops.rfloordiv]:
            mark = pytest.mark.xfail(strict=False, reason="GH#38172")
            request.node.add_marker(mark)
    values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
    rvalues = self._base([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])
    a = self._klass(values, kind=kind)
    b = self._klass(rvalues, kind=kind)
    self._check_numeric_ops(a, b, values, rvalues, mix, op)
def test_float_same_index_comparison(self, kind):
    """Comparisons when both operands share the same sp_index."""
    # Case 1: NaN-bearing values with the default fill.
    lvals = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
    rvals = self._base([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])
    left = self._klass(lvals, kind=kind)
    right = self._klass(rvals, kind=kind)
    self._check_comparison_ops(left, right, lvals, rvals)

    # Case 2: NaN-free values with an explicit zero fill.
    lvals = self._base([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0])
    rvals = self._base([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0])
    left = self._klass(lvals, kind=kind, fill_value=0)
    right = self._klass(rvals, kind=kind, fill_value=0)
    self._check_comparison_ops(left, right, lvals, rvals)
def test_float_array(self, kind, mix, all_arithmetic_functions):
    """Arithmetic between two float sparse arrays under several fill values."""
    op = all_arithmetic_functions
    lvals = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
    rvals = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])

    left = self._klass(lvals, kind=kind)
    right = self._klass(rvals, kind=kind)
    self._check_numeric_ops(left, right, lvals, rvals, mix, op)
    # Zeroing the right operand exercises the all-fill path.
    self._check_numeric_ops(left, right * 0, lvals, rvals * 0, mix, op)

    for lkw, rkw in (
        ({"fill_value": 0}, {}),
        ({"fill_value": 0}, {"fill_value": 0}),
        ({"fill_value": 1}, {"fill_value": 2}),
    ):
        left = self._klass(lvals, kind=kind, **lkw)
        right = self._klass(rvals, kind=kind, **rkw)
        self._check_numeric_ops(left, right, lvals, rvals, mix, op)
def test_float_array_different_kind(self, mix, all_arithmetic_functions):
    """Arithmetic mixing an 'integer'-kind left with a 'block'-kind right operand."""
    op = all_arithmetic_functions
    lvals = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
    rvals = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])

    left = self._klass(lvals, kind="integer")
    right = self._klass(rvals, kind="block")
    self._check_numeric_ops(left, right, lvals, rvals, mix, op)
    # Zeroing the right operand exercises the all-fill path.
    self._check_numeric_ops(left, right * 0, lvals, rvals * 0, mix, op)

    for lkw, rkw in (
        ({"fill_value": 0}, {}),
        ({"fill_value": 0}, {"fill_value": 0}),
        ({"fill_value": 1}, {"fill_value": 2}),
    ):
        left = self._klass(lvals, kind="integer", **lkw)
        right = self._klass(rvals, kind="block", **rkw)
        self._check_numeric_ops(left, right, lvals, rvals, mix, op)
def test_float_array_comparison(self, kind):
    """Comparisons between two float sparse arrays under several fill values."""
    lvals = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
    rvals = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])

    left = self._klass(lvals, kind=kind)
    right = self._klass(rvals, kind=kind)
    self._check_comparison_ops(left, right, lvals, rvals)
    # Zeroing the right operand exercises the all-fill path.
    self._check_comparison_ops(left, right * 0, lvals, rvals * 0)

    for lkw, rkw in (
        ({"fill_value": 0}, {}),
        ({"fill_value": 0}, {"fill_value": 0}),
        ({"fill_value": 1}, {"fill_value": 2}),
    ):
        left = self._klass(lvals, kind=kind, **lkw)
        right = self._klass(rvals, kind=kind, **rkw)
        self._check_comparison_ops(left, right, lvals, rvals)
def test_int_array(self, kind, mix, all_arithmetic_functions):
    """Arithmetic between two int64 sparse arrays; also asserts dtype propagation."""
    op = all_arithmetic_functions

    # have to specify dtype explicitly until fixing GH 667
    dtype = np.int64

    values = self._base([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype)
    rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype)

    # No explicit fill_value: the default fill applies.
    a = self._klass(values, dtype=dtype, kind=kind)
    assert a.dtype == SparseDtype(dtype)
    b = self._klass(rvalues, dtype=dtype, kind=kind)
    assert b.dtype == SparseDtype(dtype)

    self._check_numeric_ops(a, b, values, rvalues, mix, op)
    # Zeroed right operand exercises the all-fill path.
    self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op)

    # Left operand carries fill_value=0, right keeps the default.
    a = self._klass(values, fill_value=0, dtype=dtype, kind=kind)
    assert a.dtype == SparseDtype(dtype)
    b = self._klass(rvalues, dtype=dtype, kind=kind)
    assert b.dtype == SparseDtype(dtype)
    self._check_numeric_ops(a, b, values, rvalues, mix, op)

    # Both operands carry fill_value=0.
    a = self._klass(values, fill_value=0, dtype=dtype, kind=kind)
    assert a.dtype == SparseDtype(dtype)
    b = self._klass(rvalues, fill_value=0, dtype=dtype, kind=kind)
    assert b.dtype == SparseDtype(dtype)
    self._check_numeric_ops(a, b, values, rvalues, mix, op)

    # Differing non-zero fill values; the dtype must remember each fill.
    a = self._klass(values, fill_value=1, dtype=dtype, kind=kind)
    assert a.dtype == SparseDtype(dtype, fill_value=1)
    b = self._klass(rvalues, fill_value=2, dtype=dtype, kind=kind)
    assert b.dtype == SparseDtype(dtype, fill_value=2)
    self._check_numeric_ops(a, b, values, rvalues, mix, op)
def test_int_array_comparison(self, kind):
    """Comparisons between two int64 sparse arrays under several fill values."""
    dtype = "int64"
    # int32 NI ATM
    lvals = self._base([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype)
    rvals = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype)

    left = self._klass(lvals, dtype=dtype, kind=kind)
    right = self._klass(rvals, dtype=dtype, kind=kind)
    self._check_comparison_ops(left, right, lvals, rvals)
    # Zeroing the right operand exercises the all-fill path.
    self._check_comparison_ops(left, right * 0, lvals, rvals * 0)

    for lkw, rkw in (
        ({"fill_value": 0}, {}),
        ({"fill_value": 0}, {"fill_value": 0}),
        ({"fill_value": 1}, {"fill_value": 2}),
    ):
        left = self._klass(lvals, dtype=dtype, kind=kind, **lkw)
        right = self._klass(rvals, dtype=dtype, kind=kind, **rkw)
        self._check_comparison_ops(left, right, lvals, rvals)
@pytest.mark.parametrize("fill_value", [True, False, np.nan])
def test_bool_same_index(self, kind, fill_value):
    """Logical ops on identical boolean sparse arrays sharing one sp_index (GH 14000)."""
    dense = [True, False, True, True]
    lvals = self._base(dense, dtype=np.bool_)
    rvals = self._base(dense, dtype=np.bool_)
    left = self._klass(lvals, kind=kind, dtype=np.bool_, fill_value=fill_value)
    right = self._klass(rvals, kind=kind, dtype=np.bool_, fill_value=fill_value)
    self._check_logical_ops(left, right, lvals, rvals)
@pytest.mark.parametrize("fill_value", [True, False, np.nan])
def test_bool_array_logical(self, kind, fill_value):
    """Logical ops on two differing boolean sparse arrays (GH 14000)."""
    # Both operands share the same sp_index but differ in values.
    lvals = self._base([True, False, True, False, True, True], dtype=np.bool_)
    rvals = self._base([True, False, False, True, False, True], dtype=np.bool_)
    left = self._klass(lvals, kind=kind, dtype=np.bool_, fill_value=fill_value)
    right = self._klass(rvals, kind=kind, dtype=np.bool_, fill_value=fill_value)
    self._check_logical_ops(left, right, lvals, rvals)
def test_mixed_array_float_int(self, kind, mix, all_arithmetic_functions, request):
    """Arithmetic between a float sparse array and an int64 sparse array."""
    op = all_arithmetic_functions

    # NumPy >= 1.20 floordiv of mixed float/int sparse operands fails
    # deterministically when `mix` is set (GH#38172), hence strict xfail.
    if not _np_version_under1p20:
        if op in [operator.floordiv, ops.rfloordiv] and mix:
            mark = pytest.mark.xfail(strict=True, reason="GH#38172")
            request.node.add_marker(mark)

    rdtype = "int64"
    values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
    rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype)

    # Default fills on both sides; right must keep its integer dtype.
    a = self._klass(values, kind=kind)
    b = self._klass(rvalues, kind=kind)
    assert b.dtype == SparseDtype(rdtype)

    self._check_numeric_ops(a, b, values, rvalues, mix, op)
    # Zeroed right operand exercises the all-fill path.
    self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op)

    # Left fill_value=0, right default.
    a = self._klass(values, kind=kind, fill_value=0)
    b = self._klass(rvalues, kind=kind)
    assert b.dtype == SparseDtype(rdtype)
    self._check_numeric_ops(a, b, values, rvalues, mix, op)

    # Both sides fill_value=0.
    a = self._klass(values, kind=kind, fill_value=0)
    b = self._klass(rvalues, kind=kind, fill_value=0)
    assert b.dtype == SparseDtype(rdtype)
    self._check_numeric_ops(a, b, values, rvalues, mix, op)

    # Differing non-zero fills; the right dtype must remember its fill.
    a = self._klass(values, kind=kind, fill_value=1)
    b = self._klass(rvalues, kind=kind, fill_value=2)
    assert b.dtype == SparseDtype(rdtype, fill_value=2)
    self._check_numeric_ops(a, b, values, rvalues, mix, op)
def test_mixed_array_comparison(self, kind):
    """Comparisons between a float sparse array and an int64 sparse array."""
    rdtype = "int64"
    # int32 NI ATM
    lvals = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
    rvals = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype)

    left = self._klass(lvals, kind=kind)
    right = self._klass(rvals, kind=kind)
    assert right.dtype == SparseDtype(rdtype)
    self._check_comparison_ops(left, right, lvals, rvals)
    # Zeroing the right operand exercises the all-fill path.
    self._check_comparison_ops(left, right * 0, lvals, rvals * 0)

    for lkw, rkw, expected_dtype in (
        ({"fill_value": 0}, {}, SparseDtype(rdtype)),
        ({"fill_value": 0}, {"fill_value": 0}, SparseDtype(rdtype)),
        ({"fill_value": 1}, {"fill_value": 2}, SparseDtype(rdtype, fill_value=2)),
    ):
        left = self._klass(lvals, kind=kind, **lkw)
        right = self._klass(rvals, kind=kind, **rkw)
        assert right.dtype == expected_dtype
        self._check_comparison_ops(left, right, lvals, rvals)
def test_xor(self):
    """XOR of two boolean SparseArrays keeps only the represented slots."""
    left = SparseArray([True, True, False, False])
    right = SparseArray([True, False, True, False])
    observed = left ^ right
    # Positions 0-2 are stored sparsely; position 3 falls into the fill.
    sp_index = pd.core.arrays.sparse.IntIndex(4, np.array([0, 1, 2], dtype="int32"))
    expected = SparseArray([False, True, True], sparse_index=sp_index)
    tm.assert_sp_array_equal(observed, expected)
@pytest.mark.parametrize("op", [operator.eq, operator.add])
def test_with_list(op):
    """Applying an op to (SparseArray, list) matches (SparseArray, SparseArray)."""
    sp = SparseArray([0, 1], fill_value=0)
    observed = op(sp, [0, 1])
    expected = op(sp, SparseArray([0, 1]))
    tm.assert_sp_array_equal(observed, expected)
def test_with_dataframe():
    """SparseArray.__add__ must defer (return NotImplemented) for a DataFrame (GH#27910)."""
    sp = SparseArray([0, 1], fill_value=0)
    frame = pd.DataFrame([[1, 2], [3, 4]])
    assert sp.__add__(frame) is NotImplemented
def test_with_zerodim_ndarray():
    """A 0-d ndarray operand behaves like the scalar it wraps (GH#27910)."""
    sp = SparseArray([0, 1], fill_value=0)
    observed = sp * np.array(2)
    tm.assert_sp_array_equal(observed, sp * 2)
@pytest.mark.parametrize("ufunc", [np.abs, np.exp])
@pytest.mark.parametrize(
    "arr", [SparseArray([0, 0, -1, 1]), SparseArray([None, None, -1, 1])]
)
def test_ufuncs(ufunc, arr):
    """A unary ufunc on a SparseArray equals the densified result, fill included."""
    observed = ufunc(arr)
    mapped_fill = ufunc(arr.fill_value)
    dense_result = ufunc(np.asarray(arr))
    tm.assert_sp_array_equal(observed, SparseArray(dense_result, fill_value=mapped_fill))
@pytest.mark.parametrize(
    "a, b",
    [
        (SparseArray([0, 0, 0]), np.array([0, 1, 2])),
        # NOTE(review): the four cases below are byte-identical -- presumably
        # they were meant to vary operands or fill values; confirm intent.
        (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])),
        (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])),
        (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])),
        (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])),
    ],
)
@pytest.mark.parametrize("ufunc", [np.add, np.greater])
def test_binary_ufuncs(ufunc, a, b):
    """A binary ufunc on (SparseArray, ndarray) stays sparse and matches dense."""
    # can't say anything about fill value here.
    result = ufunc(a, b)
    expected = ufunc(np.asarray(a), np.asarray(b))
    assert isinstance(result, SparseArray)
    tm.assert_numpy_array_equal(np.asarray(result), expected)
def test_ndarray_inplace():
    """In-place ``+=`` on an ndarray with a SparseArray rhs mutates the ndarray."""
    dense = np.array([0, 1, 2, 3])
    dense += SparseArray([0, 2, 0, 0])
    tm.assert_numpy_array_equal(dense, np.array([0, 3, 2, 3]))
def test_sparray_inplace():
    """In-place ``+=`` on a SparseArray with an ndarray rhs yields a SparseArray."""
    sp = SparseArray([0, 2, 0, 0])
    sp += np.array([0, 1, 2, 3])
    tm.assert_sp_array_equal(sp, SparseArray([0, 3, 2, 3], fill_value=0))
@pytest.mark.parametrize("fill_value", [True, False])
def test_invert(fill_value):
    """``~`` on a boolean SparseArray flips the values and the fill value alike."""
    dense = np.array([True, False, False, True])
    sp = SparseArray(dense, fill_value=fill_value)
    expected = SparseArray(~dense, fill_value=not fill_value)
    tm.assert_sp_array_equal(~sp, expected)
    # The same inversion flows through the Series and DataFrame wrappers.
    ser_expected = pd.Series(expected)
    tm.assert_series_equal(~pd.Series(sp), ser_expected)
    tm.assert_frame_equal(~pd.DataFrame({"A": sp}), pd.DataFrame({"A": ser_expected}))
@pytest.mark.parametrize("fill_value", [0, np.nan])
@pytest.mark.parametrize("op", [operator.pos, operator.neg])
def test_unary_op(op, fill_value):
    """Unary ``+``/``-`` applies elementwise and to the fill value."""
    dense = np.array([0, 1, np.nan, 2])
    sp = SparseArray(dense, fill_value=fill_value)
    expected = SparseArray(op(dense), fill_value=op(fill_value))
    tm.assert_sp_array_equal(op(sp), expected)
| jreback/pandas | pandas/tests/arrays/sparse/test_arithmetics.py | Python | bsd-3-clause | 20,217 |
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2015 OpenLP Developers #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The general tab of the configuration dialog.
"""
import logging
from PyQt4 import QtCore, QtGui
from openlp.core.common import Registry, Settings, UiStrings, translate
from openlp.core.lib import SettingsTab, ScreenList
log = logging.getLogger(__name__)
class GeneralTab(SettingsTab):
    """
    GeneralTab is the general settings tab in the settings dialog.
    """

    def __init__(self, parent):
        """
        Initialise the general settings tab.

        :param parent: The parent widget (the settings dialog).
        """
        # Screen-handling singleton: enumerates monitors and applies overrides.
        self.screens = ScreenList()
        self.icon_path = ':/icon/openlp-logo-16x16.png'
        general_translated = translate('OpenLP.GeneralTab', 'General')
        super(GeneralTab, self).__init__(parent, 'Core', general_translated)
def setupUi(self):
    """
    Create the user interface for the general settings tab.

    Builds four areas: monitor selection / display-position override (left),
    CCLI details (left), background audio (left), and application startup /
    application settings (right). Signal wiring happens at the end.
    """
    self.setObjectName('GeneralTab')
    super(GeneralTab, self).setupUi()
    self.tab_layout.setStretch(1, 1)
    # Monitors
    self.monitor_group_box = QtGui.QGroupBox(self.left_column)
    self.monitor_group_box.setObjectName('monitor_group_box')
    self.monitor_layout = QtGui.QGridLayout(self.monitor_group_box)
    self.monitor_layout.setObjectName('monitor_layout')
    self.monitor_radio_button = QtGui.QRadioButton(self.monitor_group_box)
    self.monitor_radio_button.setObjectName('monitor_radio_button')
    self.monitor_layout.addWidget(self.monitor_radio_button, 0, 0, 1, 5)
    self.monitor_combo_box = QtGui.QComboBox(self.monitor_group_box)
    self.monitor_combo_box.setObjectName('monitor_combo_box')
    self.monitor_layout.addWidget(self.monitor_combo_box, 1, 1, 1, 4)
    # Display Position
    self.override_radio_button = QtGui.QRadioButton(self.monitor_group_box)
    self.override_radio_button.setObjectName('override_radio_button')
    self.monitor_layout.addWidget(self.override_radio_button, 2, 0, 1, 5)
    # Custom position: X/Y/width/height labels in row 3, spin boxes in row 4.
    self.custom_x_label = QtGui.QLabel(self.monitor_group_box)
    self.custom_x_label.setObjectName('custom_x_label')
    self.monitor_layout.addWidget(self.custom_x_label, 3, 1)
    self.custom_X_value_edit = QtGui.QSpinBox(self.monitor_group_box)
    self.custom_X_value_edit.setObjectName('custom_X_value_edit')
    self.custom_X_value_edit.setRange(-9999, 9999)
    self.monitor_layout.addWidget(self.custom_X_value_edit, 4, 1)
    self.custom_y_label = QtGui.QLabel(self.monitor_group_box)
    self.custom_y_label.setObjectName('custom_y_label')
    self.monitor_layout.addWidget(self.custom_y_label, 3, 2)
    self.custom_Y_value_edit = QtGui.QSpinBox(self.monitor_group_box)
    self.custom_Y_value_edit.setObjectName('custom_Y_value_edit')
    self.custom_Y_value_edit.setRange(-9999, 9999)
    self.monitor_layout.addWidget(self.custom_Y_value_edit, 4, 2)
    self.custom_width_label = QtGui.QLabel(self.monitor_group_box)
    self.custom_width_label.setObjectName('custom_width_label')
    self.monitor_layout.addWidget(self.custom_width_label, 3, 3)
    self.custom_width_value_edit = QtGui.QSpinBox(self.monitor_group_box)
    self.custom_width_value_edit.setObjectName('custom_width_value_edit')
    self.custom_width_value_edit.setRange(1, 9999)
    self.monitor_layout.addWidget(self.custom_width_value_edit, 4, 3)
    self.custom_height_label = QtGui.QLabel(self.monitor_group_box)
    self.custom_height_label.setObjectName('custom_height_label')
    self.monitor_layout.addWidget(self.custom_height_label, 3, 4)
    self.custom_height_value_edit = QtGui.QSpinBox(self.monitor_group_box)
    self.custom_height_value_edit.setObjectName('custom_height_value_edit')
    self.custom_height_value_edit.setRange(1, 9999)
    self.monitor_layout.addWidget(self.custom_height_value_edit, 4, 4)
    self.display_on_monitor_check = QtGui.QCheckBox(self.monitor_group_box)
    # NOTE(review): object name duplicates the combo box's -- presumably a
    # copy-paste slip; should likely be 'display_on_monitor_check'. Confirm
    # nothing looks widgets up by this name before changing it.
    self.display_on_monitor_check.setObjectName('monitor_combo_box')
    self.monitor_layout.addWidget(self.display_on_monitor_check, 5, 0, 1, 5)
    # Set up the stretchiness of each column, so that the first column
    # less stretchy (and therefore smaller) than the others
    self.monitor_layout.setColumnStretch(0, 1)
    self.monitor_layout.setColumnStretch(1, 3)
    self.monitor_layout.setColumnStretch(2, 3)
    self.monitor_layout.setColumnStretch(3, 3)
    self.monitor_layout.setColumnStretch(4, 3)
    self.left_layout.addWidget(self.monitor_group_box)
    # CCLI Details
    self.ccli_group_box = QtGui.QGroupBox(self.left_column)
    self.ccli_group_box.setObjectName('ccli_group_box')
    self.ccli_layout = QtGui.QFormLayout(self.ccli_group_box)
    self.ccli_layout.setObjectName('ccli_layout')
    self.number_label = QtGui.QLabel(self.ccli_group_box)
    self.number_label.setObjectName('number_label')
    self.number_edit = QtGui.QLineEdit(self.ccli_group_box)
    # Only whole numbers are valid CCLI licence numbers.
    self.number_edit.setValidator(QtGui.QIntValidator())
    self.number_edit.setObjectName('number_edit')
    self.ccli_layout.addRow(self.number_label, self.number_edit)
    self.username_label = QtGui.QLabel(self.ccli_group_box)
    self.username_label.setObjectName('username_label')
    self.username_edit = QtGui.QLineEdit(self.ccli_group_box)
    self.username_edit.setObjectName('username_edit')
    self.ccli_layout.addRow(self.username_label, self.username_edit)
    self.password_label = QtGui.QLabel(self.ccli_group_box)
    self.password_label.setObjectName('password_label')
    self.password_edit = QtGui.QLineEdit(self.ccli_group_box)
    self.password_edit.setEchoMode(QtGui.QLineEdit.Password)
    self.password_edit.setObjectName('password_edit')
    self.ccli_layout.addRow(self.password_label, self.password_edit)
    self.left_layout.addWidget(self.ccli_group_box)
    # Background audio
    self.audio_group_box = QtGui.QGroupBox(self.left_column)
    self.audio_group_box.setObjectName('audio_group_box')
    self.audio_layout = QtGui.QVBoxLayout(self.audio_group_box)
    self.audio_layout.setObjectName('audio_layout')
    self.start_paused_check_box = QtGui.QCheckBox(self.audio_group_box)
    self.start_paused_check_box.setObjectName('start_paused_check_box')
    self.audio_layout.addWidget(self.start_paused_check_box)
    self.repeat_list_check_box = QtGui.QCheckBox(self.audio_group_box)
    self.repeat_list_check_box.setObjectName('repeat_list_check_box')
    self.audio_layout.addWidget(self.repeat_list_check_box)
    self.left_layout.addWidget(self.audio_group_box)
    self.left_layout.addStretch()
    # Application Startup
    self.startup_group_box = QtGui.QGroupBox(self.right_column)
    self.startup_group_box.setObjectName('startup_group_box')
    self.startup_layout = QtGui.QVBoxLayout(self.startup_group_box)
    self.startup_layout.setObjectName('startup_layout')
    self.warning_check_box = QtGui.QCheckBox(self.startup_group_box)
    self.warning_check_box.setObjectName('warning_check_box')
    self.startup_layout.addWidget(self.warning_check_box)
    self.auto_open_check_box = QtGui.QCheckBox(self.startup_group_box)
    self.auto_open_check_box.setObjectName('auto_open_check_box')
    self.startup_layout.addWidget(self.auto_open_check_box)
    self.show_splash_check_box = QtGui.QCheckBox(self.startup_group_box)
    self.show_splash_check_box.setObjectName('show_splash_check_box')
    self.startup_layout.addWidget(self.show_splash_check_box)
    self.check_for_updates_check_box = QtGui.QCheckBox(self.startup_group_box)
    self.check_for_updates_check_box.setObjectName('check_for_updates_check_box')
    self.startup_layout.addWidget(self.check_for_updates_check_box)
    self.right_layout.addWidget(self.startup_group_box)
    # Application Settings
    self.settings_group_box = QtGui.QGroupBox(self.right_column)
    self.settings_group_box.setObjectName('settings_group_box')
    self.settings_layout = QtGui.QFormLayout(self.settings_group_box)
    self.settings_layout.setObjectName('settings_layout')
    self.save_check_service_check_box = QtGui.QCheckBox(self.settings_group_box)
    self.save_check_service_check_box.setObjectName('save_check_service_check_box')
    self.settings_layout.addRow(self.save_check_service_check_box)
    self.auto_unblank_check_box = QtGui.QCheckBox(self.settings_group_box)
    self.auto_unblank_check_box.setObjectName('auto_unblank_check_box')
    self.settings_layout.addRow(self.auto_unblank_check_box)
    self.auto_preview_check_box = QtGui.QCheckBox(self.settings_group_box)
    self.auto_preview_check_box.setObjectName('auto_preview_check_box')
    self.settings_layout.addRow(self.auto_preview_check_box)
    # Moved here from image tab
    self.timeout_label = QtGui.QLabel(self.settings_group_box)
    self.timeout_label.setObjectName('timeout_label')
    self.timeout_spin_box = QtGui.QSpinBox(self.settings_group_box)
    self.timeout_spin_box.setObjectName('timeout_spin_box')
    self.timeout_spin_box.setRange(1, 180)
    self.settings_layout.addRow(self.timeout_label, self.timeout_spin_box)
    self.right_layout.addWidget(self.settings_group_box)
    self.right_layout.addStretch()
    # Signals and slots
    self.override_radio_button.toggled.connect(self.on_override_radio_button_pressed)
    self.custom_height_value_edit.valueChanged.connect(self.on_display_changed)
    self.custom_width_value_edit.valueChanged.connect(self.on_display_changed)
    self.custom_Y_value_edit.valueChanged.connect(self.on_display_changed)
    self.custom_X_value_edit.valueChanged.connect(self.on_display_changed)
    self.monitor_combo_box.currentIndexChanged.connect(self.on_display_changed)
    # Reload the tab, as the screen resolution/count may have changed.
    Registry().register_function('config_screen_changed', self.load)
    # Remove for now
    self.username_label.setVisible(False)
    self.username_edit.setVisible(False)
    self.password_label.setVisible(False)
    self.password_edit.setVisible(False)
def retranslateUi(self):
    """
    Translate the general settings tab to the currently selected language.

    Sets the visible text on every widget created in ``setupUi``.
    """
    self.tab_title_visible = translate('OpenLP.GeneralTab', 'General')
    self.monitor_group_box.setTitle(translate('OpenLP.GeneralTab', 'Monitors'))
    self.monitor_radio_button.setText(translate('OpenLP.GeneralTab', 'Select monitor for output display:'))
    self.display_on_monitor_check.setText(translate('OpenLP.GeneralTab', 'Display if a single screen'))
    self.startup_group_box.setTitle(translate('OpenLP.GeneralTab', 'Application Startup'))
    self.warning_check_box.setText(translate('OpenLP.GeneralTab', 'Show blank screen warning'))
    self.auto_open_check_box.setText(translate('OpenLP.GeneralTab', 'Automatically open the last service'))
    self.show_splash_check_box.setText(translate('OpenLP.GeneralTab', 'Show the splash screen'))
    self.check_for_updates_check_box.setText(translate('OpenLP.GeneralTab', 'Check for updates to OpenLP'))
    self.settings_group_box.setTitle(translate('OpenLP.GeneralTab', 'Application Settings'))
    self.save_check_service_check_box.setText(translate('OpenLP.GeneralTab',
                                                        'Prompt to save before starting a new service'))
    self.auto_unblank_check_box.setText(translate('OpenLP.GeneralTab', 'Unblank display when adding new live item'))
    self.auto_preview_check_box.setText(translate('OpenLP.GeneralTab',
                                                  'Automatically preview next item in service'))
    self.timeout_label.setText(translate('OpenLP.GeneralTab', 'Timed slide interval:'))
    self.timeout_spin_box.setSuffix(translate('OpenLP.GeneralTab', ' sec'))
    self.ccli_group_box.setTitle(translate('OpenLP.GeneralTab', 'CCLI Details'))
    self.number_label.setText(UiStrings().CCLINumberLabel)
    self.username_label.setText(translate('OpenLP.GeneralTab', 'SongSelect username:'))
    self.password_label.setText(translate('OpenLP.GeneralTab', 'SongSelect password:'))
    # Moved from display tab
    self.override_radio_button.setText(translate('OpenLP.GeneralTab', 'Override display position:'))
    self.custom_x_label.setText(translate('OpenLP.GeneralTab', 'X'))
    self.custom_y_label.setText(translate('OpenLP.GeneralTab', 'Y'))
    self.custom_height_label.setText(translate('OpenLP.GeneralTab', 'Height'))
    self.custom_width_label.setText(translate('OpenLP.GeneralTab', 'Width'))
    self.audio_group_box.setTitle(translate('OpenLP.GeneralTab', 'Background Audio'))
    self.start_paused_check_box.setText(translate('OpenLP.GeneralTab', 'Start background audio paused'))
    self.repeat_list_check_box.setText(translate('OpenLP.GeneralTab', 'Repeat track list'))
def load(self):
    """
    Load the settings to populate the form.

    Reads every value from the 'Core' settings group, then enables/disables
    the monitor selector vs. the custom-geometry spin boxes to match the
    override radio button, and clears the ``display_changed`` flag.
    """
    settings = Settings()
    settings.beginGroup(self.settings_section)
    # Rebuild the monitor list each time -- screens may have been (un)plugged.
    self.monitor_combo_box.clear()
    self.monitor_combo_box.addItems(self.screens.get_screen_list())
    monitor_number = settings.value('monitor')
    self.monitor_combo_box.setCurrentIndex(monitor_number)
    self.number_edit.setText(settings.value('ccli number'))
    self.username_edit.setText(settings.value('songselect username'))
    self.password_edit.setText(settings.value('songselect password'))
    self.save_check_service_check_box.setChecked(settings.value('save prompt'))
    self.auto_unblank_check_box.setChecked(settings.value('auto unblank'))
    self.display_on_monitor_check.setChecked(self.screens.display)
    self.warning_check_box.setChecked(settings.value('blank warning'))
    self.auto_open_check_box.setChecked(settings.value('auto open'))
    self.show_splash_check_box.setChecked(settings.value('show splash'))
    self.check_for_updates_check_box.setChecked(settings.value('update check'))
    self.auto_preview_check_box.setChecked(settings.value('auto preview'))
    self.timeout_spin_box.setValue(settings.value('loop delay'))
    # The two radio buttons are mutually exclusive views of 'override position'.
    self.monitor_radio_button.setChecked(not settings.value('override position',))
    self.override_radio_button.setChecked(settings.value('override position'))
    self.custom_X_value_edit.setValue(settings.value('x position'))
    self.custom_Y_value_edit.setValue(settings.value('y position'))
    self.custom_height_value_edit.setValue(settings.value('height'))
    self.custom_width_value_edit.setValue(settings.value('width'))
    self.start_paused_check_box.setChecked(settings.value('audio start paused'))
    self.repeat_list_check_box.setChecked(settings.value('audio repeat list'))
    settings.endGroup()
    # Mirror the override state onto the dependent widgets.
    self.monitor_combo_box.setDisabled(self.override_radio_button.isChecked())
    self.custom_X_value_edit.setEnabled(self.override_radio_button.isChecked())
    self.custom_Y_value_edit.setEnabled(self.override_radio_button.isChecked())
    self.custom_height_value_edit.setEnabled(self.override_radio_button.isChecked())
    self.custom_width_value_edit.setEnabled(self.override_radio_button.isChecked())
    # Loading is not a user change, so nothing needs re-processing yet.
    self.display_changed = False
def save(self):
    """
    Save the settings from the form.

    Writes every widget's state back into the 'Core' settings group, then
    re-applies the screen configuration via ``post_set_up``.
    """
    settings = Settings()
    settings.beginGroup(self.settings_section)
    settings.setValue('monitor', self.monitor_combo_box.currentIndex())
    settings.setValue('display on monitor', self.display_on_monitor_check.isChecked())
    settings.setValue('blank warning', self.warning_check_box.isChecked())
    settings.setValue('auto open', self.auto_open_check_box.isChecked())
    settings.setValue('show splash', self.show_splash_check_box.isChecked())
    settings.setValue('update check', self.check_for_updates_check_box.isChecked())
    settings.setValue('save prompt', self.save_check_service_check_box.isChecked())
    settings.setValue('auto unblank', self.auto_unblank_check_box.isChecked())
    settings.setValue('auto preview', self.auto_preview_check_box.isChecked())
    settings.setValue('loop delay', self.timeout_spin_box.value())
    settings.setValue('ccli number', self.number_edit.displayText())
    settings.setValue('songselect username', self.username_edit.displayText())
    settings.setValue('songselect password', self.password_edit.displayText())
    settings.setValue('x position', self.custom_X_value_edit.value())
    settings.setValue('y position', self.custom_Y_value_edit.value())
    settings.setValue('height', self.custom_height_value_edit.value())
    settings.setValue('width', self.custom_width_value_edit.value())
    settings.setValue('override position', self.override_radio_button.isChecked())
    settings.setValue('audio start paused', self.start_paused_check_box.isChecked())
    settings.setValue('audio repeat list', self.repeat_list_check_box.isChecked())
    settings.endGroup()
    # On save update the screens as well
    self.post_set_up(True)
def post_set_up(self, postUpdate=False):
    """
    Apply settings after settings tab has loaded and most of the system so must be delayed.

    :param postUpdate: True when called from ``save``; False at start-up, in
        which case only the slide-delay post-processing is registered.
    """
    self.settings_form.register_post_process('slidecontroller_live_spin_delay')
    # Do not continue on start up.
    if not postUpdate:
        return
    # Push the chosen monitor and override geometry into the ScreenList.
    self.screens.set_current_display(self.monitor_combo_box.currentIndex())
    self.screens.display = self.display_on_monitor_check.isChecked()
    self.screens.override['size'] = QtCore.QRect(
        self.custom_X_value_edit.value(),
        self.custom_Y_value_edit.value(),
        self.custom_width_value_edit.value(),
        self.custom_height_value_edit.value())
    if self.override_radio_button.isChecked():
        self.screens.set_override_display()
    else:
        self.screens.reset_current_display()
    # Only broadcast a screen change if the user actually altered something.
    if self.display_changed:
        self.settings_form.register_post_process('config_screen_changed')
    self.display_changed = False
def on_override_radio_button_pressed(self, checked):
    """
    Enable/disable the monitor selector and the custom-geometry spin boxes
    to match the override radio button, and flag the display as changed.

    :param checked: The state of the check box (boolean).
    """
    self.monitor_combo_box.setDisabled(checked)
    for spin_box in (self.custom_X_value_edit, self.custom_Y_value_edit,
                     self.custom_height_value_edit, self.custom_width_value_edit):
        spin_box.setEnabled(checked)
    self.display_changed = True
def on_display_changed(self):
    """
    Slot fired when the monitor choice, width, height, x position or
    y position changes; marks the screen setup as needing re-processing.
    """
    self.display_changed = True
| crossroadchurch/paul | openlp/core/ui/generaltab.py | Python | gpl-2.0 | 20,811 |
from LOTlib import break_ctrlc
from LOTlib.Inference.Samplers.MetropolisHastings import MHSampler
from LOTlib.Examples.RationalRules.Model.Grammar import grammar
from LOTlib.Examples.RationalRules.Model.Data import data
from LOTlib.Miscellaneous import q
from Hypothesis import *
def run():
    """A version that cares more about recent data, showing how to use
    Hypotheses.DecayedLikelihoodHypothesis.

    NOTE: this module is Python 2 code (print statement below).
    """
    G = grammar

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Create an initial hypothesis
    # This is where we set a number of relevant variables -- whether to use RR, alpha, etc.
    h0 = MyHypothesis(G, ll_decay=1.0, rrAlpha=1.0, args=['x'])

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Run the MH
    # Run the vanilla sampler. Without steps, it will run infinitely
    # this prints out posterior (posterior_score), prior, likelihood,
    for h in break_ctrlc(MHSampler(h0, data, 10000, skip=100)):
        print h.posterior_score, h.prior, h.likelihood, q(h)

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # This setup requires the *later* data to be upweighted, meaning that hypotheses that get
    # later data wrong should be given lower likelhood. But also with the decay, the overall
    # magnitude of the likelihood decreases. As in:
    #-12.2035235691 -9.93962659915 -2.26389696996 'and_(is_shape_(x, 'triangle'), is_color_(x, 'blue'))'
    #-10.7313040795 -9.93962659915 -0.791677480378 'and_(is_shape_(x, 'triangle'), is_color_(x, 'red'))'
# Allow this example to be executed directly as a script.
if __name__ == "__main__":
    run()
| ebigelow/LOTlib | LOTlib/Examples/RationalRules/MemoryDecay/Run.py | Python | gpl-3.0 | 1,652 |
#encoding=utf-8
from app.proto.common.data_parse import hex_to_dec, dec_to_hex
from app.proto.common.hachi.core import XBee
from app.proto.frames.RFIDFrames import rfid_frame
class RFIDController:
    """Parses RFID reader byte streams into packet containers.

    Decoding is delegated to the ``rfid_frame`` construct declared in
    ``app.proto.frames.RFIDFrames``; this class is a thin wrapper around it.
    """

    def __init__(self, escaped=True):
        # The XBee radio link is not needed for pure parsing, so it is
        # left disabled here.
        # self.xbee = XBee()
        pass

    def parse_pkgs(self, bytestream):
        """Parse *bytestream* and return ``(packets, leftovers)``.

        ``leftovers`` holds trailing bytes that did not form a complete
        packet; processing them is still a TODO (original note: leftover
        data is not yet handled).
        """
        container = rfid_frame.parse(bytestream)
        return container.packets, container.leftovers

    def make_packet(self, packet, *args, **kwargs):
        """Build an outgoing packet (not implemented yet).

        Bug fix: the original definition was missing ``self``, so calling
        it on an instance would have bound the instance to ``packet``.
        """
        pass
# rfid = RFIDController()
# rfid.parse_pkgs("FF FF F1 07 0E 01 00 13 8E 88 00 04 00 47 ").pkgs
# frame = bytearray.fromhex("FF FF F1 07 0E 01 00 13 8E 88 00 04 00 47 ")
# frame = bytearray.fromhex("ff ff f1 07 44 0a 80 13 8e 00003f80139400003f 00138d00003f 00138f00003f00139a00003f00139c00003f00138a00003d00138c00003d00139900003d00139b0000066e32")
# FF FF F1 07 26 05 00 13 9A 00 00 1D 80 13 8E 00 00 1D 00 13 8C 00 00 1D 00 13 8D 00 00 15 80 13 94 00 00 13 77 F9
# pkgs,leftovers = rfid.parse_pkgs(frame)
# print pkgs
#
# data = ""
# for pkg in pkgs[0].data.block:
# for cardID in pkg.CardID:
# data += dec_to_hex(cardID)
# print hex_to_dec(data)
# print dec_to_hex(pkg.triggerID) == ""
# data = ""
# print pkgs
# print pkgs[0].data.block[0].CardID
# print pkgs[0].data.block[0].triggerID
# print pkgs[0].data.block[1].CardID
# print len(leftovers)
# data = ""
# for ii in pkgs[0].data.block[0].CardID:
# data += dec_to_hex(ii)
# print data
# sent_getReaderID = None
# sent_scanReader = None
# sent_getDoorForbValue = None
# sent_setDoorForbValue = None
#
# #设置得到阅读器id的指令
# def set_getReaderID(self,getReaderID_str_hex):
# self.sent_getReaderID = getReaderID_str_hex
# return self
#
# # get_readerID
# # 得到阅读器id:get_readerID(receive_ReaderID)
# def get_readerID(receive_ReaderID):
# return receive_ReaderID[8:10]
#
# #设置扫描电子标签的指令
# def set_scanReader(self,scanReader_str_hex):
# self.sent_scanReader = scanReader_str_hex
# return self
#
# #设置得到门禁值的指令
# def set_getDoorForbValue(self,getDoorForbValue_str_hex):
# self.sent_getDoorForbValue = getDoorForbValue_str_hex
# return self
#
# #设置设置门禁值的指令
# def set_setDoorForbValue(self,setDoorForbValue_str_hex):
# self.sent_setDoorForbValue = setDoorForbValue_str_hex
# return self
#
# # get_reader_status
# #得到阅读器的状态
# def get_reader_status(receive_scanReaderValue):
# return receive_scanReaderValue[10:12]
#
# # get_three_values
# # 解析扫描阅读器后得到的,电子标签,低频触发器,卡片的相对时间
# #返回的格式为[[str1,str2,str3],[],[],[]],其中str1是电子标签的ID,str2是低频触发器ID,str3是相对时间
# def get_three_values(
# receive_scanReaderValue): # FF FF F1 07 0E 01 00 13 8E 00 00 F2 1D 65
# scan_values = receive_scanReaderValue[12:-4]
# values_list = []
# for x in range(0, len(scan_values), 12):
# values_list.append(scan_values[0 + x:12 + x])
# print values_list[x]
# return [[hex_to_dec(y[0:6]), hex_to_dec(y[6:8]), hex_to_dec(y[8:12])] for y in values_list]
#
# print get_three_values("FFFFF1070E0100138E0000F21D65")
#
# # 得到门限值
# def get_DoorForbValue(receive_DoorForbValue):
# return receive_DoorForbValue[8:10]
#
# # set_DoorForbValue
# def set_DoorForbValue(str_intDoorForbValue):
# return "FFFFF306" + str_intDoorForbValue + "校验结果"
#
# # parse_setDoorForbResult
# #解析设置门限的返回结果信息,如果结果得到的是AA,那么就是设置成功,如果是55,那么就是设置失败
# def parse_setDoorForbResult(receive_setDoorForbResult):
# return receive_setDoorForbResult[8:10]
#
# # get_instruct_code
# # 得到指令码:get_instruct_code("FFFFF205F7")
# def get_instruct_code(str_instruct):
# return str_instruct[4:6]
#
# # get_instruct_length
# #得到指令长度:get_instruct_length("FFFFF205F7")
# # def get_instruct_length(str_instruct):
# # if (0 == cmp(str_instruct, receive_scanReaderValue)):
# # return str_instruct[8:10]
# # return str_instruct[6:8]
| kooksee/TIOT | test/project/src/app/proto/controller/RFIDController.py | Python | gpl-2.0 | 4,692 |
#!/usr/bin/env python
import logging
from pdb import set_trace
import requests
import simplejson
from time import time
import os
import facebook
# MY_API_URL
# MY_SITE_MSG
# MY_GROUP_NAME
# POST_TO_ID = None
def run():
    """Fetch upcoming events, render them into a message, and post it."""
    post(create_msg(get_from_cal_json()))
def get_from_cal_json():
    """Download events from the OpenACalendar API.

    Reads the module-level MY_API_URL (which must be defined before this
    runs) and returns only events that start within the next seven days
    and are not marked deleted.
    """
    print "Getting data from OpenACalendar"
    r = requests.get(MY_API_URL)
    if r.status_code != requests.codes.ok:
        r.raise_for_status()
    j = simplejson.loads(r.text)
    now = time()
    # Window of interest: [now, now + 7 days].
    inaweek = now + 60 * 60 * 24 * 7
    data = [
        x for x in j['data']
        if x['start']['timestamp'] > now
        and x['start']['timestamp'] < inaweek
        and not x['deleted']
    ]
    print "Got Data From OpenACalendar"
    return data
def create_msg(data):
    """Render the event list into the text of a single post.

    Side effect: each event dict gains a 'displaystart' key copied from
    its display-timezone start. The message is a MY_SITE_MSG header
    followed by start time, summary and URL for each event, with blank
    lines between entries.
    """
    for event in data:
        event['displaystart'] = event['start']['displaytimezone']

    lines = [MY_SITE_MSG + ':', '']
    for event in data:
        lines.extend([
            event['displaystart'],
            event['summaryDisplay'],
            event['url'],
            '',
        ])
    return '\n'.join(lines)
def get_group_ids(graph):
    """Return the Facebook group IDs whose names match MY_GROUP_NAME.

    The access token must carry the user_groups permission.
    """
    print "Getting Groups ID"
    # need user_groups permission
    # Why doesn't Facebook provide an API for getting the
    # group id from the name?
    my_groups = graph.get_connections('me', 'groups')['data']
    print "Got Group ID"
    # Add your group names here
    group_names = [
        MY_GROUP_NAME,
    ]
    assert group_names, "Need to add some groups to post to"
    return [x['id'] for x in my_groups if x['name'] in group_names]
def post(msg):
    """Post *msg* to the configured Facebook group(s).

    NOTE(review): the unconditional ``return`` below makes this a dry
    run - the message is printed but never actually posted. Remove the
    early return to enable real posting.
    """
    token = os.environ['FACEBOOK_ACCESS_TOKEN']
    graph = facebook.GraphAPI(token)
    profile = graph.get_object("me")  # sanity check that the token works
    if POST_TO_ID is None:
        group_ids = get_group_ids(graph)
    else:
        group_ids = [ POST_TO_ID, ]
    print msg
    return
    # Unreachable while the early return above is in place.
    for group_id in group_ids:
        print "Posting to "+str(group_id)
        graph.put_object(str(group_id), "feed", message=msg)
        print "Posted!"
if __name__ == '__main__':
    # Each config name must be defined by editing this file; a NameError
    # here means it is still missing, so explain and bail out.
    # NOTE(review): POST_TO_ID is also required by post() but is not
    # checked here - confirm it is set before enabling real posting.
    try:
        MY_API_URL
    except:
        print "Set your MY_API_URL e.g. 'http://jazzcal.com/api1/events.json'"
        exit (-1)
    try:
        MY_SITE_MSG
    except:
        print "Set your MY_SITE_MSG e.g. 'This week's jazz gigs on Jazzcal.com'"
        exit (-1)
    try:
        MY_GROUP_NAME
    except:
        print "Set your MY_GROUP_NAME"
        exit (-1)
    try:
        token = os.environ['FACEBOOK_ACCESS_TOKEN']
    except:
        print "Set the env var FACEBOOK_ACCESS_TOKEN"
        exit (-1)
    run()
# eof
| OpenACalendar/OpenACalendar-Tools-Social | example-facebook-post-weekly/facebook-post-weekly.py | Python | bsd-3-clause | 2,695 |
from ..generic import ToStrMixin, file_get_content
from itertools import ifilter, izip
from functools import wraps
import collections
import os
import os.path
def reverse_enumerate(lst):
    """Lazily yield ``(index, item)`` pairs of *lst* from last to first."""
    return izip(xrange(len(lst)-1, -1, -1), reversed(lst))
def filter_predicate(file_name, file_path):
    """Return True for regular, non-hidden files inside *file_path*."""
    if file_name.startswith('.'):
        return False
    return os.path.isfile(os.path.join(file_path, file_name))
class LicenseMixin(ToStrMixin):
    """Shared lookup helper for license containers.

    Subclasses provide ``__getitem__``; ``get_license`` turns lookup
    failures into ``None`` instead of propagating exceptions.
    """

    def get_license(self, license):
        """Return the text of *license*, or ``None`` when unavailable."""
        try:
            text = self[license]
        except (TypeError, KeyError):
            return None
        return text
class Licenses(LicenseMixin):
    """Collection of license texts found in a tree's ``licenses`` directory.

    Lookups are case-insensitive; ``licenses_dict`` maps lowercased names
    to the real on-disk file names.
    """

    __slots__ = ('is_valid', 'licenses_dict', 'licenses_path', 'tree_path')

    def __init__(self, tree_path):
        self.licenses_dict = {}
        self.is_valid = False
        self.tree_path = tree_path
        self.licenses_path = os.path.join(tree_path, 'licenses')
        # Only mark valid (and scan) when the licenses dir actually exists.
        if os.path.isdir(self.licenses_path):
            self.is_valid = True
            self._fetch_licenses_list()

    def _fetch_licenses_list(self):
        """Scan the licenses directory and build the name-lookup dict."""
        dir_list = os.listdir(self.licenses_path)
        f = lambda x: filter_predicate(x, self.licenses_path)
        licenses_list = ((s.lower(), s) for s in ifilter(f, dir_list))
        self.licenses_dict = dict(licenses_list)

    def __len__(self):
        return len(self.licenses_dict)

    def __contains__(self, item):
        # Case-insensitive membership test.
        item = unicode(item)
        return item.lower() in self.licenses_dict

    def __iter__(self):
        # Iterates over the original (case-preserved) license names.
        return self.licenses_dict.itervalues()

    def __eq__(self, other):
        # NOTE(review): returns None (falsy) instead of NotImplemented for
        # non-Licenses operands - confirm this is intended.
        if isinstance(other, Licenses):
            return other.tree_path == self.tree_path

    def __ne__(self, other):
        return not self.__eq__(other)

    def __or__(self, other):
        # ``a | b`` builds a LicensesSet from the two collections.
        return LicensesSet([self, other])

    def hash(self):
        # NOTE(review): probably intended to be ``__hash__`` - as written,
        # instances still use the default identity hash.
        return hash(self.tree_path)

    def get_license_path(self, license):
        """Return the full path of *license*; raise KeyError if unknown,
        TypeError if the name cannot be converted to unicode."""
        try:
            key = unicode(license).lower()
        except:
            raise TypeError
        return os.path.join(self.licenses_path, self.licenses_dict[key])

    def __getitem__(self, key):
        return file_get_content(self.get_license_path(key))

    def __unicode__(self):
        return unicode(self.tree_path)
def preinit_cache(func):
    """Method decorator: lazily build the instance cache before *func* runs.

    The wrapped method calls ``self._precache_objects()`` whenever
    ``self._cache_is_init`` is false, then delegates to *func*.
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        needs_init = not self._cache_is_init
        if needs_init:
            self._precache_objects()
        return func(self, *args, **kwargs)
    return wrapper
class LicensesSet(LicenseMixin):
    """Ordered, de-duplicated union of several ``Licenses`` collections.

    Lookups are case-insensitive; when the same license name exists in
    several collections, the one added earliest wins (see the reversed
    iteration in ``_precache_objects``).
    """

    __slots__ = ('licenses_list', 'licenses_set', '_cache', '_cache_is_init')

    def __init__(self, val):
        self.licenses_list = []     # insertion order preserved
        self.licenses_set = set()   # fast duplicate detection
        self._cache = {}            # lowercased name -> index in licenses_list
        self._cache_is_init = False
        if isinstance(val, LicensesSet):
            # Copy-constructor path.
            obj = val.copy()
            self.licenses_list = obj.licenses_list
            self.licenses_set = obj.licenses_set
            self._cache = obj._cache
            self._cache_is_init = obj._cache_is_init
        else:
            if not isinstance(val, collections.Iterable):
                raise TypeError
            for item in val:
                if not isinstance(item, Licenses):
                    raise TypeError
                self.add_licenses(item)

    def _precache_objects(self):
        """Rebuild the name -> collection-index lookup cache."""
        cache = {}
        # Iterate in reverse so earlier collections overwrite later ones,
        # giving them priority on duplicate names.
        for num, licenses in reverse_enumerate(self.licenses_list):
            for key in licenses.licenses_dict.iterkeys():
                cache[key] = num
        self._cache = cache
        self._cache_is_init = True

    def copy(self):
        """Return a new LicensesSet over the same collections."""
        return LicensesSet(self.licenses_list)

    def add_licenses(self, licenses):
        """Append *licenses* if it is a valid, not-yet-added ``Licenses``."""
        if not isinstance(licenses, Licenses):
            return None
        if (not licenses in self.licenses_set) and licenses.is_valid:
            self.licenses_list.append(licenses)
            self.licenses_set.add(licenses)
            self._cache_is_init = False  # cache is now stale

    def merge(self, licenses):
        """Merge a ``Licenses`` or another ``LicensesSet`` into this one."""
        if isinstance(licenses, Licenses):
            self.add_licenses(licenses)
        elif isinstance(licenses, LicensesSet):
            # Fix: the original loop variable shadowed the parameter.
            for item in licenses.licenses_list:
                self.add_licenses(item)
        else:
            raise TypeError

    def __or__(self, other):
        try:
            obj = self.copy()
            obj.merge(other)
            return obj
        except TypeError:
            return NotImplemented

    def __ior__(self, other):
        # Bug fix: the original returned merge()'s result, which is None,
        # so ``a |= b`` rebound ``a`` to None. In-place operators must
        # return self.
        self.merge(other)
        return self

    @preinit_cache
    def __contains__(self, item):
        item = unicode(item)
        return item.lower() in self._cache

    @preinit_cache
    def __len__(self):
        return len(self._cache)

    @preinit_cache
    def __getitem__(self, key):
        try:
            key = unicode(key).lower()
        except:
            raise TypeError
        # key is already lowercased above.
        return self.licenses_list[self._cache[key]][key]

    def __unicode__(self):
        res = ""
        for num, licenses in enumerate(self.licenses_list):
            if num == 0:
                res += repr(licenses)
            else:
                res += ', %s' % repr(licenses)
        return '[%s]' % res
| bacher09/gentoo-packages | gpackages/libs/package_info/generic_metadata/license_text.py | Python | gpl-2.0 | 5,138 |
from xml.dom import minidom
from .oauth import BaseOAuth2
class NaverOAuth2(BaseOAuth2):
    """Naver OAuth authentication backend"""
    name = 'naver'
    AUTHORIZATION_URL = 'https://nid.naver.com/oauth2.0/authorize'
    ACCESS_TOKEN_URL = 'https://nid.naver.com/oauth2.0/token'
    ACCESS_TOKEN_METHOD = 'POST'
    EXTRA_DATA = [
        ('id', 'id'),
    ]

    def get_user_id(self, details, response):
        """The Naver account id identifies the user."""
        return response.get('id')

    def get_user_details(self, response):
        """Return user details from Naver account"""
        username = response.get('username')
        return {
            'username': username,
            'email': response.get('email'),
            'fullname': username,
        }

    def user_data(self, access_token, *args, **kwargs):
        """Loads user data from service"""
        response = self.request(
            'https://openapi.naver.com/v1/nid/getUserProfile.xml',
            headers={
                'Authorization': 'Bearer {0}'.format(access_token),
                'Content_Type': 'text/xml'
            }
        )
        dom = minidom.parseString(response.text.encode('utf-8').strip())
        # Each (result key, XML tag name) pair is read from the profile DOM.
        fields = (
            ('id', 'id'),
            ('email', 'email'),
            ('username', 'name'),
            ('nickname', 'nickname'),
            ('gender', 'gender'),
            ('age', 'age'),
            ('birthday', 'birthday'),
            ('profile_image', 'profile_image'),
        )
        return dict(
            (key, self._dom_value(dom, tag)) for key, tag in fields
        )

    def auth_headers(self):
        client_id, client_secret = self.get_key_and_secret()
        headers = {'grant_type': 'authorization_code'}
        headers['code'] = self.data.get('code')
        headers['client_id'] = client_id
        headers['client_secret'] = client_secret
        return headers

    def _dom_value(self, dom, key):
        """Text content of the first *key* element in the profile DOM."""
        node = dom.getElementsByTagName(key)[0]
        return node.childNodes[0].data
| IKholopov/HackUPC2017 | hackupc/env/lib/python3.5/site-packages/social_core/backends/naver.py | Python | apache-2.0 | 2,000 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
PyCOMPSs Testbench
========================
"""
# Imports
from pycompss.api.task import task
from pycompss.api.mpi import mpi
from pycompss.api.constraint import constraint
from pycompss.api.api import compss_wait_on
from pycompss.api.parameter import *
@constraint(computing_units="2")
@mpi(runner="mpirun", computing_nodes="2")
@task(returns=4)
def init(seed):
    """MPI task: each rank returns its rank number plus *seed*.

    returns=4 - presumably one value per MPI process (2 nodes x 2 units);
    confirm against the PyCOMPSs @mpi semantics.
    """
    from mpi4py import MPI
    size = MPI.COMM_WORLD.size
    rank = MPI.COMM_WORLD.rank
    print("Launched MPI task init with {0} MPI processes".format(size))
    return rank+seed
@constraint(computing_units="2")
@mpi(runner="mpirun", computing_nodes="2")
@task(input_data=COLLECTION_IN, returns=4)
def scale(input_data, i):
    """MPI task: rank r returns ``input_data[r] * i``."""
    from mpi4py import MPI
    size = MPI.COMM_WORLD.size
    rank = MPI.COMM_WORLD.rank
    a = input_data[rank]*i
    print("Launched MPI scale with {0} MPI processes".format(size))
    return a
@constraint(computing_units="2")
@mpi(runner="mpirun", computing_nodes="2")
@task(input_data=COLLECTION_IN, returns=4)
def increment(input_data):
    """MPI task: rank r returns ``input_data[r] + 1``."""
    from mpi4py import MPI
    size = MPI.COMM_WORLD.size
    rank = MPI.COMM_WORLD.rank
    a = input_data[rank]+1
    print("Launched MPI process with {0} MPI processes".format(size))
    return a
@constraint(computing_units="2")
@mpi(runner="mpirun", computing_nodes="2")
@task(input_data={Type:COLLECTION_IN, Depth:2}, returns=4)
def merge(input_data):
    """MPI task: rank r returns the sum of the sub-collection input_data[r]."""
    from mpi4py import MPI
    data_size = len(input_data)
    size = MPI.COMM_WORLD.size
    rank = MPI.COMM_WORLD.rank
    # NOTE(review): ``batch`` is computed but never used below.
    batch = int(data_size/size)
    a=0
    for data in input_data[rank]:
        a=a+data
    print("Launched MPI merge with {0} MPI processes".format(size))
    return a
def main():
    """Chain the MPI tasks and validate the final result.

    For each scale factor i in [1, 10, 20, 30]: scale init(0)'s output by
    i, increment it twice, and collect the partial result. All partials
    are then merged (summed per rank) and checked against the expected
    values [14, 68, 128, 188].
    """
    input_data = init(0)
    partial_res=[]
    for i in [1,10,20,30]:
        p_data = scale(input_data, i)
        for j in range(2):
            p_data = increment(p_data)
        partial_res.append(p_data)
    results= merge(partial_res)
    # Synchronize: fetch the actual values from the COMPSs runtime.
    results = compss_wait_on(results)
    print("Results: " + str(results))
    if results[0] != 14 or results[1] != 68 or results[2] != 128 or results[3] != 188 :
        raise Exception ("Error results " + str(results) + " != [14, 68, 128, 188]")
    print ("Finished without error")
if __name__ == '__main__':
    # Script entry point.
    main()
| mF2C/COMPSs | tests/sources/python/9_python_mpi_collection/src/python_mpi_collection.py | Python | apache-2.0 | 2,343 |
# Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
import testtools
from testtools import testcase as tc
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
CONF = config.CONF
class ManageNFSShareTest(base.BaseSharesAdminTest):
    """Admin tests for managing/unmanaging NFS shares."""

    protocol = 'nfs'

    # NOTE(vponomaryov): be careful running these tests using generic driver
    # because cinder volumes will stay attached to service Nova VM and
    # won't be deleted.

    @classmethod
    @testtools.skipIf(
        CONF.share.multitenancy_enabled,
        "Only for driver_handles_share_servers = False driver mode.")
    @testtools.skipUnless(
        CONF.share.run_manage_unmanage_tests,
        "Manage/unmanage tests are disabled.")
    def resource_setup(cls):
        """Create share types and shares, then unmanage the shares.

        The unmanaged shares (with their original metadata kept in
        ``cls.shares``) are re-managed and verified by the test methods.
        """
        super(ManageNFSShareTest, cls).resource_setup()
        if cls.protocol not in CONF.share.enable_protocols:
            message = "%s tests are disabled" % cls.protocol
            raise cls.skipException(message)

        # Create share types
        cls.st_name = data_utils.rand_name("manage-st-name")
        cls.st_name_invalid = data_utils.rand_name("manage-st-name-invalid")
        cls.extra_specs = {
            'storage_protocol': CONF.share.capability_storage_protocol,
            'driver_handles_share_servers': False,
            'snapshot_support': six.text_type(
                CONF.share.capability_snapshot_support),
        }
        # Intentionally mismatched specs (DHSS=True) so the scheduler
        # rejects a manage request that uses this type.
        cls.extra_specs_invalid = {
            'storage_protocol': CONF.share.capability_storage_protocol,
            'driver_handles_share_servers': True,
            'snapshot_support': six.text_type(
                CONF.share.capability_snapshot_support),
        }
        cls.st = cls.create_share_type(
            name=cls.st_name,
            cleanup_in_class=True,
            extra_specs=cls.extra_specs)
        cls.st_invalid = cls.create_share_type(
            name=cls.st_name_invalid,
            cleanup_in_class=True,
            extra_specs=cls.extra_specs_invalid)

        creation_data = {'kwargs': {
            'share_type_id': cls.st['share_type']['id'],
            'share_protocol': cls.protocol,
        }}

        # Data for creating shares in parallel; one extra share per
        # microversion-dependent test below.
        data = [creation_data, creation_data]
        if utils.is_microversion_ge(CONF.share.max_api_microversion, "2.5"):
            data.append(creation_data)
        if utils.is_microversion_ge(CONF.share.max_api_microversion, "2.8"):
            data.append(creation_data)
        if utils.is_microversion_ge(CONF.share.max_api_microversion, "2.16"):
            data.append(creation_data)
        shares_created = cls.create_shares(data)

        cls.shares = []
        # Load all share data (host, etc.)
        for share in shares_created:
            # Unmanage shares from manila
            get_share = cls.shares_v2_client.get_share(share['id'])
            if utils.is_microversion_ge(
                    CONF.share.max_api_microversion, "2.9"):
                # Since 2.9 export locations come from a separate endpoint.
                get_share["export_locations"] = (
                    cls.shares_v2_client.list_share_export_locations(
                        share["id"])
                )
            cls.shares.append(get_share)
            cls.shares_client.unmanage_share(share['id'])
            cls.shares_client.wait_for_resource_deletion(
                share_id=share['id'])

    def _test_manage(self, share, is_public=False,
                     version=CONF.share.max_api_microversion):
        """Manage a previously unmanaged share, verify it, then delete it."""
        name = "Name for 'managed' share that had ID %s" % share['id']
        description = "Description for 'managed' share"

        # Manage share
        managed_share = self.shares_v2_client.manage_share(
            service_host=share['host'],
            export_path=share['export_locations'][0],
            protocol=share['share_proto'],
            share_type_id=self.st['share_type']['id'],
            name=name,
            description=description,
            is_public=is_public,
            version=version,
        )

        # Add managed share to cleanup queue
        self.method_resources.insert(
            0, {'type': 'share', 'id': managed_share['id'],
                'client': self.shares_client})

        # Wait for success
        self.shares_v2_client.wait_for_share_status(managed_share['id'],
                                                    'available')

        # Verify data of managed share
        self.assertEqual(name, managed_share['name'])
        self.assertEqual(description, managed_share['description'])
        self.assertEqual(share['host'], managed_share['host'])
        self.assertEqual(share['share_proto'], managed_share['share_proto'])

        # The 'share_type' field holds the type id from 2.6 on, the type
        # name before that.
        if utils.is_microversion_ge(version, "2.6"):
            self.assertEqual(self.st['share_type']['id'],
                             managed_share['share_type'])
        else:
            self.assertEqual(self.st['share_type']['name'],
                             managed_share['share_type'])

        if utils.is_microversion_ge(version, "2.8"):
            self.assertEqual(is_public, managed_share['is_public'])
        else:
            self.assertFalse(managed_share['is_public'])

        if utils.is_microversion_ge(version, "2.16"):
            self.assertEqual(share['user_id'], managed_share['user_id'])
        else:
            self.assertNotIn('user_id', managed_share)

        # Delete share
        self.shares_v2_client.delete_share(managed_share['id'])
        self.shares_v2_client.wait_for_resource_deletion(
            share_id=managed_share['id'])
        self.assertRaises(lib_exc.NotFound,
                          self.shares_v2_client.get_share,
                          managed_share['id'])

    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    @base.skip_if_microversion_not_supported("2.5")
    def test_manage_with_os_share_manage_url(self):
        self._test_manage(share=self.shares[2], version="2.5")

    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    @base.skip_if_microversion_not_supported("2.8")
    def test_manage_with_is_public_True(self):
        self._test_manage(share=self.shares[3], is_public=True, version="2.8")

    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
    @base.skip_if_microversion_not_supported("2.16")
    def test_manage_show_user_id(self):
        self._test_manage(share=self.shares[4], version="2.16")

    @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
    def test_manage(self):
        # After 'unmanage' operation, share instance should be deleted.
        # Assert not related to 'manage' test, but placed here for
        # resource optimization.
        share_instance_list = self.shares_v2_client.list_share_instances()
        share_ids = [si['share_id'] for si in share_instance_list]
        self.assertNotIn(self.shares[0]['id'], share_ids)

        self._test_manage(share=self.shares[0])

    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
    def test_manage_invalid(self):
        # Try to manage share with invalid parameters, it should not succeed
        # because the scheduler will reject it. If it succeeds, then this test
        # case failed. Then, in order to remove the resource from backend, we
        # need to manage it again, properly, so we can delete it. Consequently
        # the second part of this test also tests that manage operation with a
        # proper share type works.

        def _delete_share(share_id):
            # Reset state first so a share stuck in manage_error can still
            # be deleted.
            self.shares_v2_client.reset_state(share_id)
            self.shares_v2_client.delete_share(share_id)
            self.shares_v2_client.wait_for_resource_deletion(share_id=share_id)
            self.assertRaises(lib_exc.NotFound,
                              self.shares_v2_client.get_share,
                              share_id)

        share = self.shares_v2_client.manage_share(
            service_host=self.shares[1]['host'],
            export_path=self.shares[1]['export_locations'][0],
            protocol=self.shares[1]['share_proto'],
            share_type_id=self.st_invalid['share_type']['id'])
        self.addCleanup(_delete_share, share['id'])
        self.shares_v2_client.wait_for_share_status(
            share['id'], 'manage_error')
        share = self.shares_v2_client.get_share(share['id'])
        self.assertEqual(1, int(share['size']))

        # Delete resource from backend. We need to manage the share properly
        # so it can be removed.
        share = self.shares_v2_client.manage_share(
            service_host=self.shares[1]['host'],
            export_path=self.shares[1]['export_locations'][0],
            protocol=self.shares[1]['share_proto'],
            share_type_id=self.st['share_type']['id'])
        self.addCleanup(_delete_share, share['id'])
        self.shares_v2_client.wait_for_share_status(
            share['id'], 'available')
class ManageCIFSShareTest(ManageNFSShareTest):
    # Same manage/unmanage scenarios, exercised over CIFS.
    protocol = 'cifs'
class ManageGLUSTERFSShareTest(ManageNFSShareTest):
    # Same manage/unmanage scenarios, exercised over GlusterFS.
    protocol = 'glusterfs'
class ManageHDFSShareTest(ManageNFSShareTest):
    # Same manage/unmanage scenarios, exercised over HDFS.
    protocol = 'hdfs'
class ManageCephFSShareTest(ManageNFSShareTest):
    # Same manage/unmanage scenarios, exercised over CephFS.
    protocol = 'cephfs'
| NetApp/manila | manila_tempest_tests/tests/api/admin/test_share_manage.py | Python | apache-2.0 | 9,833 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import json
import random
import math
import time
from Library import toolClass
class ChaseLetv :
def __init__ (self) :
self.videoLink = ''
self.fileUrlPrefix = 'http://api.letv.com/mms/out/video/playJson?platid=1&splatid=104&tss=no&domain=www.letv.com'
self.urlSuffix = '&start=0&end=10000000000&'
self.videoTypeList = {'n': '1000', 'h': '1300', 's': '720p'}
self.videoType = 's'
self.Tools = toolClass.Tools()
def chaseUrl (self) :
result = {'stat': 0, 'msg': ''}
videoID = self.__getVideoID(self.videoLink)
if videoID :
tkey = self.__auth(time.time())
confgFileUrl = self.fileUrlPrefix + '&id=' + str(videoID) + '&tkey=' + str(tkey)
fileUrl = self.__getVideoFileUrl(confgFileUrl)
if fileUrl != False :
fileUrl = self.__getFile(fileUrl)
if fileUrl != '' > 0:
result['msg'] = [fileUrl]
else:
result['stat'] = 1
else :
result['stat'] = 1
else :
result['stat'] = 2
return result
def __getVideoID (self, link) :
result = re.findall(r"/(\d+?)\.html", link)
if len(result) > 0 :
videoID = result[0]
else :
videoID = False
return videoID
def __auth (self, now) :
key = 773625421
now = int (now)
result = self.__letvRor(now, key % 13)
result = self.Tools.xor(result, key)
result = self.__letvRor(result ,key % 17)
return result
def __getVideoFileUrl (self, confgFileUrl) :
pageHeader, pageBody = self.Tools.getPage(confgFileUrl)
info = json.JSONDecoder().decode(pageBody)
# url = str(info['playurl']['domain'][0]) + str(info['playurl']['dispatch'][self.videoTypeList[self.videoType]][0]) + '&format=1&sign=letv&expect=3000&rateid=' + self.videoTypeList[self.videoType]
url = str(info['playurl']['domain'][0]) + str(info['playurl']['dispatch'][self.videoTypeList[self.videoType]][0])
url = url.replace('tss=ios', 'tss=no')
url = url.replace('splatid=101', 'splatid=104')
return url
def __getFile (self, fileUrl) :
pageHeader, pageBody = self.Tools.getPage(fileUrl)
url = ''
if pageHeader[0] == 'HTTP/1.1 302 Moved' :
for x in pageHeader :
if x[:10] == 'Location: ' :
url = x[10:]
break
return url
def __formatList (self, data) :
result = []
temp = []
listContent = re.findall(r"http://(.*)\s+?", data)
for x in listContent:
link = re.sub(r"&start=.*?&end=.*?&", self.urlSuffix, x)
if link not in temp :
temp.append(link)
linkStr = ''
for x in temp:
result.append('http://' + x)
return result
def __letvRor (self, a, b):
i = 0
while(i < b) :
a = self.Tools.rotate(a, 1, 'r+') + self.Tools.rotate((a & 1), 31, 'l');
i += 1
return a | EvilCult/Video-Downloader | Module/letvClass.py | Python | gpl-2.0 | 2,698 |
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
import tempfile
# Find the best implementation available
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import mock
from nose.tools import assert_raises
import numpy as np
import PIL.Image
from . import errors
from . import image as image_utils
import digits
from digits import test_utils
test_utils.skipIfNotFramework('none')
class TestLoadImage():
    """Tests for ``digits.utils.image.load_image``."""

    def test_bad_path(self):
        # Nose generator test: every bogus path must raise LoadImageError.
        for path in [
                'some string',
                '/tmp/not-a-file',
                'http://not-a-url',
        ]:
            yield self.check_none, path

    def check_none(self, path):
        assert_raises(
            errors.LoadImageError,
            image_utils.load_image,
            path,
        )

    def test_good_file(self):
        for args in [
                # created mode, file extension, pixel value, loaded mode (expected)
                # Grayscale
                ('1', 'png', 1, 'L'),
                ('1', 'ppm', 1, 'L'),
                ('L', 'png', 127, 'L'),
                ('L', 'jpg', 127, 'L'),
                ('L', 'ppm', 127, 'L'),
                ('LA', 'png', (127, 255), 'L'),
                # Color
                ('RGB', 'png', (127, 127, 127), 'RGB'),
                ('RGB', 'jpg', (127, 127, 127), 'RGB'),
                ('RGB', 'ppm', (127, 127, 127), 'RGB'),
                ('RGBA', 'png', (127, 127, 127, 255), 'RGB'),
                ('P', 'png', 127, 'RGB'),
                ('CMYK', 'jpg', (127, 127, 127, 127), 'RGB'),
                ('YCbCr', 'jpg', (127, 127, 127), 'RGB'),
        ]:
            yield self.check_good_file, args

    def check_good_file(self, args):
        """Save a 10x10 image in the given mode/format, reload, check mode."""
        orig_mode, suffix, pixel, new_mode = args
        orig = PIL.Image.new(orig_mode, (10, 10), pixel)
        # temp files cause permission errors so just generate the name
        tmp = tempfile.mkstemp(suffix='.' + suffix)
        orig.save(tmp[1])
        new = image_utils.load_image(tmp[1])
        try:
            # sometimes on windows the file is not closed yet
            # which can cause an exception
            os.close(tmp[0])
            os.remove(tmp[1])
        except:
            pass
        assert new is not None, 'load_image should never return None'
        assert new.mode == new_mode, 'Image mode should be "%s", not "%s\nargs - %s' % (new_mode, new.mode, args)

    @mock.patch('digits.utils.image.requests')
    def test_good_url(self, mock_requests):
        # Mock out requests so no network access happens.
        response = mock.Mock()
        response.status_code = mock_requests.codes.ok
        img_file = os.path.join(
            os.path.dirname(digits.__file__),
            'static',
            'images',
            'mona_lisa.jpg',
        )
        with open(img_file, 'rb') as infile:
            response.content = infile.read()
        mock_requests.get.return_value = response
        img = image_utils.load_image('http://some-url')
        assert img is not None

    def test_corrupted_file(self):
        image = PIL.Image.fromarray(np.zeros((10, 10, 3), dtype=np.uint8))

        # Save image to a JPEG buffer.
        buffer_io = StringIO()
        image.save(buffer_io, format='jpeg')
        encoded = buffer_io.getvalue()
        buffer_io.close()

        # Corrupt the second half of the image buffer.
        # (Python 2 integer division on ``size / 2``.)
        size = len(encoded)
        corrupted = encoded[:size / 2] + encoded[size / 2:][::-1]

        # Save the corrupted image to a temporary file.
        fname = tempfile.mkstemp(suffix='.bin')
        f = os.fdopen(fname[0], 'wb')
        fname = fname[1]
        f.write(corrupted)
        f.close()

        assert_raises(
            errors.LoadImageError,
            image_utils.load_image,
            fname,
        )

        os.remove(fname)
class TestResizeImage():
    """Tests for ``digits.utils.image.resize_image``."""

    @classmethod
    def setup_class(cls):
        # One random grayscale and one random color fixture, as both
        # numpy arrays and PIL images.
        cls.np_gray = np.random.randint(0, 255, (10, 10)).astype('uint8')
        cls.pil_gray = PIL.Image.fromarray(cls.np_gray)
        cls.np_color = np.random.randint(0, 255, (10, 10, 3)).astype('uint8')
        cls.pil_color = PIL.Image.fromarray(cls.np_color)

    def test_configs(self):
        # lots of configs tested here
        for h in [10, 15]:
            for w in [10, 16]:
                for t in ['gray', 'color']:
                    # test channels=None (should autodetect channels)
                    if t == 'color':
                        s = (h, w, 3)
                    else:
                        s = (h, w)
                    yield self.verify_pil, (h, w, None, None, t, s)
                    yield self.verify_np, (h, w, None, None, t, s)
                    # test channels={3,1}
                    for c in [3, 1]:
                        for m in ['squash', 'crop', 'fill', 'half_crop']:
                            if c == 3:
                                s = (h, w, 3)
                            else:
                                s = (h, w)
                            yield self.verify_pil, (h, w, c, m, t, s)
                            yield self.verify_np, (h, w, c, m, t, s)

    def verify_pil(self, args):
        # pass a PIL.Image to resize_image and check the returned dimensions
        h, w, c, m, t, s = args
        if t == 'gray':
            i = self.pil_gray
        else:
            i = self.pil_color
        r = image_utils.resize_image(i, h, w, c, m)
        assert r.shape == s, 'Resized PIL.Image (orig=%s) should have been %s, but was %s %s' % (
            i.size, s, r.shape, self.args_to_str(args))
        assert r.dtype == np.uint8, 'image.dtype should be uint8, not %s' % r.dtype

    def verify_np(self, args):
        # pass a numpy.ndarray to resize_image and check the returned dimensions
        h, w, c, m, t, s = args
        r = image_utils.resize_image(i, h, w, c, m) if False else None  # placeholder removed below
| gheinrich/DIGITS-GAN | digits/utils/test_image.py | Python | bsd-3-clause | 6,578 |
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Storage backend for S3 or Storage Servers that follow the S3 Protocol"""
import logging
import math
import re
from boto3 import session as boto_session
from botocore import client as boto_client
from botocore import exceptions as boto_exceptions
from botocore import utils as boto_utils
import eventlet
from oslo_config import cfg
from oslo_utils import encodeutils
from oslo_utils import units
import six
from six.moves import urllib
import glance_store
from glance_store import capabilities
from glance_store.common import utils
import glance_store.driver
from glance_store import exceptions
from glance_store.i18n import _
import glance_store.location
LOG = logging.getLogger(__name__)

# Default sizes are expressed in MiB; configure_add() converts them to bytes.
DEFAULT_LARGE_OBJECT_SIZE = 100  # 100M
DEFAULT_LARGE_OBJECT_CHUNK_SIZE = 10  # 10M
DEFAULT_LARGE_OBJECT_MIN_CHUNK_SIZE = 5  # 5M
DEFAULT_THREAD_POOLS = 10  # 10 pools
# S3 caps a multipart upload at 10000 parts; used to size the chunks.
MAX_PART_NUM = 10000  # 10000 upload parts

# oslo.config options exposed by this driver (registered by the base
# Store machinery via the OPTIONS class attribute).
_S3_OPTS = [
    cfg.StrOpt('s3_store_host',
               help="""
The host where the S3 server is listening.
This configuration option sets the host of the S3 or S3 compatible storage
Server. This option is required when using the S3 storage backend.
The host can contain a DNS name (e.g. s3.amazonaws.com, my-object-storage.com)
or an IP address (127.0.0.1).
Possible values:
* A valid DNS name
* A valid IPv4 address
Related Options:
* s3_store_access_key
* s3_store_secret_key
"""),
    cfg.StrOpt('s3_store_access_key',
               secret=True,
               help="""
The S3 query token access key.
This configuration option takes the access key for authenticating with the
Amazon S3 or S3 compatible storage server. This option is required when using
the S3 storage backend.
Possible values:
* Any string value that is the access key for a user with appropriate
privileges
Related Options:
* s3_store_host
* s3_store_secret_key
"""),
    cfg.StrOpt('s3_store_secret_key',
               secret=True,
               help="""
The S3 query token secret key.
This configuration option takes the secret key for authenticating with the
Amazon S3 or S3 compatible storage server. This option is required when using
the S3 storage backend.
Possible values:
* Any string value that is a secret key corresponding to the access key
specified using the ``s3_store_host`` option
Related Options:
* s3_store_host
* s3_store_access_key
"""),
    cfg.StrOpt('s3_store_bucket',
               help="""
The S3 bucket to be used to store the Glance data.
This configuration option specifies where the glance images will be stored
in the S3. If ``s3_store_create_bucket_on_put`` is set to true, it will be
created automatically even if the bucket does not exist.
Possible values:
* Any string value
Related Options:
* s3_store_create_bucket_on_put
* s3_store_bucket_url_format
"""),
    cfg.BoolOpt('s3_store_create_bucket_on_put',
                default=False,
                help="""
Determine whether S3 should create a new bucket.
This configuration option takes boolean value to indicate whether Glance should
create a new bucket to S3 if it does not exist.
Possible values:
* Any Boolean value
Related Options:
* None
"""),
    cfg.StrOpt('s3_store_bucket_url_format',
               default='auto',
               help="""
The S3 calling format used to determine the object.
This configuration option takes access model that is used to specify the
address of an object in an S3 bucket.
NOTE:
In ``path``-style, the endpoint for the object looks like
'https://s3.amazonaws.com/bucket/example.img'.
And in ``virtual``-style, the endpoint for the object looks like
'https://bucket.s3.amazonaws.com/example.img'.
If you do not follow the DNS naming convention in the bucket name, you can
get objects in the path style, but not in the virtual style.
Possible values:
* Any string value of ``auto``, ``virtual``, or ``path``
Related Options:
* s3_store_bucket
"""),
    cfg.IntOpt('s3_store_large_object_size',
               default=DEFAULT_LARGE_OBJECT_SIZE,
               help="""
What size, in MB, should S3 start chunking image files and do a multipart
upload in S3.
This configuration option takes a threshold in MB to determine whether to
upload the image to S3 as is or to split it (Multipart Upload).
Note: You can only split up to 10,000 images.
Possible values:
* Any positive integer value
Related Options:
* s3_store_large_object_chunk_size
* s3_store_thread_pools
"""),
    cfg.IntOpt('s3_store_large_object_chunk_size',
               default=DEFAULT_LARGE_OBJECT_CHUNK_SIZE,
               help="""
What multipart upload part size, in MB, should S3 use when uploading parts.
This configuration option takes the image split size in MB for Multipart
Upload.
Note: You can only split up to 10,000 images.
Possible values:
* Any positive integer value (must be greater than or equal to 5M)
Related Options:
* s3_store_large_object_size
* s3_store_thread_pools
"""),
    cfg.IntOpt('s3_store_thread_pools',
               default=DEFAULT_THREAD_POOLS,
               help="""
The number of thread pools to perform a multipart upload in S3.
This configuration option takes the number of thread pools when performing a
Multipart Upload.
Possible values:
* Any positive integer value
Related Options:
* s3_store_large_object_size
* s3_store_large_object_chunk_size
""")
]
class UploadPart(object):
    """Bookkeeping container for one part of an S3 multipart upload.

    Instances are handed to :func:`run_upload`, which fills in ``etag``
    and ``size`` on success, or flips ``success`` to False on failure.
    """

    def __init__(self, mpu, fp, partnum, chunks):
        """Record the immutable inputs and reset the result fields.

        :param mpu: the create_multipart_upload response this part belongs to
        :param fp: file-like object holding this part's payload
        :param partnum: 1-based S3 part number
        :param chunks: payload length in bytes
        """
        # Inputs describing what to upload.
        self.mpu = mpu
        self.fp = fp
        self.partnum = partnum
        self.chunks = chunks
        # Results, populated by run_upload() after the transfer.
        self.size = 0
        self.etag = {}
        self.success = True
def run_upload(s3_client, bucket, key, part):
    """Upload the upload part into S3 and set returned etag and size to its
    part info.

    On failure the part is marked unsuccessful instead of raising, so the
    caller (_add_multipart) can abort the whole multipart upload afterwards.
    The part's file object is always closed.

    :param s3_client: An object with credentials to connect to S3
    :param bucket: The S3 bucket name
    :param key: The object name to be stored (image identifier)
    :param part: UploadPart object which used during multipart upload
    """
    pnum = part.partnum
    bsize = part.chunks
    upload_id = part.mpu['UploadId']
    LOG.debug("Uploading upload part in S3 partnum=%(pnum)d, "
              "size=%(bsize)d, key=%(key)s, UploadId=%(UploadId)s",
              {'pnum': pnum, 'bsize': bsize, 'key': key,
               'UploadId': upload_id})

    try:
        # Keep the response in its own variable: the original code rebound
        # ``key`` here, shadowing the object name parameter.
        response = s3_client.upload_part(Body=part.fp,
                                         Bucket=bucket,
                                         ContentLength=bsize,
                                         Key=key,
                                         PartNumber=pnum,
                                         UploadId=upload_id)
        part.etag[part.partnum] = response['ETag']
        part.size = bsize
    except boto_exceptions.ClientError as e:
        # botocore error codes are strings (e.g. 'NoSuchBucket'), so they
        # must be formatted with %s — %d made this log line itself fail.
        error_code = e.response['Error']['Code']
        error_message = e.response['Error']['Message']
        LOG.warning("Failed to upload part in S3 partnum=%(pnum)d, "
                    "size=%(bsize)d, error code=%(error_code)s, "
                    "error message=%(error_message)s",
                    {'pnum': pnum, 'bsize': bsize, 'error_code': error_code,
                     'error_message': error_message})
        part.success = False
    finally:
        part.fp.close()
class StoreLocation(glance_store.location.StoreLocation):
    """Class describing an S3 URI.

    An S3 URI can look like any of the following:

        s3://accesskey:secretkey@s3.amazonaws.com/bucket/key-id
        s3+https://accesskey:secretkey@s3.amazonaws.com/bucket/key-id

    The s3+https:// URIs indicate there is an HTTPS s3service URL
    """

    def process_specs(self):
        """Populate the location attributes from the ``specs`` dict."""
        self.scheme = self.specs.get('scheme', 's3')
        self.accesskey = self.specs.get('accesskey')
        self.secretkey = self.specs.get('secretkey')
        s3_host = self.specs.get('s3serviceurl')
        self.bucket = self.specs.get('bucket')
        self.key = self.specs.get('key')

        # The scheme mirrors whether the service URL is HTTPS; the scheme
        # prefix itself is stripped from the stored service URL.
        if s3_host.startswith('https://'):
            self.scheme = 's3+https'
            s3_host = s3_host[len('https://'):].strip('/')
        elif s3_host.startswith('http://'):
            s3_host = s3_host[len('http://'):].strip('/')
        self.s3serviceurl = s3_host.strip('/')

    def _get_credstring(self):
        """Return the ``accesskey:secretkey@`` URI prefix, or ''."""
        if self.accesskey:
            return '%s:%s@' % (self.accesskey, self.secretkey)
        return ''

    def get_uri(self):
        """Re-assemble the canonical store URI for this location."""
        return "%s://%s%s/%s/%s" % (self.scheme, self._get_credstring(),
                                    self.s3serviceurl, self.bucket, self.key)

    def parse_uri(self, uri):
        """Parse URLs.

        Note that an Amazon AWS secret key can contain the forward slash,
        which breaks urlparse miserably. This function works around that
        issue.

        :raises: `exceptions.BadStoreUri` for any malformed URI
        """
        # Make sure that URIs that contain multiple schemes, such as:
        # s3://accesskey:secretkey@https://s3.amazonaws.com/bucket/key-id
        # are immediately rejected.
        if uri.count('://') != 1:
            reason = ("URI cannot contain more than one occurrence "
                      "of a scheme. If you have specified a URI like "
                      "s3://accesskey:secretkey@"
                      "https://s3.amazonaws.com/bucket/key-id"
                      ", you need to change it to use the "
                      "s3+https:// scheme, like so: "
                      "s3+https://accesskey:secretkey@"
                      "s3.amazonaws.com/bucket/key-id")
            LOG.info("Invalid store uri: %s", reason)
            raise exceptions.BadStoreUri(uri=uri)

        pieces = urllib.parse.urlparse(uri)
        self.validate_schemas(uri, valid_schemas=(
            's3://', 's3+http://', 's3+https://'))
        self.scheme = pieces.scheme
        path = pieces.path.strip('/')
        netloc = pieces.netloc.strip('/')
        entire_path = (netloc + '/' + path).strip('/')

        if '@' in uri:
            try:
                creds, path = entire_path.split('@')
            except ValueError:
                # More than one '@' previously escaped as a bare
                # ValueError; report it as a malformed URI instead.
                LOG.error("Badly formed S3 URI")
                raise exceptions.BadStoreUri(uri=uri)
            cred_parts = creds.split(':')
            try:
                self.accesskey = cred_parts[0]
                self.secretkey = cred_parts[1]
            except IndexError:
                LOG.error("Badly formed S3 credentials")
                raise exceptions.BadStoreUri(uri=uri)
        else:
            self.accesskey = None
            path = entire_path

        try:
            path_parts = path.split('/')
            # The key is the last path component, the bucket the one before
            # it; everything remaining is the S3 service URL.
            self.key = path_parts.pop()
            self.bucket = path_parts.pop()
            if path_parts:
                self.s3serviceurl = '/'.join(path_parts).strip('/')
            else:
                LOG.error("Badly formed S3 URI. Missing s3 service URL.")
                raise exceptions.BadStoreUri(uri=uri)
        except IndexError:
            LOG.error("Badly formed S3 URI")
            raise exceptions.BadStoreUri(uri=uri)
class Store(glance_store.driver.Store):
    """An implementation of the s3 adapter."""

    _CAPABILITIES = capabilities.BitMasks.RW_ACCESS
    OPTIONS = _S3_OPTS
    EXAMPLE_URL = "s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>"

    READ_CHUNKSIZE = 64 * units.Ki
    WRITE_CHUNKSIZE = 5 * units.Mi

    @staticmethod
    def get_schemes():
        """Return the URI schemes handled by this driver."""
        return 's3', 's3+http', 's3+https'

    def configure_add(self):
        """
        Configure the Store to use the stored configuration options

        Any store that needs special configuration should implement
        this method. If the store was not able to successfully configure
        itself, it should raise `exceptions.BadStoreConfiguration`
        """
        self.s3_host = self._option_get('s3_store_host')
        self.access_key = self._option_get('s3_store_access_key')
        self.secret_key = self._option_get('s3_store_secret_key')
        self.bucket = self._option_get('s3_store_bucket')

        # Derive the scheme from the configured host; a bare host name
        # defaults to plain HTTP.
        self.scheme = 's3'
        if self.s3_host.startswith('https://'):
            self.scheme = 's3+https'
            self.full_s3_host = self.s3_host
        elif self.s3_host.startswith('http://'):
            self.full_s3_host = self.s3_host
        else:  # Defaults http
            self.full_s3_host = 'http://' + self.s3_host

        # Size options are configured in MB; convert to bytes here.
        _s3_obj_size = self._option_get('s3_store_large_object_size')
        self.s3_store_large_object_size = _s3_obj_size * units.Mi
        _s3_ck_size = self._option_get('s3_store_large_object_chunk_size')
        _s3_ck_min = DEFAULT_LARGE_OBJECT_MIN_CHUNK_SIZE
        if _s3_ck_size < _s3_ck_min:
            reason = _("s3_store_large_object_chunk_size must be at "
                       "least %d MB.") % _s3_ck_min
            LOG.error(reason)
            raise exceptions.BadStoreConfiguration(store_name="s3",
                                                   reason=reason)
        self.s3_store_large_object_chunk_size = _s3_ck_size * units.Mi

        self.s3_store_thread_pools = self._option_get('s3_store_thread_pools')
        if self.s3_store_thread_pools <= 0:
            reason = _("s3_store_thread_pools must be a positive "
                       "integer. %s") % self.s3_store_thread_pools
            LOG.error(reason)
            raise exceptions.BadStoreConfiguration(store_name="s3",
                                                   reason=reason)

        if self.backend_group:
            self._set_url_prefix()

    def _set_url_prefix(self):
        """Precompute the location URL prefix for multi-backend setups."""
        s3_host = self.s3_host
        # Strip any scheme: the prefix carries its own (self.scheme).
        if s3_host.startswith('http://'):
            s3_host = s3_host[len('http://'):]
        elif s3_host.startswith('https://'):
            s3_host = s3_host[len('https://'):]

        self._url_prefix = "%s://%s:%s@%s/%s" % (self.scheme, self.access_key,
                                                 self.secret_key, s3_host,
                                                 self.bucket)

    def _option_get(self, param):
        """Fetch a configuration option, failing loudly when unset.

        :param param: option name
        :returns: the configured value
        :raises: `exceptions.BadStoreConfiguration` if the option is unset
                 (except the boolean ``s3_store_create_bucket_on_put``,
                 for which False is a valid value)
        """
        if self.backend_group:
            store_conf = getattr(self.conf, self.backend_group)
        else:
            store_conf = self.conf.glance_store

        result = getattr(store_conf, param)
        if not result:
            if param == 's3_store_create_bucket_on_put':
                return result
            reason = _("Could not find %s in configuration options.") % param
            LOG.error(reason)
            raise exceptions.BadStoreConfiguration(store_name="s3",
                                                   reason=reason)
        return result

    def _create_s3_client(self, loc):
        """Create a client object to use when connecting to S3.

        :param loc: `glance_store.location.Location` object, supplied
                    from glance_store.location.get_location_from_uri()
        :returns: An object with credentials to connect to S3
        """
        s3_host = self._option_get('s3_store_host')
        url_format = self._option_get('s3_store_bucket_url_format')
        calling_format = {'addressing_style': url_format}

        session = boto_session.Session(aws_access_key_id=loc.accesskey,
                                       aws_secret_access_key=loc.secretkey)
        config = boto_client.Config(s3=calling_format)
        location = get_s3_location(s3_host)

        bucket_name = loc.bucket
        # Virtual-host addressing embeds the bucket in the DNS name, so the
        # bucket must itself be a valid DNS label.
        if (url_format == 'virtual' and
                not boto_utils.check_dns_name(bucket_name)):
            raise boto_exceptions.InvalidDNSNameError(bucket_name=bucket_name)

        # A known Amazon endpoint is addressed by region; anything else is
        # treated as an S3-compatible server and addressed by endpoint URL.
        region_name, endpoint_url = None, None
        if location:
            region_name = location
        else:
            endpoint_url = s3_host

        return session.client(service_name='s3',
                              endpoint_url=endpoint_url,
                              region_name=region_name,
                              use_ssl=(loc.scheme == 's3+https'),
                              config=config)

    def _operation_set(self, loc):
        """Objects and variables frequently used when operating S3 are
        returned together.

        :param loc: `glance_store.location.Location` object, supplied
                    from glance_store.location.get_location_from_uri()
        :returns: tuple of: (1) S3 client object, (2) Bucket name,
                  (3) Image Object name
        """
        return self._create_s3_client(loc), loc.bucket, loc.key

    @capabilities.check
    def get(self, location, offset=0, chunk_size=None, context=None):
        """
        Takes a `glance_store.location.Location` object that indicates
        where to find the image file, and returns a tuple of generator
        (for reading the image file) and image_size

        :param location: `glance_store.location.Location` object, supplied
                         from glance_store.location.get_location_from_uri()
        :raises: `glance_store.exceptions.NotFound` if image does not exist
        """
        loc = location.store_location
        s3_client, bucket, key = self._operation_set(loc)

        if not self._object_exists(s3_client, bucket, key):
            LOG.warning("Could not find key %(key)s in "
                        "bucket %(bucket)s", {'key': key, 'bucket': bucket})
            raise exceptions.NotFound(image=key)

        # Keep the response separate from ``key``: the original code rebound
        # ``key`` to the response dict before logging it, so the debug line
        # below used to print the whole response instead of the object name.
        resp = s3_client.get_object(Bucket=bucket, Key=key)

        LOG.debug("Retrieved image object from S3 using s3_host=%(s3_host)s, "
                  "access_key=%(accesskey)s, bucket=%(bucket)s, "
                  "key=%(key)s)",
                  {'s3_host': loc.s3serviceurl, 'accesskey': loc.accesskey,
                   'bucket': bucket, 'key': key})

        cs = self.READ_CHUNKSIZE

        class ResponseIndexable(glance_store.Indexable):
            def another(self):
                try:
                    return next(self.wrapped)
                except StopIteration:
                    return b''

        return (ResponseIndexable(utils.chunkiter(resp['Body'], cs),
                                  resp['ContentLength']),
                resp['ContentLength'])

    def get_size(self, location, context=None):
        """
        Takes a `glance_store.location.Location` object that indicates
        where to find the image file and returns the image size

        :param location: `glance_store.location.Location` object, supplied
                         from glance_store.location.get_location_from_uri()
        :raises: `glance_store.exceptions.NotFound` if image does not exist
        :rtype: int
        """
        loc = location.store_location
        s3_client, bucket, key = self._operation_set(loc)

        if not self._object_exists(s3_client, bucket, key):
            LOG.warning("Could not find key %(key)s in "
                        "bucket %(bucket)s", {'key': key, 'bucket': bucket})
            raise exceptions.NotFound(image=key)

        resp = s3_client.head_object(Bucket=bucket, Key=key)
        return resp['ContentLength']

    @capabilities.check
    def add(self, image_id, image_file, image_size, hashing_algo, context=None,
            verifier=None):
        """
        Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes
        :param hashing_algo: A hashlib algorithm identifier (string)
        :param context: A context object
        :param verifier: An object used to verify signatures for images
        :returns: tuple of: (1) URL in backing store, (2) bytes written,
                  (3) checksum, (4) multihash value, and (5) a dictionary
                  with storage system specific information
        :raises: `glance_store.exceptions.Duplicate` if the image already
                 exists
        """
        loc = StoreLocation(store_specs={'scheme': self.scheme,
                                         'bucket': self.bucket,
                                         'key': image_id,
                                         's3serviceurl': self.full_s3_host,
                                         'accesskey': self.access_key,
                                         'secretkey': self.secret_key},
                            conf=self.conf,
                            backend_group=self.backend_group)

        s3_client, bucket, key = self._operation_set(loc)

        if not self._bucket_exists(s3_client, bucket):
            if self._option_get('s3_store_create_bucket_on_put'):
                self._create_bucket(s3_client,
                                    self._option_get('s3_store_host'),
                                    bucket)
            else:
                msg = (_("The bucket %s does not exist in "
                         "S3. Please set the "
                         "s3_store_create_bucket_on_put option "
                         "to add bucket to S3 automatically.") % bucket)
                raise glance_store.BackendException(msg)

        LOG.debug("Adding image object to S3 using (s3_host=%(s3_host)s, "
                  "access_key=%(access_key)s, bucket=%(bucket)s, "
                  "key=%(key)s)",
                  {'s3_host': self.s3_host, 'access_key': loc.accesskey,
                   'bucket': bucket, 'key': key})

        if not self._object_exists(s3_client, bucket, key):
            # Small images are stored in one put_object call; large ones go
            # through the S3 Multipart Upload API.
            if image_size < self.s3_store_large_object_size:
                return self._add_singlepart(s3_client=s3_client,
                                            image_file=image_file,
                                            bucket=bucket,
                                            key=key,
                                            loc=loc,
                                            hashing_algo=hashing_algo,
                                            verifier=verifier)
            return self._add_multipart(s3_client=s3_client,
                                       image_file=image_file,
                                       image_size=image_size,
                                       bucket=bucket,
                                       key=key,
                                       loc=loc,
                                       hashing_algo=hashing_algo,
                                       verifier=verifier)
        LOG.warning("S3 already has an image with bucket ID %(bucket)s, "
                    "key %(key)s", {'bucket': bucket, 'key': key})
        raise exceptions.Duplicate(image=key)

    def _add_singlepart(self, s3_client, image_file, bucket, key, loc,
                        hashing_algo, verifier):
        """Stores an image file with a single part upload to S3 backend.

        :param s3_client: An object with credentials to connect to S3
        :param image_file: The image data to write, as a file-like object
        :param bucket: S3 bucket name
        :param key: The object name to be stored (image identifier)
        :param loc: `glance_store.location.Location` object, supplied
                    from glance_store.location.get_location_from_uri()
        :param hashing_algo: A hashlib algorithm identifier (string)
        :param verifier: An object used to verify signatures for images
        :returns: tuple of: (1) URL in backing store, (2) bytes written,
                  (3) checksum, (4) multihash value, and (5) a dictionary
                  with storage system specific information
        """
        os_hash_value = utils.get_hasher(hashing_algo, False)
        checksum = utils.get_hasher('md5', False)
        # NOTE: the whole image is buffered in memory here; callers only
        # route images below s3_store_large_object_size through this path.
        image_data = b''
        image_size = 0
        for chunk in utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE):
            image_data += chunk
            image_size += len(chunk)
            os_hash_value.update(chunk)
            checksum.update(chunk)
            if verifier:
                verifier.update(chunk)

        s3_client.put_object(Body=image_data,
                             Bucket=bucket,
                             Key=key)
        hash_hex = os_hash_value.hexdigest()
        checksum_hex = checksum.hexdigest()

        # Add store backend information to location metadata
        metadata = {}
        if self.backend_group:
            metadata['store'] = self.backend_group

        LOG.debug("Wrote %(size)d bytes to S3 key named %(key)s "
                  "with checksum %(checksum)s",
                  {'size': image_size, 'key': key, 'checksum': checksum_hex})

        return loc.get_uri(), image_size, checksum_hex, hash_hex, metadata

    def _add_multipart(self, s3_client, image_file, image_size, bucket,
                       key, loc, hashing_algo, verifier):
        """Stores an image file with a multi part upload to S3 backend.

        :param s3_client: An object with credentials to connect to S3
        :param image_file: The image data to write, as a file-like object
        :param bucket: S3 bucket name
        :param key: The object name to be stored (image identifier)
        :param loc: `glance_store.location.Location` object, supplied
                    from glance_store.location.get_location_from_uri()
        :param hashing_algo: A hashlib algorithm identifier (string)
        :param verifier: An object used to verify signatures for images
        :returns: tuple of: (1) URL in backing store, (2) bytes written,
                  (3) checksum, (4) multihash value, and (5) a dictionary
                  with storage system specific information
        :raises: `glance_store.BackendException` if any part failed to upload
        """
        os_hash_value = utils.get_hasher(hashing_algo, False)
        checksum = utils.get_hasher('md5', False)
        pool_size = self.s3_store_thread_pools
        pool = eventlet.greenpool.GreenPool(size=pool_size)
        mpu = s3_client.create_multipart_upload(Bucket=bucket, Key=key)
        upload_id = mpu['UploadId']
        LOG.debug("Multipart initiate key=%(key)s, UploadId=%(UploadId)s",
                  {'key': key, 'UploadId': upload_id})
        cstart = 0
        plist = []

        # Grow the part size if needed so the upload fits within S3's
        # 10000-part limit.
        chunk_size = int(math.ceil(float(image_size) / MAX_PART_NUM))
        write_chunk_size = max(self.s3_store_large_object_chunk_size,
                               chunk_size)
        it = utils.chunkreadable(image_file, self.WRITE_CHUNKSIZE)
        buffered_chunk = b''
        while True:
            try:
                buffered_clen = len(buffered_chunk)
                if buffered_clen < write_chunk_size:
                    # keep reading data
                    read_chunk = next(it)
                    buffered_chunk += read_chunk
                    continue
                else:
                    write_chunk = buffered_chunk[:write_chunk_size]
                    remained_data = buffered_chunk[write_chunk_size:]
                    os_hash_value.update(write_chunk)
                    checksum.update(write_chunk)
                    if verifier:
                        verifier.update(write_chunk)
                    fp = six.BytesIO(write_chunk)
                    fp.seek(0)
                    part = UploadPart(mpu, fp, cstart + 1, len(write_chunk))
                    pool.spawn_n(run_upload, s3_client, bucket, key, part)
                    plist.append(part)
                    cstart += 1
                    buffered_chunk = remained_data
            except StopIteration:
                if len(buffered_chunk) > 0:
                    # Write the last chunk data
                    write_chunk = buffered_chunk
                    os_hash_value.update(write_chunk)
                    checksum.update(write_chunk)
                    if verifier:
                        verifier.update(write_chunk)
                    fp = six.BytesIO(write_chunk)
                    fp.seek(0)
                    part = UploadPart(mpu, fp, cstart + 1, len(write_chunk))
                    pool.spawn_n(run_upload, s3_client, bucket, key, part)
                    plist.append(part)
                break

        pedict = {}
        total_size = 0
        # Wait for all spawned uploads to finish before inspecting results.
        pool.waitall()

        for part in plist:
            pedict.update(part.etag)
            total_size += part.size

        success = all(part.success for part in plist)

        if success:
            # Complete
            mpu_list = self._get_mpu_list(pedict)
            s3_client.complete_multipart_upload(Bucket=bucket,
                                                Key=key,
                                                MultipartUpload=mpu_list,
                                                UploadId=upload_id)
            hash_hex = os_hash_value.hexdigest()
            checksum_hex = checksum.hexdigest()

            # Add store backend information to location metadata
            metadata = {}
            if self.backend_group:
                metadata['store'] = self.backend_group

            LOG.info("Multipart complete key=%(key)s "
                     "UploadId=%(UploadId)s "
                     "Wrote %(total_size)d bytes to S3 key "
                     "named %(key)s "
                     "with checksum %(checksum)s",
                     {'key': key, 'UploadId': upload_id,
                      'total_size': total_size, 'checksum': checksum_hex})
            return loc.get_uri(), total_size, checksum_hex, hash_hex, metadata

        # Abort
        s3_client.abort_multipart_upload(Bucket=bucket, Key=key,
                                         UploadId=upload_id)
        LOG.error("Some parts failed to upload to S3. "
                  "Aborted the key=%s", key)
        msg = _("Failed to add image object to S3. key=%s") % key
        raise glance_store.BackendException(msg)

    @capabilities.check
    def delete(self, location, context=None):
        """
        Takes a `glance_store.location.Location` object that indicates
        where to find the image file to delete.

        :param location: `glance_store.location.Location` object, supplied
                         from glance_store.location.get_location_from_uri()
        :raises: NotFound if image does not exist
        """
        loc = location.store_location
        s3_client, bucket, key = self._operation_set(loc)

        if not self._object_exists(s3_client, bucket, key):
            LOG.warning("Could not find key %(key)s in bucket %(bucket)s",
                        {'key': key, 'bucket': bucket})
            raise exceptions.NotFound(image=key)

        LOG.debug("Deleting image object from S3 using s3_host=%(s3_host)s, "
                  "accesskey=%(accesskey)s, bucket=%(bucket)s, key=%(key)s)",
                  {'s3_host': loc.s3serviceurl, 'accesskey': loc.accesskey,
                   'bucket': bucket, 'key': key})

        return s3_client.delete_object(Bucket=bucket, Key=key)

    @staticmethod
    def _bucket_exists(s3_client, bucket):
        """Check whether bucket exists in the S3.

        :param s3_client: An object with credentials to connect to S3
        :param bucket: S3 bucket name
        :returns: boolean value; If the value is true, the bucket is exist
                  if false, it is not.
        :raises: BadStoreConfiguration if cannot connect to S3 successfully
        """
        try:
            s3_client.head_bucket(Bucket=bucket)
        except boto_exceptions.ClientError as e:
            error_code = e.response['Error']['Code']
            if error_code == '404':
                return False
            msg = ("Failed to get bucket info: %s" %
                   encodeutils.exception_to_unicode(e))
            LOG.error(msg)
            raise glance_store.BadStoreConfiguration(store_name='s3',
                                                     reason=msg)
        else:
            return True

    @staticmethod
    def _object_exists(s3_client, bucket, key):
        """Check whether object exists in the specific bucket of S3.

        :param s3_client: An object with credentials to connect to S3
        :param bucket: S3 bucket name
        :param key: The image object name
        :returns: boolean value; If the value is true, the object is exist
                  if false, it is not.
        :raises: BadStoreConfiguration if cannot connect to S3 successfully
        """
        try:
            s3_client.head_object(Bucket=bucket, Key=key)
        except boto_exceptions.ClientError as e:
            error_code = e.response['Error']['Code']
            if error_code == '404':
                return False
            msg = ("Failed to get object info: %s" %
                   encodeutils.exception_to_unicode(e))
            LOG.error(msg)
            raise glance_store.BadStoreConfiguration(store_name='s3',
                                                     reason=msg)
        else:
            return True

    @staticmethod
    def _create_bucket(s3_client, s3_host, bucket):
        """Create bucket into the S3.

        :param s3_client: An object with credentials to connect to S3
        :param s3_host: S3 endpoint url
        :param bucket: S3 bucket name
        :raises: BadStoreConfiguration if cannot connect to S3 successfully
        """
        region = get_s3_location(s3_host)
        try:
            # A LocationConstraint may only be sent for a non-default
            # Amazon region; S3-compatible servers get a bare create.
            if region == '':
                s3_client.create_bucket(Bucket=bucket)
            else:
                s3_client.create_bucket(
                    Bucket=bucket,
                    CreateBucketConfiguration={
                        'LocationConstraint': region
                    })
        except boto_exceptions.ClientError as e:
            msg = ("Failed to add bucket to S3: %s" %
                   encodeutils.exception_to_unicode(e))
            LOG.error(msg)
            raise glance_store.BadStoreConfiguration(store_name='s3',
                                                     reason=msg)

    @staticmethod
    def _get_mpu_list(pedict):
        """Build the MultipartUpload argument for
        boto3.client('s3').complete_multipart_upload.

        S3 requires the parts of a CompleteMultipartUpload request to be
        listed in ascending part-number order, so the parts are sorted
        explicitly instead of relying on dict iteration order.

        :param pedict: dict mapping part number to the part's ETag
        :returns: dict with a 'Parts' key holding the ordered part list
        """
        return {
            'Parts': [
                {
                    'PartNumber': pnum,
                    'ETag': etag
                } for pnum, etag in sorted(pedict.items())
            ]
        }
def get_s3_location(s3_host):
    """Get S3 region information from ``s3_store_host``.

    :param s3_host: S3 endpoint url
    :returns: string value; region information which user wants to use on
              Amazon S3, and if user wants to use S3 compatible storage,
              returns ''
    """
    amazon_regions = {
        's3.amazonaws.com': '',
        's3-us-east-1.amazonaws.com': 'us-east-1',
        's3-us-east-2.amazonaws.com': 'us-east-2',
        's3-us-west-1.amazonaws.com': 'us-west-1',
        's3-us-west-2.amazonaws.com': 'us-west-2',
        's3-ap-east-1.amazonaws.com': 'ap-east-1',
        's3-ap-south-1.amazonaws.com': 'ap-south-1',
        's3-ap-northeast-1.amazonaws.com': 'ap-northeast-1',
        's3-ap-northeast-2.amazonaws.com': 'ap-northeast-2',
        's3-ap-northeast-3.amazonaws.com': 'ap-northeast-3',
        's3-ap-southeast-1.amazonaws.com': 'ap-southeast-1',
        's3-ap-southeast-2.amazonaws.com': 'ap-southeast-2',
        's3-ca-central-1.amazonaws.com': 'ca-central-1',
        's3-cn-north-1.amazonaws.com.cn': 'cn-north-1',
        's3-cn-northwest-1.amazonaws.com.cn': 'cn-northwest-1',
        's3-eu-central-1.amazonaws.com': 'eu-central-1',
        's3-eu-west-1.amazonaws.com': 'eu-west-1',
        's3-eu-west-2.amazonaws.com': 'eu-west-2',
        's3-eu-west-3.amazonaws.com': 'eu-west-3',
        's3-eu-north-1.amazonaws.com': 'eu-north-1',
        's3-sa-east-1.amazonaws.com': 'sa-east-1'
    }
    # Normalize the configured endpoint before the lookup: drop an
    # optional http(s) scheme, port number and trailing slash.
    matched = re.match(
        r'^(https?://)?(?P<host>[^:]+[^/])(:[0-9]+)?/?$', s3_host)
    host = matched.group('host') if matched else s3_host
    # Unknown hosts are assumed to be S3-compatible storage ('').
    return amazon_regions.get(host, '')
| openstack/glance_store | glance_store/_drivers/s3.py | Python | apache-2.0 | 36,786 |
#!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_adcpt_acfgm
@fid marine-integrations/mi/dataset/parser/test/test_adcpt_acfgm.py
@author Ronald Ronquillo
@brief Test code for a Adcpt_Acfgm_Dcl data parser
"""
from nose.plugins.attrib import attr
import os
from mi.core.log import get_logger
from mi.dataset.parser.utilities import particle_to_yml
log = get_logger()
from mi.core.exceptions import RecoverableSampleException
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.driver.adcpt_acfgm.dcl.pd8.adcpt_acfgm_dcl_pd8_driver_common import \
AdcptAcfgmPd8Parser, MODULE_NAME, ADCPT_ACFGM_DCL_PD8_RECOVERED_PARTICLE_CLASS, \
ADCPT_ACFGM_DCL_PD8_TELEMETERED_PARTICLE_CLASS
from mi.dataset.driver.adcpt_acfgm.dcl.pd8.resource import RESOURCE_PATH
@attr('UNIT', group='mi')
class AdcptAcfgmPd8ParserUnitTestCase(ParserUnitTestCase):
    """
    Adcpt_Acfgm_Dcl Parser unit test suite
    """

    def create_parser(self, particle_class, file_handle):
        """
        Create an AdcptAcfgmPd8Parser producing the given particle class
        from the given open file handle.
        """
        parser = AdcptAcfgmPd8Parser(
            {DataSetDriverConfigKeys.PARTICLE_MODULE: MODULE_NAME,
             DataSetDriverConfigKeys.PARTICLE_CLASS: particle_class},
            file_handle,
            self.exception_callback)
        return parser

    def open_file(self, filename):
        """Open a resource file in text mode and return the handle."""
        # NOTE: mode 'rU' was deprecated and removed in Python 3.11;
        # plain 'r' already gives universal-newline behavior on Python 3.
        my_file = open(os.path.join(RESOURCE_PATH, filename), mode='r')
        return my_file

    def setUp(self):
        ParserUnitTestCase.setUp(self)

    def create_yml(self, particles, filename):
        """Write the given particles out as a YAML fixture file."""
        particle_to_yml(particles, os.path.join(RESOURCE_PATH, filename))

    def test_parse_input(self):
        """
        Read a large file and verify that all expected particles can be read.
        Verification is not done at this time, but will be done in the
        tests below.
        """
        in_file = self.open_file('20131201.adcp_mod.log')
        parser = self.create_parser(ADCPT_ACFGM_DCL_PD8_RECOVERED_PARTICLE_CLASS, in_file)

        # In a single read, get all particles in this file.
        result = parser.get_records(23)
        self.assertEqual(len(result), 23)

        in_file.close()
        self.assertListEqual(self.exception_callback_value, [])

    def test_recov(self):
        """
        Read a file and pull out multiple data particles at one time.
        Verify that the results are those we expected.
        """
        in_file = self.open_file('20131201.adcp_mod.log')
        parser = self.create_parser(ADCPT_ACFGM_DCL_PD8_RECOVERED_PARTICLE_CLASS, in_file)

        # In a single read, get all particles for this file.
        result = parser.get_records(23)
        self.assertEqual(len(result), 23)

        self.assert_particles(result, '20131201.adcp_mod_recov.yml', RESOURCE_PATH)
        self.assertListEqual(self.exception_callback_value, [])
        in_file.close()

    def test_telem(self):
        """
        Read a file and pull out multiple data particles at one time.
        Verify that the results are those we expected.
        """
        in_file = self.open_file('20131201.adcp_mod.log')
        parser = self.create_parser(ADCPT_ACFGM_DCL_PD8_TELEMETERED_PARTICLE_CLASS, in_file)

        # In a single read, get all particles for this file.
        result = parser.get_records(23)
        self.assertEqual(len(result), 23)

        self.assert_particles(result, '20131201.adcp_mod.yml', RESOURCE_PATH)
        self.assertListEqual(self.exception_callback_value, [])
        in_file.close()

    def test_bad_data(self):
        """
        Ensure that bad data is skipped when it exists.

        Every corrupted record below must surface as a
        RecoverableSampleException rather than aborting the parse.
        """
        # Line 1: DCL Log missing opening square bracket
        # Line 40: Timestamp day has a float
        # Line 79: Heading is not a float
        # Line 118: Temp is not a float
        # Line 119: Header typo
        # Line 158: Timestamp has non digit
        # Line 197: Timestamp missing milliseconds
        # Line 234: Bin missing
        # Line 272: Dir missing
        # Line 310: Mag missing
        # Line 348: E/W missing
        # Line 386: N/S missing
        # Line 424: Vert missing
        # Line 462: Err missing
        # Line 500: Echo1 missing
        # Line 538: Echo2 missing
        # Line 576: Echo3 missing
        # Line 614: Echo4 missing
        # Line 652: Dir is not a float
        # Line 690: Dir has a non digit
        # Line 728: Mag is not a float
        # Line 766: Mag has a non digit
        # Line 804: E/W is a float
        # Line 842: E/W has a non digit
        # Line 880: N/S is a float
        # Line 918: N/S is a non digit
        # Line 956: Vert is a float
        # Line 994: Vert is a non digit
        # Line 1032: Err is a float
        # Line 1070: Err has a non digit
        # Line 1108: Echo1 is a float
        # Line 1146: Echo1 has a non digit
        # Line 1184: Echo2 is a float
        # Line 1222: Echo2 has a non digit
        # Line 1260: Echo3 is negative
        # Line 1298: Timestamp missing secconds
        # Line 1331: DCL Logging missing closing square bracket
        # Line 1384: Ensemble number is a float
        # Line 1409: Pitch is not a float
        # Line 1448: Speed of sound is a float
        # Line 1485: Roll is not a float
        # Line 1523: Heading has a non digit
        # Line 1561: Pitch has a non digit
        # Line 1599: Roll has a non digit
        fid = open(os.path.join(RESOURCE_PATH, '20131201.adcp_corrupt.log'), 'rb')
        parser = self.create_parser(ADCPT_ACFGM_DCL_PD8_RECOVERED_PARTICLE_CLASS, fid)
        parser.get_records(66)

        # self.assert_ was a deprecated unittest alias (removed in 3.12);
        # assertIsInstance also gives a clearer failure message.
        for ex in self.exception_callback_value:
            self.assertIsInstance(ex, RecoverableSampleException)
            log.debug('Exception: %s', ex)

        self.assertIsInstance(self.exception_callback_value[0],
                              RecoverableSampleException)
        fid.close()

    def test_telem_3021(self):
        """
        Read a file and pull out multiple data particles at one time.
        Verify that the results are those we expected.
        This test uses a real file from a deployment.
        Used to verify fixes in responses to Redmine # 3021
        """
        in_file = self.open_file('20141208.adcp.log')
        parser = self.create_parser(ADCPT_ACFGM_DCL_PD8_TELEMETERED_PARTICLE_CLASS, in_file)

        # In a single read, get all particles for this file.
        result = parser.get_records(23)
        self.assertEqual(len(result), 14)

        self.assert_particles(result, '20141208.adcp.yml', RESOURCE_PATH)
        self.assertListEqual(self.exception_callback_value, [])
        in_file.close()

    def test_telem_9692(self):
        """
        Test to verify change made to dcl_file_common.py works with DCL
        timestamps containing seconds >59
        """
        in_file = self.open_file('20131201.adcpA.log')
        parser = self.create_parser(ADCPT_ACFGM_DCL_PD8_TELEMETERED_PARTICLE_CLASS, in_file)

        # In a single read, get all particles for this file.
        result = parser.get_records(20)
        self.assertEqual(len(result), 1)

        self.assertListEqual(self.exception_callback_value, [])
        in_file.close()
| janeen666/mi-instrument | mi/dataset/parser/test/test_adcpt_acfgm_dcl_pd8.py | Python | bsd-2-clause | 7,347 |
#!/usr/bin/env python
"""Smoke test: instantiate the ctypes FFI backend and run its example."""
import sys
# Make the package importable whether this script is run from the repo
# root or from inside the tests/ directory.
sys.path.append('..')
sys.path.append('.')
from jumper.blueprints.backends.modules.ctypes.ffi_ctypes import FFICTypes
# No assertions: the test passes if construction and the example call
# complete without raising.
t = FFICTypes()
t.example_world()
| tristanfisher/ffi4wd | tests/test_ctypes.py | Python | agpl-3.0 | 188 |
# -*- coding: utf-8 -*-
# Copyright (c), 2011, the txievery authors. See the LICENSE file for details.
"""
Paypal Express Checkout support.
"""
| lvh/txievery | txievery/expresscheckout/__init__.py | Python | isc | 144 |
# -*- coding: utf-8 -*-
# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
from django.contrib.auth import get_user_model
from akvo.rsr.tests.base import BaseTestCase
from akvo.rsr.tests.utils import ProjectFixtureBuilder
from akvo.rsr.models import (
Organisation, Partnership, PartnerSite, Project, ProjectUpdate
)
User = get_user_model()
class PartnerSiteModelTestCase(BaseTestCase):
    """Tests for the partner site model"""
    def setUp(self):
        """Create two projects, two orgs, a user and two updates.

        Partnerships: org_1 partners both projects, org_2 only project_1,
        so the two orgs' pages should expose 2 and 1 projects respectively.
        """
        super(PartnerSiteModelTestCase, self).setUp()
        # Clear all projects, users since some tests may not tear down!
        self.tearDown()
        # Setup a project with results framework and a user
        self.project_1 = self.create_project("Test project 1")
        self.project_2 = self.create_project("Test project 2")
        self.org_1 = self.create_organisation('Org 1')
        self.org_2 = self.create_organisation('Org 2')
        self.user = self.create_user('user1@com.com')
        # Both updates are attached to project_1.
        self.update_1 = ProjectUpdate.objects.create(title="Test update 1", project=self.project_1,
                                                     user=self.user)
        self.update_2 = ProjectUpdate.objects.create(title="Test update 2", project=self.project_1,
                                                     user=self.user)
        self.partnership_1 = self.make_partner(
            self.project_1, self.org_1, Partnership.IATI_EXTENDING_PARTNER)
        self.partnership_2 = self.make_partner(
            self.project_2, self.org_1, Partnership.IATI_EXTENDING_PARTNER)
        self.partnership_3 = self.make_partner(
            self.project_1, self.org_2, Partnership.IATI_EXTENDING_PARTNER)
    def tearDown(self):
        # Delete everything this suite creates; also called from setUp as a
        # safety net against earlier tests that did not clean up.
        Project.objects.all().delete()
        User.objects.all().delete()
        Organisation.objects.all().delete()
        ProjectUpdate.objects.all().delete()
        Partnership.objects.all().delete()
        PartnerSite.objects.all().delete()
    def test_pages_projects(self):
        """Basic test that page.projects returns the correct number of projects"""
        # Given
        org_1 = self.org_1
        org_2 = self.org_2
        # When
        page_1 = PartnerSite.objects.create(organisation=org_1, hostname="page1")
        page_2 = PartnerSite.objects.create(organisation=org_2, hostname="page2")
        # Then: counts follow the partnerships created in setUp.
        projects_1 = page_1.projects()
        self.assertEqual(projects_1.count(), 2)
        projects_2 = page_2.projects()
        self.assertEqual(projects_2.count(), 1)
    def test_redirect_logo_url(self):
        """Without a partner site, /en/logo/ redirects to the default RSR logo."""
        # When
        response = self.c.get('/en/logo/', follow=False)
        # Then
        self.assertEqual(302, response.status_code)
        self.assertTrue(response.url.endswith('/rsr/images/rsrLogo.svg'))
    def test_partner_site_redirect_logo_url(self):
        """A partner site with a custom logo redirects /en/logo/ to that file."""
        # Given
        site = PartnerSite.objects.create(organisation=self.org_1, hostname="page1")
        site.custom_logo = 'custom.png'
        site.save()
        # Route the request through the partner-site hostname.
        self.c.defaults['HTTP_HOST'] = '{}.localakvoapp.org'.format(site.hostname)
        # When
        response = self.c.get('/en/logo/', follow=False)
        # Then
        self.assertEqual(302, response.status_code)
        self.assertEqual(response.url, '/media/custom.png')
    def test_redirect_css_url(self):
        """Without a partner site there is no custom CSS: /en/css/ is a 404."""
        # When
        response = self.c.get('/en/css/', follow=False)
        # Then
        self.assertEqual(404, response.status_code)
    def test_partner_site_redirect_css_url(self):
        """A partner site with custom CSS redirects /en/css/ to that file."""
        # Given
        site = PartnerSite.objects.create(organisation=self.org_1, hostname="page1")
        site.custom_css = 'custom.css'
        site.save()
        self.c.defaults['HTTP_HOST'] = '{}.localakvoapp.org'.format(site.hostname)
        # When
        response = self.c.get('/en/css/', follow=False)
        # Then
        self.assertEqual(302, response.status_code)
        self.assertEqual(response.url, '/media/custom.css')
class PartnerSiteForProgramTestCase(BaseTestCase):
    """Tests for partner sites that are scoped to a single program."""

    def test_partner_site_for_program(self):
        organisation = self.create_organisation('Acme')
        standalone = (
            ProjectFixtureBuilder()
            .with_partner(organisation, Partnership.IATI_REPORTING_ORGANISATION)
            .build()
        )
        contributors = [
            {'title': 'Contrib #1', 'contributors': [{'title': 'Subcon #1.1'}]},
            {'title': 'Contrib #2'}
        ]
        hierarchy_root = (
            ProjectFixtureBuilder()
            .with_partner(organisation, Partnership.IATI_REPORTING_ORGANISATION)
            .with_contributors(contributors)
            .build()
        )
        program = self.create_project_hierarchy(organisation, hierarchy_root.object, 2)
        page = PartnerSite.objects.create(
            organisation=organisation, hostname="program", program=program)
        # The program page exposes the root and its three descendants only;
        # the standalone project must be excluded.
        self.assertEqual(4, page.projects().count())
        project_ids = page.projects().values_list('id', flat=True)
        self.assertNotIn(standalone.object.id, project_ids)
| akvo/akvo-rsr | akvo/rsr/tests/models/test_partner_site.py | Python | agpl-3.0 | 5,096 |
#!/usr/bin/env python
"""Import demographic spreadsheet rows into place_data Entry records.

Usage: import_place_data.py <place_kind_slug> <csv_filename>

NOTE(review): this is a Python 2 script (print statement, 'rU' mode).
"""
import sys
import csv
import os
import decimal
os.environ['DJANGO_SETTINGS_MODULE'] = 'pombola.settings'
sys.path.append('../../../')
sys.path.append('../../')
from django.utils.text import slugify
from pombola.core.models import Place
from pombola.place_data.models import Entry
# Positional arguments: the Place kind to import for, and the CSV to read.
place_kind_slug = sys.argv[1]
filename = sys.argv[2]
csv_file = open(filename, 'rU')
csv_reader = csv.DictReader(csv_file, dialect='excel')
# Get rid of the padding around the fieldnames
csv_reader.fieldnames = [x.strip() for x in csv_reader.fieldnames]
for row in csv_reader:
    try:
        place_slug = row['slug'].strip()
    except KeyError:
        # If there's no slug column, try slugifying the name column
        # This will currently only happen on the Counties - the constituency
        # spreadsheet has slugs.
        # If we needed this to work for constituencies, we'd have to not add
        # -constituency on the end as they don't have that.
        place_slug = slugify(row['name'].strip()) + '-' + place_kind_slug
    # Check place with this slug exists and is of the right kind.
    try:
        place = Place.objects.get(slug=place_slug, kind__slug=place_kind_slug)
    except Place.DoesNotExist:
        print "Cannot find %s with slug %s, continuing with next place." % (place_kind_slug, place_slug)
        continue
    # Update an existing Entry for the place, or start a new one.
    try:
        data_row = Entry.objects.get(place=place)
    except Entry.DoesNotExist:
        data_row = Entry()
        data_row.place = place
    # Column names below (including misspellings such as 'Gender Ration'
    # and 'Average Houshold Size') deliberately match the spreadsheet
    # headers -- do not "fix" them here.
    data_row.population_male = int(row['Male Population'])
    data_row.population_female = int(row['Female Population'])
    data_row.population_total = int(row['Total Population'])
    data_row.population_rank = int(row['Population Rank 1=Highest'])
    data_row.gender_index = decimal.Decimal(row['Gender Ration Women:Men'])
    data_row.gender_index_rank = int(row['Women to Men Ratio Rank 1=Highest'])
    data_row.households_total = int(row['Number of Households'])
    data_row.average_household_size = decimal.Decimal(row['Average Houshold Size'])
    data_row.household_size_rank = int(row['Household Size Rank 1=Highest'])
    data_row.area = decimal.Decimal(row['Area in Sq. Km.'])
    data_row.area_rank = int(row['Area Size Rank 1=Highest'])
    data_row.population_density = decimal.Decimal(row['Density people per Sq. Km'])
    data_row.population_density_rank = int(row['Population Density Rank 1=Highest'])
    try:
        data_row.registered_voters_total = int(row['Total Registered Voters'])
        data_row.registered_voters_proportion = decimal.Decimal(row['Registered Voters as % of Population'])
        data_row.registered_voters_proportion_rank = int(row['Registered Voters % Rank 1=Highest'])
        data_row.youth_voters_proportion = decimal.Decimal(row['Youth Voters as a % of Total'])
        data_row.youth_voters_proportion_rank = int(row['Youth Voters % Rank 1=Highest'])
    except KeyError:
        # One some kinds of place, such as Counties, these columns don't exist.
        pass
    data_row.save()
| hzj123/56th | pombola/place_data/bin/import_place_data.py | Python | agpl-3.0 | 3,048 |
#! /usr/bin/env python
# -*- coding: UTF-8 -*- #
from table import ParquetTable
__all__ = ['ParquetPool']
class BasePool(dict):
    """A dict-backed pool that validates items before storing them.

    Subclasses must implement :meth:`validate`, which should raise if the
    item is not acceptable for this pool.
    """

    def __setitem__(self, key, item):
        # Reject invalid items before they ever enter the pool.
        self.validate(item)
        super(BasePool, self).__setitem__(key, item)

    def validate(self, item):
        """Check that *item* may be stored; subclasses must override.

        Note: the parameter was previously misspelled ``tem``; renamed to
        ``item`` to match the subclass overrides.
        """
        raise NotImplementedError()
class ParquetPool(BasePool):
    """Pool that only accepts ParquetTable instances."""

    def validate(self, item):
        if isinstance(item, ParquetTable):
            return
        raise ValueError("Wrong table type")
| globocom/MicroDrill | microdrill/pool.py | Python | apache-2.0 | 474 |
############################################################################
# #
# This Source Code Form is subject to the terms of the Mozilla Public #
# License, v. 2.0. If a copy of the MPL was not distributed with this #
# file, You can obtain one at http://mozilla.org/MPL/2.0/. #
# #
# Copyright (c)2017 Digi International Inc. All Rights Reserved. #
# #
############################################################################
import csv
import fileinput
import logging
import os.path
import paramiko
import re
import socket
import sys
import traceback
from time import gmtime, localtime, strftime
CSV_FIELDNAMES = ['devId', 'installCode']
DEVID = '00000000-00000000-00000000-00000000'
DRM_HOSTNAME = "my.devicecloud.com"
HR = "----------------------------------------------------------------"
IP_FILENAME = "iplist.txt"
PASSWORD = "password"
SSH_TIMEOUT = 20 # seconds
USERNAME = "username"
SSH_PORT = 22
CMD_ENABLE_DRM = "cloud 0 clientconn ON"
CMD_SET_SERVER = "cloud 0 server %s" % DRM_HOSTNAME
CMD_SAVEALL = "config 0 save"
CMD_HW_INFO = "hw ?"
CMD_REBOOT = "reboot"
# Global IP List
ip_addrs = []
csvwriter = None
csvfile = None
def config_logging(filename, console_level, file_level):
    """Configure root logging: a log file plus a simpler console stream."""
    # File handler via basicConfig: messages at file_level and above go to
    # ./<filename>.log, truncating any previous run's log.
    logging.basicConfig(level=file_level,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %I:%M:%S%p',
                        filename='./{0}.log'.format(filename),
                        filemode='w')
    # Console handler with its own threshold and a message-only format.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(console_level)
    console_handler.setFormatter(logging.Formatter('%(message)s'))
    # Attach to the root logger so every module's messages reach the console.
    logging.getLogger('').addHandler(console_handler)
def connect_to_router(ip_addr):
    """Open and return an SSH session to the router at *ip_addr*.

    Uses the module-level USERNAME/PASSWORD/SSH_PORT/SSH_TIMEOUT settings
    and auto-accepts unknown host keys.
    """
    logging.info("Connecting to %s..." % ip_addr)
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(hostname=ip_addr, port=SSH_PORT, username=USERNAME,
                   password=PASSWORD, timeout=SSH_TIMEOUT)
    return client
def config_router(ip_addr, reboot=True):
    """Enable the Device Cloud client on a router over SSH.

    Runs the enable/server/save commands, reads the "hw ?" output, and
    optionally reboots. Returns the raw "hw ?" output lines.

    NOTE(review): the SSH connection is never explicitly closed, and only
    the hw-info command's output is read back -- the other exec_command
    calls are fire-and-forget; confirm the device applies them in order.
    """
    ssh_connection = connect_to_router(ip_addr)
    logging.info("Enabling Device Cloud Client for %s..." % ip_addr)
    stdin, stdout, stderr = ssh_connection.exec_command(CMD_ENABLE_DRM)
    logging.info("Setting Device Cloud hostname for %s..." % ip_addr)
    stdin, stdout, stderr = ssh_connection.exec_command(CMD_SET_SERVER)
    logging.info("Saving config to flash for %s..." % ip_addr)
    stdin, stdout, stderr = ssh_connection.exec_command(CMD_SAVEALL)
    logging.info("Getting hardware information for %s..." % ip_addr)
    stdin, stdout, stderr = ssh_connection.exec_command(CMD_HW_INFO)
    # Only this command's output is consumed; it feeds parse_hw_info().
    hw_info = stdout.readlines()
    if reboot:
        logging.info("Rebooting %s..." % ip_addr)
        stdin, stdout, stderr = ssh_connection.exec_command(CMD_REBOOT)
    return hw_info
def add_devices_from_file(fileName):
    """Read IP addresses from *fileName* (relative to the CWD), one per line.

    Lines starting with '#' are skipped. Raises Exception if the file does
    not exist. Returns the list of address strings.

    Fixes over the previous version: the file is closed deterministically
    (the old fileinput stream never was), and a final line without a
    trailing newline no longer loses its last character (the old code
    unconditionally chopped one character off every line).
    """
    path = os.path.join(os.getcwd(), fileName)
    if not os.path.isfile(path):
        raise Exception("File not found, %s" % fileName)
    ip_list = []
    with open(path) as handle:
        for line in handle:
            line = line.rstrip('\n')
            if line.startswith('#'):
                continue
            ip_list.append(line)
    return ip_list
def format_mac_as_devId(mac):
    """Convert a MAC address string into a Device Cloud device id.

    Separators (dash, colon, space) are stripped, the hex digits are
    upper-cased, and 'FF-FF' is inserted between the OUI and serial halves.
    """
    cleaned = mac.replace('-', '').replace(':', '').replace(' ', '').upper()
    return '00000000-00000000-{0}FF-FF{1}'.format(cleaned[:6], cleaned[6:])
def parse_hw_info(hw_info):
    """Extract the device id from "hw ?" output lines.

    Scans *hw_info* for the first line containing "MAC 0:", converts that
    MAC to a device id via format_mac_as_devId(), and returns it. If no
    MAC line is found, returns the all-zero DEVID placeholder.
    """
    logging.debug("HW INFO: %s" % hw_info)
    string_match = "MAC 0:"
    devId = DEVID
    for line in hw_info:
        # logging.debug(line)
        if string_match in line:
            # Strip the label and surrounding whitespace/CRLF, normalise case.
            line = line.replace(string_match, '').strip('\r\n').strip().upper()
            logging.info("MAC: %s" % line)
            devId = format_mac_as_devId(line)
            break
    logging.info("DevId: %s" % devId)
    return devId
def open_csv_file(filename):
    """Open *filename* for writing and return (csv.DictWriter, file handle).

    NOTE(review): 'wb+' is the Python 2 convention for csv output; under
    Python 3 csv writers need text mode with newline=''.
    """
    file = open(filename, 'wb+')
    writer = csv.DictWriter(file, fieldnames=CSV_FIELDNAMES)
    # writer.writeheader() # headers not supported in Bulk Add DRM feature
    return writer, file
def log_to_csv(filename, devId, installCode=''):
    """Append one device row to the bulk-add CSV.

    Lazily opens the CSV on first call, caching the writer and file handle
    in the module-level globals so all calls share one output file.
    """
    global csvwriter, csvfile
    if csvwriter is None:
        csvwriter, csvfile = open_csv_file(filename)
    logging.debug("Logging to CSV %s" % locals())
    row = {
        'devId': devId,
        'installCode': installCode,
    }
    logging.debug("ROW: %s" % row)
    csvwriter.writerow(row)
def log_status(ip_addr, devId, status='Success', error_msg=None):
    # Stub: intentionally does nothing. The signature is kept so callers
    # can be wired up later; TODO implement per-device status reporting.
    pass
def is_valid_ipv4_address(address):
    """Return True if *address* is a valid dotted-quad IPv4 address."""
    if not hasattr(socket, 'inet_pton'):
        # Fallback for platforms without inet_pton: inet_aton is laxer
        # (accepts e.g. "1.2.3"), so additionally require three dots.
        try:
            socket.inet_aton(address)
        except socket.error:
            return False
        return address.count('.') == 3
    try:
        socket.inet_pton(socket.AF_INET, address)
    except socket.error:  # not a valid address
        return False
    return True
def is_valid_ipv6_address(address):
    """Return True if *address* parses as a valid IPv6 address."""
    try:
        socket.inet_pton(socket.AF_INET6, address)
        return True
    except socket.error:  # not a valid address
        return False
def prompt(text, default=None, options=None, data_type="string"):
# logging.debug("prompt: Text: {0} || Default: {1} || Options: {2} || DataType: {3}".format(text, default, options, data_type))
value = None
if options and type(options) is not list:
raise Exception("prompt: options parameter must be a list")
if options:
text = "{0} ({1})".format(text, "/".join([str(opt) for opt in options]))
if default:
text = "{0} [{1}]".format(text, str(default))
# logging.trace("prompt: formatted text: {}".format(text))
if default is not None and options is None:
# logging.trace("prompt: default and not options")
value = raw_input(text)
value = value or default
elif options is not None and default is not None and data_type != 'boolean':
# logging.trace("prompt: options and default and data_type != 'boolean'")
while value not in options:
value = validate_data_type(data_type, raw_input(text))
if not value:
value = default
elif options is not None and default is not None and data_type == 'boolean':
# logging.trace("prompt: options and default and data_type == 'boolean'")
value = validate_data_type(data_type, (raw_input(text) or default))
else:
# logging.trace("prompt: else")
while not value:
value = validate_data_type(data_type, raw_input(text))
return value
if __name__ == "__main__":
now = strftime("%Y%m%d_%H%M%S", localtime())
logfile = 'results_{}_{}'.format(sys.argv[0].replace('.py', ''), now)
config_logging(filename=logfile, console_level=logging.INFO, file_level=logging.INFO)
csv_filename = "bulkadd_results_%s.csv" % now
logging.info(HR)
logging.info("| Starting Application to Enable Digi Remote Manager")
logging.info(HR)
try:
# Should application retry single IP
continuous = False
reboot = True
if len(sys.argv) > 1 and is_valid_ipv4_address(sys.argv[1]):
ip_addrs.append(sys.argv[1])
else:
ip_addrs = add_devices_from_file(IP_FILENAME)
if len(sys.argv) > 1:
if '--noreboot' in sys.argv:
reboot = False
logging.info('No reboot enabled')
if '--continuous' in sys.argv:
continuous = True
logging.info('Continuous script enabled')
logging.info(HR)
reloop = True
while reloop:
first = True
for ip in ip_addrs:
if not first:
logging.info(HR)
try:
first = False
cmd_resp = config_router(ip, reboot)
devId = parse_hw_info(cmd_resp)
log_to_csv(csv_filename, devId)
except socket.error as msg:
if str(msg) == 'timed out':
msg = "SSH connection timed out"
logging.error('%s for %s' % (msg, ip))
continue
except paramiko.ssh_exception.AuthenticationException as auth_err:
logging.error('%s for %s' % (auth_err, ip))
continue
if continuous:
prompt('Type Enter to continue', default="Enter")
if not continuous:
reloop = False
except Exception as err:
if csvfile:
csvfile.close()
logging.error(traceback.print_exc())
logging.info(HR)
logging.info("| Application Complete")
logging.info(HR)
| digidotcom/transport_examples | enable_drm/enable_drm.py | Python | mpl-2.0 | 9,155 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Computation of parcellations using a hierarchical approach.
Author: Bertrand Thirion, 2008
"""
from __future__ import print_function, absolute_import
from warnings import warn
import numpy as np
from numpy.random import rand
from nipy.algorithms.clustering.utils import kmeans, voronoi
from .parcellation import MultiSubjectParcellation
from nipy.algorithms.graph.field import Field
from nipy.algorithms.graph.graph import wgraph_from_coo_matrix
warn('Module nipy.labs.spatial_models.hierarchical_parcellation' +
'deprecated, will be removed',
FutureWarning,
stacklevel=2)
def _jointly_reduce_data(data1, data2, chunksize):
lnvox = data1.shape[0]
aux = np.argsort(rand(lnvox)) [:np.minimum(chunksize, lnvox)]
rdata1 = data1[aux]
rdata2 = data2[aux]
return rdata1, rdata2
def _reduce_and_concatenate(data1, data2, chunksize):
    """Subsample each subject's paired arrays, then stack across subjects.

    Each of the len(data1) subjects contributes at most
    chunksize / n_subjects rows; the two returned arrays keep the rows
    paired in the same order.
    """
    per_subject = chunksize / len(data1)
    pieces1 = []
    pieces2 = []
    for subj_data1, subj_data2 in zip(data1, data2):
        reduced1, reduced2 = _jointly_reduce_data(subj_data1, subj_data2,
                                                  per_subject)
        pieces1.append(reduced1)
        pieces2.append(reduced2)
    return np.concatenate(pieces1), np.concatenate(pieces2)
def _field_gradient_jac(ref, target):
    """
    Given a reference field ref and a target field target
    compute the jacobian of the target with respect to ref

    Parameters
    ----------
    ref: Field instance
        that yields the topology of the space
    target : array of shape(ref.V,dim)

    Returns
    -------
    fgj: array of shape (ref.V)
        that gives the jacobian implied by the ref.field->target transformation.
    """
    import numpy.linalg as nl
    n = ref.V
    xyz = ref.field
    dim = xyz.shape[1]
    fgj = []
    ln = ref.list_of_neighbors()
    for i in range(n):
        j = ln[i]
        # A local jacobian needs at least `dim` neighbors to be well posed.
        if np.size(j) > dim - 1:
            # Displacements of the neighbors in reference and target space.
            dx = np.squeeze(xyz[j] - xyz[i])
            df = np.squeeze(target[j] - target[i])
            # Least-squares linear map dx -> df; its determinant is the
            # local volume change.
            FG = np.dot(nl.pinv(dx), df)
            fgj.append(nl.det(FG))
        else:
            # Too few neighbors: assume no deformation.
            fgj.append(1)
    fgj = np.array(fgj)
    return fgj
def _exclusion_map_dep(i, ref, target, targeti):
    """ Ancillary function to determine admissible values of some position
    within some predefined values.

    NOTE(review): the trailing "_dep" and the newer _exclusion_map below
    suggest this is a deprecated variant kept for reference.

    Parameters
    ----------
    i (int): index of the structure under consideration
    ref: Field that represent the topological structure of parcels
         and their standard position
    target: array of shape (ref.V,3): current position of the parcels
    targeti array of shape (n,3): possible new positions for the ith item

    Returns
    -------
    emap: array of shape (n): a potential that yields the fitness
          of the proposed positions given the current configuration
          (unlike _exclusion_map, no rmin value is returned here)
    """
    xyz = ref.field
    ln = ref.list_of_neighbors()
    j = ln[i]
    if np.size(j) > 0:
        dx = xyz[j] - xyz[i]
        dx = np.squeeze(dx)
        # Quarter of the squared distance to the closest neighbor: the
        # allowed movement radius.
        rmin = np.min(np.sum(dx ** 2, 1)) / 4
        # Expected position of item i given its neighbors' displacements.
        u0 = xyz[i] + np.mean(target[j] - xyz[j], 1)
        emap = - np.sum((targeti - u0) ** 2, 1) + rmin
    else:
        # Isolated node: every candidate position is equally fit.
        emap = np.zeros(targeti.shape[0])
    return emap
def _exclusion_map(i, ref, target, targeti):
    """Ancillary function to determine admissible values of some position
    within some predefined values

    Parameters
    ----------
    i (int): index of the structure under consideration
    ref: Field that represent the topological structure of parcels
         and their standard position
    target: array of shape (ref.V,3): current position of the parcels
    targeti array of shape (n,3): possible new positions for the ith item

    Returns
    -------
    emap: array of shape (n): a potential that yields the fitness
          of the proposed positions given the current configuration
    rmin (double): ancillary parameter
    """
    xyz = ref.field
    fd = target.shape[1]
    ln = ref.list_of_neighbors()
    j = ln[i]
    j = np.reshape(j, np.size(j))
    rmin = 0
    if np.size(j) > 0:
        dx = np.reshape(xyz[j] - xyz[i], (np.size(j), fd))
        # Quarter of the mean squared neighbor distance: movement tolerance.
        rmin = np.mean(np.sum(dx ** 2, 1)) / 4
        # Expected position of item i given its neighbors' displacements.
        u0 = xyz[i] + np.mean(target[j] - xyz[j], 0)
        emap = rmin - np.sum((targeti - u0) ** 2, 1)
        # Penalise candidates that come too close to any neighbor's
        # current target position.
        for k in j:
            amap = np.sum((targeti - target[k]) ** 2, 1) - rmin / 4
            emap[amap < 0] = amap[amap < 0]
    else:
        # Isolated node: every candidate position is equally fit.
        emap = np.zeros(targeti.shape[0])
    return emap, rmin
def _field_gradient_jac_Map_(i, ref, target, targeti):
    """
    Given a reference field ref and a target field target
    compute the jacobian of the target with respect to ref,
    evaluated for each candidate position in targeti of item i.

    NOTE(review): the trailing underscore and the fuller
    _field_gradient_jac_Map below suggest this simpler variant is kept
    for reference only.
    """
    import scipy.linalg as nl
    xyz = ref.field
    fgj = []
    ln = ref.list_of_neighbors()
    j = ln[i]
    if np.size(j) > 0:
        dx = xyz[j] - xyz[i]
        dx = np.squeeze(dx)
        # Pseudo-inverse of the neighbor displacement matrix, shared by
        # all candidate positions.
        idx = nl.pinv(dx)
        for k in range(targeti.shape[0]):
            df = target[j] - targeti[k]
            df = np.squeeze(df)
            fg = np.dot(idx, df)
            fgj.append(nl.det(fg))
    else:
        # Isolated node: no deformation measurable.
        fgj = np.zeros(targeti.shape[0])
    fgj = np.array(fgj)
    return fgj
def _field_gradient_jac_Map(i, ref, target, targeti):
    """
    Given a reference field ref and a target field target
    compute the jacobian of the target with respect to ref,
    evaluated for each candidate position in targeti of item i.
    The returned value for each candidate is the minimum of the local
    jacobian at i and the jacobians of i's neighbors recomputed with the
    candidate substituted in.
    """
    import scipy.linalg as nl
    xyz = ref.field
    fgj = []
    ln = ref.list_of_neighbors()
    j = ln[i]
    if np.size(j) > 0:
        dx = xyz[j] - xyz[i]
        dx = np.squeeze(dx)
        idx = nl.pinv(dx)
        # Jacobian at node i for each candidate position.
        for k in range(targeti.shape[0]):
            df = target[j] - targeti[k]
            df = np.squeeze(df)
            fg = np.dot(idx, df)
            fgj.append(nl.det(fg))
        fgj = np.array(fgj)
        # For each neighbor, recompute its jacobian with the candidate in
        # place of node i's current target, and keep the worst case.
        for ij in np.squeeze(j):
            aux = []
            jj = np.squeeze(ln[ij])
            dx = xyz[jj] - xyz[ij]
            dx = np.squeeze(dx)
            idx = nl.pinv(dx)
            ji = np.nonzero(jj == i)
            for k in range(targeti.shape[0]):
                df = target[jj] - target[ij]
                df[ji] = targeti[k] - target[ij]
                df = np.squeeze(df)
                fg = np.dot(idx, df)
                aux.append(nl.det(fg))
            aux = np.array(aux)
            fgj = np.minimum(fgj, aux)
    else:
        # Isolated node: no deformation measurable.
        fgj = np.zeros(targeti.shape[0])
    return fgj
def _optim_hparcel(feature, domain, graphs, nb_parcel, lamb=1., dmax=10.,
                   niter=5, initial_mask=None, chunksize=1.e5, verbose=0):
    """ Core function of the hierarchical parcellation procedure.

    Parameters
    ----------
    feature: list of subject-related feature arrays
    domain: discrete_domain.DiscreteDomain instance yielding the
        coordinates of the parcelled space
    graphs: list of per-subject graphs that represent the topology of the
        parcellation
    nb_parcel: int
        the number of desired parcels
    lamb=1.0: parameter to weight position
              and feature impact on the algorithm
    dmax = 10: locality parameter (in the space of domain.coord)
               to limit search volume (CPU save)
    niter = 5: number of iterations in the algorithm
    initial_mask: array of shape (domain.size, n_subjects);
        voxels with value > -1 take part for that subject.
        NOTE(review): despite the None default, it is used
        unconditionally -- callers must supply it.
    chunksize = int, optional
    verbose=0: verbosity level

    Returns
    -------
    U: list of arrays of length nsubj
       subject-dependent parcellations
    Proto_anat: array of shape (nvox) labelling of the common space
                (template parcellation)
    """
    nb_subj = len(feature)
    # a1. perform a rough clustering of the data to make prototype
    indiv_coord = np.array([domain.coord[initial_mask[:, s] > - 1]
                            for s in range(nb_subj)])
    reduced_anat, reduced_feature = _reduce_and_concatenate(
        indiv_coord, feature, chunksize)
    _, labs, _ = kmeans(reduced_feature, nb_parcel, Labels=None, maxiter=10)
    # Per-parcel prototypes: anatomical centroid and feature centroid.
    proto_anat = [np.mean(reduced_anat[labs == k], 0)
                  for k in range(nb_parcel)]
    proto_anat = np.array(proto_anat)
    proto = [np.mean(reduced_feature[labs == k], 0) for k in range(nb_parcel)]
    proto = np.array(proto)
    # a2. topological model of the parcellation
    # group-level part
    spatial_proto = Field(nb_parcel)
    spatial_proto.set_field(proto_anat)
    spatial_proto.voronoi_diagram(proto_anat, domain.coord)
    spatial_proto.set_gaussian(proto_anat)
    spatial_proto.normalize()
    for git in range(niter):
        LP = []
        LPA = []
        U = []
        Energy = 0
        for s in range(nb_subj):
            # b.subject-specific instances of the model
            # b.0 subject-specific information
            Fs = feature[s]
            lac = indiv_coord[s]
            target = proto_anat.copy()
            # NOTE(review): np.int was removed in NumPy 1.24; newer NumPy
            # requires plain int here.
            lseeds = np.zeros(nb_parcel, np.int)
            # Visit the parcels in a random order each pass.
            aux = np.argsort(rand(nb_parcel))
            toto = np.zeros(lac.shape[0])
            for j in range(nb_parcel):
                # b.1 speed-up :only take a small ball
                i = aux[j]
                dx = lac - target[i]
                iz = np.nonzero(np.sum(dx ** 2, 1) < dmax ** 2)
                iz = np.reshape(iz, np.size(iz))
                if np.size(iz) == 0:
                    # Nothing within dmax: fall back to the closest voxel.
                    iz = np.array([np.argmin(np.sum(dx ** 2, 1))])
                # b.2: anatomical constraints
                lanat = np.reshape(lac[iz], (np.size(iz),
                                             domain.coord.shape[1]))
                pot = np.zeros(np.size(iz))
                JM, rmin = _exclusion_map(i, spatial_proto, target, lanat)
                # Inadmissible positions (JM < 0) get infinite potential.
                pot[JM < 0] = np.inf
                pot[JM >= 0] = - JM[JM >= 0]
                # b.3: add feature discrepancy
                df = Fs[iz] - proto[i]
                df = np.reshape(df, (np.size(iz), proto.shape[1]))
                pot += lamb * np.sum(df ** 2, 1)
                # b.4: solution
                if np.sum(np.isinf(pot)) == np.size(pot):
                    # All candidates inadmissible: fall back to distance only.
                    pot = np.sum(dx[iz] ** 2, 1)
                sol = iz[np.argmin(pot)]
                target[i] = lac[sol]
                lseeds[i] = sol
                toto[sol] = 1
            if verbose > 1:
                jm = _field_gradient_jac(spatial_proto, target)
                print(jm.min(), jm.max(), np.sum(toto > 0))
            # c.subject-specific parcellation
            g = graphs[s]
            f = Field(g.V, g.edges, g.weights, Fs)
            U.append(f.constrained_voronoi(lseeds))
            Energy += np.sum((Fs - proto[U[-1]]) ** 2) / \
                np.sum(initial_mask[:, s] > - 1)
            # recompute the prototypes
            # (average in subject s)
            lproto = [np.mean(Fs[U[-1] == k], 0) for k in range(nb_parcel)]
            lproto = np.array(lproto)
            lproto_anat = np.array([np.mean(lac[U[-1] == k], 0)
                                    for k in range(nb_parcel)])
            LP.append(lproto)
            LPA.append(lproto_anat)
        # recompute the prototypes across subjects
        proto_mem = proto.copy()
        proto = np.mean(np.array(LP), 0)
        proto_anat = np.mean(np.array(LPA), 0)
        # Largest prototype displacement: the convergence criterion.
        displ = np.sqrt(np.sum((proto_mem - proto) ** 2, 1).max())
        if verbose:
            print('energy', Energy, 'displacement', displ)
        # recompute the topological model
        spatial_proto.set_field(proto_anat)
        spatial_proto.voronoi_diagram(proto_anat, domain.coord)
        spatial_proto.set_gaussian(proto_anat)
        spatial_proto.normalize()
        if displ < 1.e-4 * dmax:
            break
    return U, proto_anat
def hparcel(domain, ldata, nb_parcel, nb_perm=0, niter=5, mu=10., dmax=10.,
            lamb=100.0, chunksize=1.e5, verbose=0, initial_mask=None):
    """
    Function that performs the parcellation by optimizing the
    inter-subject similarity while retaining the connectedness
    within subject and some consistency across subjects.

    Parameters
    ----------
    domain: discrete_domain.DiscreteDomain instance,
            yields all the spatial information on the parcelled domain
    ldata: list of (n_subj) arrays of shape (domain.size, dim)
           the feature data used to inform the parcellation
    nb_parcel: int,
               the number of parcels
    nb_perm: int, optional,
             the number of times the parcellation and prfx
             computation is performed on sign-swaped data
    niter: int, optional,
           number of iterations to obtain the convergence of the method
           information in the clustering algorithm
    mu: float, optional,
        relative weight of anatomical information
    dmax: float optional,
          radius of allowed deformations
    lamb: float optional
          parameter to control the relative importance of space vs function
    chunksize; int, optional
               number of points used in internal sub-sampling
    verbose: bool, optional,
             verbosity mode
    initial_mask: array of shape (domain.size, nb_subj), optional
                  initial subject-depedent masking of the domain

    Returns
    -------
    Pa: the resulting parcellation structure appended with the labelling
        (when nb_perm > 0, a (Pa, prfx0) tuple is returned instead)
    """
    # a various parameters
    nbvox = domain.size
    nb_subj = len(ldata)
    if initial_mask is None:
        # NOTE(review): np.int was removed in NumPy 1.24; newer NumPy
        # requires plain int here.
        initial_mask = np.ones((nbvox, nb_subj), np.int)
    graphs = []
    feature = []
    for s in range(nb_subj):
        # build subject-specific models of the data
        lnvox = np.sum(initial_mask[:, s] > - 1)
        lac = domain.coord[initial_mask[:, s] > - 1]
        beta = np.reshape(ldata[s], (lnvox, ldata[s].shape[1]))
        # Append the (standardized, mu-weighted) coordinates to the
        # functional features so that space and function compete.
        lf = np.hstack((beta, mu * lac / (1.e-15 + np.std(domain.coord, 0))))
        feature.append(lf)
        g = wgraph_from_coo_matrix(domain.topology)
        g.remove_trivial_edges()
        graphs.append(g)
    # main function
    all_labels, proto_anat = _optim_hparcel(
        feature, domain, graphs, nb_parcel, lamb, dmax, niter, initial_mask,
        chunksize=chunksize, verbose=verbose)
    # write the individual labelling
    labels = - np.ones((nbvox, nb_subj)).astype(np.int)
    for s in range(nb_subj):
        labels[initial_mask[:, s] > -1, s] = all_labels[s]
    # compute the group-level labels
    template_labels = voronoi(domain.coord, proto_anat)
    # create the parcellation
    pcl = MultiSubjectParcellation(domain, individual_labels=labels,
                                   template_labels=template_labels,
                                   nb_parcel=nb_parcel)
    pcl.make_feature('functional', np.rollaxis(np.array(ldata), 1, 0))
    if nb_perm > 0:
        prfx0 = perm_prfx(domain, graphs, feature, nb_parcel, ldata,
                          initial_mask, nb_perm, niter, dmax, lamb, chunksize)
        return pcl, prfx0
    else:
        return pcl
def perm_prfx(domain, graphs, features, nb_parcel, ldata, initial_mask=None,
              nb_perm=100, niter=5, dmax=10., lamb=100.0, chunksize=1.e5,
              verbose=1):
    """
    Permutation scheme: re-run the parcellation nb_perm times on
    sign-swapped data and collect the maximal parcel-level t statistic of
    each run, yielding a null distribution for the observed prfx.

    caveat: assumes that the functional dimension is 1
    """
    from ..utils.reproducibility_measures import ttest
    # permutations for the assesment of the results
    prfx0 = []
    adim = domain.coord.shape[1]
    nb_subj = len(ldata)
    for q in range(nb_perm):
        feature = []
        sldata = []
        for s in range(nb_subj):
            lf = features[s].copy()
            # Random per-subject sign flip; only the functional columns
            # are flipped, the trailing adim coordinate columns are kept.
            swap = (rand() > 0.5) * 2 - 1
            lf[:, 0:-adim] = swap * lf[:, 0:-adim]
            sldata.append(swap * ldata[s])
            feature.append(lf)
        # optimization part
        all_labels, proto_anat = _optim_hparcel(
            feature, domain, graphs, nb_parcel, lamb, dmax, niter,
            initial_mask, chunksize=chunksize)
        # NOTE(review): np.int was removed in NumPy 1.24; newer NumPy
        # requires plain int here.
        labels = - np.ones((domain.size, nb_subj)).astype(np.int)
        for s in range(nb_subj):
            labels[initial_mask[:, s] > -1, s] = all_labels[s]
        # compute the group-level labels
        template_labels = voronoi(domain.coord, proto_anat)
        # create the parcellation
        pcl = MultiSubjectParcellation(domain, individual_labels=labels,
                                      template_labels=template_labels)
        pdata = pcl.make_feature('functional',
                                 np.rollaxis(np.array(ldata), 1, 0))
        prfx = ttest(np.squeeze(pdata))
        if verbose:
            print(q, prfx.max(0))
        prfx0.append(prfx.max(0))
    return prfx0
| alexis-roche/nipy | nipy/labs/spatial_models/hierarchical_parcellation.py | Python | bsd-3-clause | 16,558 |
# -*- test-case-name: buildbot_UnrealEngine.test.test_Build -*-
from ..UnrealCommand import BaseUnrealCommand
from buildbot import config
from twisted.internet import defer
class Build(BaseUnrealCommand):
    """Runs the UnrealBuildTool (UBT)

    Invokes the engine's Build/Rebuild/Clean batch file for a given
    target, platform and configuration.
    """
    name = "UEBuild"
    # Modes this step can run the batch file in; selected via build_type.
    supported_build_types = ["Build", "Rebuild", "Clean"]
    # Buildbot properties rendered before the step runs.
    renderables = [
        "target_platform",
        "target_config",
        "wait_mutex",
        "build_type",
        "target",
    ]
    def __init__(
            self,
            engine_path,
            project_path,
            target,
            build_type="Build",
            target_config="Development",
            target_platform="Win64",
            wait_mutex=True,
            **kwargs):
        self.target = target
        self.target_config = target_config
        self.target_platform = target_platform
        self.build_type = build_type
        # When True, passes -WaitMutex so concurrent UBT runs queue up
        # instead of failing.
        self.wait_mutex = wait_mutex
        super(Build, self).__init__(engine_path, project_path, **kwargs)
    @defer.inlineCallbacks
    def run(self):
        # Assemble: <Engine>/.../<build_type>.bat <target> <platform> <config> <project>
        command = [
            self.getEngineBatchFilesPath(
                self.build_type, inside_platform_dir=True),
            self.target,
            self.target_platform,
            self.target_config,
            self.project_path]
        if self.wait_mutex:
            command.append("-WaitMutex")
        self.setupLogfiles()
        cmd = yield self.makeRemoteShellCommand(command=command)
        yield self.runCommand(cmd)
        defer.returnValue(cmd.results())
    def getCurrentSummary(self):
        # Shown while the step is running.
        return {"step": " ".join(self.getDescription(False))}
    def getResultSummary(self):
        # Shown once the step has finished.
        return {"step": " ".join(self.getDescription(True))}
    def getDescription(self, done=False):
        """Build the step description words; *done* switches the tense."""
        description = [self.name]
        description.append("built" if done else "is building")
        description.extend([
            self.getProjectFileName(),
            "for",
            self.target_config,
            self.target_platform
        ])
        if done:
            description.extend(self.getDescriptionDetails())
        return description
    def doSanityChecks(self):
        # Validate renderable settings when they are plain strings (a
        # renderable may not be resolvable at config time).
        # NOTE(review): supported_target_config / supported_target_platforms
        # are not defined in this class -- presumably inherited from
        # BaseUnrealCommand; confirm.
        if (isinstance(self.build_type, str) and
                self.build_type not in self.supported_build_types):
            config.error(
                "build_type '{0}' is not supported".format(self.build_type))
        if (isinstance(self.target_config, str) and
                self.target_config not in self.supported_target_config):
            config.error("target_config '{0}' is not supported".format(
                self.target_config))
        if (isinstance(self.target_platform, str) and
                self.target_platform not in self.supported_target_platforms):
            config.error("target_platform '{0}' is not supported".format(
                self.target_platform))
        super(Build, self).doSanityChecks()
class Rebuild(Build):
    """Build step preconfigured to run UBT's 'Rebuild' (clean then build) mode."""
    name = "UERebuild"
    def __init__(self, engine_path, project_path, target, **kwargs):
        super(Rebuild, self).__init__(
            engine_path, project_path, target,
            build_type="Rebuild", **kwargs)
class Clean(Build):
    """Build step preconfigured to run UBT's 'Clean' mode."""
    name = "UEClean"
    def __init__(self, engine_path, project_path, target, **kwargs):
        super(Clean, self).__init__(
            engine_path, project_path, target,
            build_type="Clean", **kwargs)
| pampersrocker/buildbot-UnrealEngine | buildbot_UnrealEngine/BuildTool/Build.py | Python | mit | 3,514 |
"""This module contains all necessary views to power up shopping list web application"""
import time
import main
from flask import flash, redirect, render_template, request, session, url_for
from flask.views import View
from .db.shopping_list.shopping import ShoppingList
from .forms import (CreateShoppingItemForm, CreateShoppingListForm, LoginForm, RegistrationForm)
from .utils.helpers import (check_name, get_shl, check_duplicate_item_name,
change_shl_name, check_item, get_item, check_username,
check_email, get_user)
class RegisterView(View):
    """Handles user registration: GET renders the form, POST creates the user."""
    methods = ['GET', 'POST']
    def dispatch_request(self):
        form = RegistrationForm(request.form)
        # Already-authenticated users are bounced back to the index page.
        if 'user' in session:
            flash(u'you are already logged in!', 'info')
            return redirect(url_for('index'))
        if request.method == 'POST':
            # Rebind the form to the submitted data before validating.
            form = RegistrationForm(request.form)
            if form.validate():
                username = form.username.data
                email = form.email.data
                password1 = form.password.data
                errors = []
                if not check_username(username):  # username not yet taken
                    if not check_email(email):  # email not yet taken
                        user = main.APP.user_manager.create_user(username, email, password1)
                        main.APP_USERS.insert(0, user)
                        flash(u'Success! you may now login using '
                              u'your username and password', 'success')
                        return redirect(url_for('index'))
                    else:
                        error = '%(email)s already taken' % dict(email=email)
                        errors.append(error)
                    else:
                        error = '%(username)s already taken' % dict(username=username)
                        errors.append(error)
                # Report all collected uniqueness violations in one flash.
                flash(u'%(errors)s' % dict(errors=', '.join(errors)), 'warning')
        return render_template('register.html', title='Register', form=form)
class LoginView(View):
    """Handles user login: GET renders the form, POST authenticates."""
    methods = ['GET', 'POST']
    def dispatch_request(self):
        # Already-authenticated users are bounced back to the index page.
        if 'user' in session:
            flash(u'you are already logged in!', 'info')
            return redirect(url_for('index'))
        form = LoginForm()
        if request.method == 'POST':
            form = LoginForm(request.form)
            if form.validate():
                username = form.username.data
                password = form.password.data
                user = get_user(username)
                # get_user returns False when the username is unknown.
                if user is not False:
                    if user.verify_password(password):
                        session['user'] = username
                        flash(u'login successful', 'success')
                        return redirect(url_for('index'))
                # Deliberately vague message: does not reveal whether the
                # username or the password was wrong.
                flash(u'incorrect username or password', 'info')
        return render_template('login.html', form=form, title='Login')
class LogoutView(View):
    """Logs the current user out and redirects to the index page."""
    methods = ['GET', ]
    def dispatch_request(self):
        if 'user' in session:
            session.pop('user')
            # Bugfix: flash before redirecting so the user who just logged
            # out sees the confirmation. Previously the success message was
            # only shown to visitors who were NOT logged in.
            flash(u'successfully logged out!', 'success')
            return redirect(url_for('index'))
        # Not logged in: nothing to do, just go home.
        return redirect(url_for('index'))
class IndexView(View):
    """Renders the public landing page."""
    methods = ['GET', ]
    def dispatch_request(self):
        # The template only needs to know whether a user is logged in.
        logged_in = 'user' in session
        return render_template('index.html', is_auth=logged_in, title='Home Page')
class DashboardView(View):
    """Shows the logged-in user's dashboard with their shopping lists."""
    methods = ['GET', ]
    def dispatch_request(self):
        is_auth = False
        username = None
        if 'user' not in session:  # check if user is logged in
            flash('you must be logged in, or create an account if you dont have one', 'warning')
            return redirect(url_for('login'))
        if 'user' in session:
            is_auth = True
            username = session.get('user')
        owner = session.get('user')
        # Only lists created by the current user are shown; entries in
        # main.APP.shopping_list are dicts of {'name': ..., 'shl': ...}.
        user_shopping_list = [ushl for ushl in main.APP.shopping_list
                              if owner == ushl.get('shl').added_by]
        return render_template('dashboard.html', is_auth=is_auth,
                               shopping_lists=user_shopping_list, title='Dashboard',
                               username=username)
class CreateShoppingListView(View):
    """Creates a new shopping list for the logged-in user."""
    methods = ['GET', 'POST']
    def dispatch_request(self):
        form = CreateShoppingListForm()
        is_auth = False
        if 'user' not in session:
            flash(u'Warning!! you must be logged in', 'warning')
            return redirect(url_for('login'))
        if 'user' in session:
            is_auth = True
        if request.method == 'POST':
            form = CreateShoppingListForm(request.form)
            if form.validate():
                name = form.name.data
                # check if shopping list name exists
                if not check_name(name):
                    user = session.get('user')
                    today = time.strftime("%x")  # locale's date representation
                    shl = ShoppingList()
                    shl.create(name, user, today)
                    main.APP.shopping_list.append({'name': name, 'shl': shl})
                    flash(u'Shopping list created', 'success')
                    return redirect(url_for('dashboard'))
                flash(u'Shopping list with that name already exists, '
                      u'try another name', 'warning')
            # NOTE(review): this also fires on the duplicate-name path just
            # flashed above -- confirm that double message is intended.
            flash(u'Correct the errors', 'warning')
        return render_template('shopping_list/create-shopping-list.html', is_auth=is_auth,
                               title='Create Shopping List', form=form)
class ShoppingListDetailView(View):
    """
    A View to handle retrieval of a specific shopping list and creation of
    its shopping items.
    """
    methods = ['GET', 'POST']
    def dispatch_request(self):
        is_auth = False
        if 'user' not in session:  # check if user is logged in
            flash('you must be logged in, or create an account if you dont have one')
            return redirect(url_for('login'))
        if 'user' in session:
            is_auth = True
        form = CreateShoppingItemForm()
        name = request.args.get('name')
        if not check_name(name):
            flash(u'The requested shopping list does not exist!', 'danger')
            return redirect(url_for('dashboard'))
        shl = get_shl(name)
        if request.method == 'POST':
            form = CreateShoppingItemForm(request.form)
            if form.validate():
                shl_item = main.APP.shopping_item()
                item_name = form.item_name.data
                # Item names must be unique within a list.
                if check_duplicate_item_name(name, item_name):
                    flash(u"item with that name already exists", 'warning')
                else:
                    item_quantity = form.quantity.data
                    item_price = form.price.data
                    shl_item.create(item_name, float(item_quantity), float(item_price), False)
                    shl.get('shl').items.append(shl_item)
                    flash(u'Item successfully added', 'success')
                return redirect(url_for('shopping-list-detail', name=name))
            flash(u'Please correct the errors below', 'warning')
        return render_template(
            'shopping_list/shopping-list-detail.html',
            obj=shl, form=form, is_auth=is_auth, title=name.capitalize())
class UpdateShoppingListView(View):
    """
    Renames an existing shopping list: GET shows the form, POST applies it.
    """
    methods = ['GET', 'POST']
    def dispatch_request(self):
        # Consistency fix: every other mutating view in this module requires
        # a logged-in user; this one previously allowed anonymous renames.
        if 'user' not in session:
            flash('you must be logged in, or create an account if you dont have one', 'warning')
            return redirect(url_for('login'))
        name = request.args.get('name')
        form = CreateShoppingListForm(name=name)
        if not check_name(name):
            flash(u'The requested shopping list does not exist', 'danger')
            return redirect(url_for('dashboard'))
        if request.method == 'POST':
            form = CreateShoppingListForm(request.form)
            if form.validate():
                new_name = form.name.data
                shl = get_shl(name)
                # Update both the list object and the registry entry.
                shl.get('shl').update('name', new_name)
                change_shl_name(name, new_name)
                flash(u'Shopping list name changed successfully', 'success')
                return redirect(url_for('dashboard'))
        return render_template('shopping_list/shopping-list-edit.html', form=form, name=name)
class UpdateShoppingItemView(View):
    """
    A View to update a single shopping item (name, quantity, price, checked).
    """
    methods = ['GET', 'POST']
    def dispatch_request(self):
        is_auth = False
        if 'user' not in session:  # check if user is logged in
            flash('you must be logged in, or create an account if you dont have one')
            return redirect(url_for('login'))
        if 'user' in session:
            is_auth = True
        name = request.args.get('sname')  # name of the shopping list
        item_name = request.args.get('iname')
        if not check_name(name):
            flash(u'The requested shopping list does not exist', 'warning')
            return redirect(url_for('dashboard'))
        if not check_item(name, item_name):
            flash(u'The requested shopping item does not exist', 'warning')
            return redirect(url_for('dashboard'))
        # Snapshot the item's current values to pre-populate the form.
        prev_data = {}
        for item in get_shl(name).get('shl').items:
            if item.name == item_name:
                prev_data.update({'name': item.name})
                prev_data.update({'quantity': item.quantity})
                prev_data.update({'price': item.price})
                prev_data.update({'checked': item.checked})
                break
        if not prev_data:
            flash(u'The shopping item you are trying to update does not exist', 'danger')
            # Bugfix: previously execution fell through to prev_data.pop()
            # below and crashed with a KeyError instead of redirecting.
            return redirect(url_for('dashboard'))
        form = CreateShoppingItemForm(
            item_name=prev_data.pop('name'),
            quantity=prev_data.pop('quantity'),
            price=prev_data.pop('price'),
            checked=prev_data.pop('checked')
        )
        if request.method == 'POST':
            form = CreateShoppingItemForm(request.form)
            if form.validate():
                new_item_name = form.item_name.data
                new_quantity = float(form.quantity.data)
                new_price = float(form.price.data)
                checked = form.checked.data
                item = get_item(name, item_name)
                if item:
                    item.update('name', new_item_name)
                    item.update('quantity', new_quantity)
                    item.update('price', new_price)
                    item.update('checked', checked)
                    flash(u'Item successfully updated', 'success')
                return redirect(url_for('shopping-list-detail', name=name))
        return render_template('shopping_list/shopping-item-edit.html', form=form,
                               item_name=item_name, is_auth=is_auth,
                               title='Update %(item)s' % dict(item=item_name))
class RemoveShoppingListView(View):
    """A view to remove a single shopping list."""
    methods = ['GET', ]
    def dispatch_request(self):
        is_auth = False
        if 'user' not in session:  # check if user is logged in
            flash('you must be logged in, or create an account if you dont have one')
            return redirect(url_for('login'))
        if 'user' in session:
            is_auth = True
        name = request.args.get('name')
        # Robustness fix: other views verify the list exists first; without
        # this guard, list.remove() is called with a failed lookup result
        # and raises ValueError.
        if not check_name(name):
            flash(u'The requested shopping list does not exist', 'danger')
            return redirect(url_for('dashboard'))
        shl = get_shl(name)
        main.APP.shopping_list.remove(shl)
        flash(u'Success!! Shopping List removed', 'success')
        return redirect(url_for('dashboard'))
class RemoveShoppingItemView(View):
    """A view to remove a single item from a shopping list."""
    methods = ['GET', 'POST']
    def dispatch_request(self):
        is_auth = False
        if 'user' not in session:  # check if user is logged in
            flash('you must be logged in, or create an account if you dont have one')
            return redirect(url_for('login'))
        if 'user' in session:
            is_auth = True
        name = request.args.get('name')
        item_name = request.args.get('item_name')
        # Robustness fix: guard against an unknown list name before
        # dereferencing the lookup result.
        if not check_name(name):
            flash(u'The requested shopping list does not exist', 'danger')
            return redirect(url_for('dashboard'))
        shl_items = get_shl(name).get('shl').items
        for item in shl_items:
            if item.name == item_name:
                shl_items.remove(item)
                # Bugfix: flash success only when something was actually
                # removed (it was unconditional), and stop iterating a list
                # that was just mutated.
                flash(u"Success!! Item succesfully removed", 'success')
                break
        return redirect(url_for('shopping-list-detail', name=name))
class AboutView(View):
    """Renders the static 'about' flat page."""
    methods = ['GET']
    def dispatch_request(self):
        # No dynamic context is needed for this page.
        template_name = 'flatpages/about.html'
        return render_template(template_name, title='About')
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
"""Endpoints for the photo albums."""
from flask import Blueprint, g, request, redirect, url_for, flash
import rophako.model.user as User
import rophako.model.photo as Photo
from rophako.utils import (template, pretty_time, render_markdown,
login_required, ajax_response)
from rophako.plugin import load_plugin
from rophako.settings import Config
mod = Blueprint("photo", __name__, url_prefix="/photos")
load_plugin("rophako.modules.comment")
@mod.route("/")
def index():
    """Redirect the photo root URL to the album listing."""
    return redirect(url_for(".albums"))
@mod.route("/albums")
def albums():
    """View the index of the photo albums."""
    albums = Photo.list_albums()
    # If there's only one album, jump directly to that one.
    if len(albums) == 1:
        return redirect(url_for(".album_index", name=albums[0]["name"]))
    # g.info carries template context in this app.
    g.info["albums"] = albums
    return template("photos/albums.html")
@mod.route("/album/<name>")
def album_index(name):
    """View the photos inside an album."""
    photos = Photo.list_photos(name)
    if photos is None:
        flash("That album doesn't exist.")
        return redirect(url_for(".albums"))
    g.info["album"] = name
    g.info["album_info"] = Photo.get_album(name)
    if not g.info["album_info"]:
        flash("That photo album wasn't found!")
        return redirect(url_for(".albums"))
    # Album and per-photo descriptions are stored as Markdown source.
    g.info["markdown"] = render_markdown(g.info["album_info"]["description"])
    g.info["photos"] = photos
    # Render Markdown descriptions for photos.
    for photo in g.info["photos"]:
        photo["data"]["markdown"] = render_markdown(photo["data"].get("description", ""))
    return template("photos/album.html")
@mod.route("/view/<key>")
def view_photo(key):
    """View a specific photo."""
    photo = Photo.get_photo(key)
    if photo is None:
        flash("That photo wasn't found!")
        return redirect(url_for(".albums"))
    # Get the author info; silently omitted when the user no longer exists.
    author = User.get_user(uid=photo["author"])
    if author:
        g.info["author"] = author
    g.info["photo"] = photo
    g.info["photo"]["key"] = key
    # Human-readable upload time, formatted per site configuration.
    g.info["photo"]["pretty_time"] = pretty_time(Config.photo.time_format, photo["uploaded"])
    g.info["photo"]["markdown"] = render_markdown(photo.get("description", ""))
    return template("photos/view.html")
@mod.route("/upload", methods=["GET", "POST"])
@login_required
def upload():
    """Upload a photo from the local PC or from a URL.

    GET renders the upload form; POST performs the upload and redirects to
    the album (multi-upload) or the crop page (single upload).
    """
    if request.method == "POST":
        # Is this an ajax post or a direct post?
        is_ajax = request.form.get("__ajax", "false") == "true"
        # Album name.
        album = request.form.get("album") or request.form.get("new-album")
        # What source is the pic from?
        result = None
        location = request.form.get("location")
        if location == "pc":
            # An upload from the PC.
            result = Photo.upload_from_pc(request)
        elif location == "www":
            # An upload from the Internet.
            result = Photo.upload_from_www(request.form)
        else:
            flash("Stop messing around.")
            return redirect(url_for(".upload"))
        # How'd it go?
        if result["success"] is not True:
            if is_ajax:
                return ajax_response(False, result["error"])
            else:
                flash("The upload has failed: {}".format(result["error"]))
                return redirect(url_for(".upload"))
        # Good!
        if is_ajax:
            # Was it a multiple upload?
            if result.get("multi"):
                return ajax_response(True, url_for(".album_index", name=album))
            else:
                return ajax_response(True, url_for(".crop", photo=result["photo"]))
        else:
            # Bugfix: use .get() like the ajax branch above; a single
            # upload result may not carry a "multi" key at all, which made
            # result["multi"] raise KeyError.
            if result.get("multi"):
                return redirect(url_for(".album_index", name=album))
            else:
                return redirect(url_for(".crop", photo=result["photo"]))
    # Get the list of available albums.
    g.info["album_list"] = [
        "My Photos",  # the default
    ]
    g.info["selected"] = Config.photo.default_album
    albums = Photo.list_albums()
    if len(albums):
        g.info["album_list"] = [ x["name"] for x in albums ]
        # Bugfix: pre-select the first album's *name*; each entry in
        # `albums` is a dict (see the "name" lookup just above), so the
        # template previously received a dict instead of a string.
        g.info["selected"] = albums[0]["name"]
    return template("photos/upload.html")
@mod.route("/crop/<photo>", methods=["GET", "POST"])
@login_required
def crop(photo):
    """Crop a photo to a square region chosen by the user.

    GET shows the cropping UI; POST applies the (x, y, length) crop.
    """
    pic = Photo.get_photo(photo)
    if not pic:
        flash("The photo you want to crop wasn't found!")
        return redirect(url_for(".albums"))
    # Saving?
    if request.method == "POST":
        try:
            x = int(request.form.get("x", 0))
            y = int(request.form.get("y", 0))
            length = int(request.form.get("length", 0))
        except (TypeError, ValueError):
            # Bugfix: narrowed from a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt and hid unrelated bugs; only
            # int() conversion failures belong here.
            flash("Error with form inputs.")
            return redirect(url_for(".crop", photo=photo))
        # Re-crop the photo!
        Photo.crop_photo(photo, x, y, length)
        flash("The photo has been cropped!")
        return redirect(url_for(".albums"))  # TODO go to photo
    # Get the photo's true size so the client-side cropper can scale.
    true_width, true_height = Photo.get_image_dimensions(pic)
    g.info["true_width"] = true_width
    g.info["true_height"] = true_height
    g.info["photo"] = photo
    g.info["preview"] = pic["large"]
    return template("photos/crop.html")
@mod.route("/set_cover/<album>/<key>")
@login_required
def set_cover(album, key):
    """Set the given photo as the album's cover picture."""
    pic = Photo.get_photo(key)
    if not pic:
        # Bugfix: the message previously said "you want to crop" -- a
        # copy/paste from the crop view; this view sets an album cover.
        flash("The photo you want to use as the album cover wasn't found!")
        return redirect(url_for(".albums"))
    Photo.set_album_cover(album, key)
    flash("Album cover has been set.")
    return redirect(url_for(".albums"))
@mod.route("/set_profile/<key>")
@login_required
def set_profile(key):
    """Make the given photo the logged-in user's profile picture."""
    pic = Photo.get_photo(key)
    if not pic:
        flash("The photo wasn't found!")
        return redirect(url_for(".view_photo", key=key)) if False else redirect(url_for(".albums"))
    # The session holds the uid of the currently logged-in user.
    current_uid = g.info["session"]["uid"]
    User.update_user(current_uid, dict(picture=key))
    flash("Your profile picture has been updated.")
    return redirect(url_for(".view_photo", key=key))
@mod.route("/edit/<key>", methods=["GET", "POST"])
@login_required
def edit(key):
    """Edit a photo's caption/description and optionally rotate it."""
    pic = Photo.get_photo(key)
    if not pic:
        flash("The photo wasn't found!")
        return redirect(url_for(".albums"))
    if request.method == "POST":
        caption = request.form.get("caption", "")
        description = request.form.get("description", "")
        rotate = request.form.get("rotate", "")
        Photo.edit_photo(key, dict(caption=caption, description=description))
        # Rotating the photo? Only the three whitelisted values are honored.
        if rotate in ["left", "right", "180"]:
            Photo.rotate_photo(key, rotate)
        flash("The photo has been updated.")
        return redirect(url_for(".view_photo", key=key))
    g.info["key"] = key
    g.info["photo"] = pic
    return template("photos/edit.html")
@mod.route("/delete/<key>", methods=["GET", "POST"])
@login_required
def delete(key):
    """Delete a photo: GET asks for confirmation, POST performs it."""
    pic = Photo.get_photo(key)
    if not pic:
        flash("The photo wasn't found!")
        return redirect(url_for(".albums"))
    if request.method == "POST":
        # Confirmed: actually remove the photo.
        Photo.delete_photo(key)
        flash("The photo has been deleted.")
        return redirect(url_for(".albums"))
    # GET: render the confirmation page.
    g.info["key"] = key
    g.info["photo"] = pic
    return template("photos/delete.html")
@mod.route("/edit_album/<album>", methods=["GET", "POST"])
@login_required
def edit_album(album):
    """Edit an album's name, description and layout format."""
    photos = Photo.list_photos(album)
    if photos is None:
        flash("That album doesn't exist.")
        return redirect(url_for(".albums"))
    if request.method == "POST":
        # Collect the form details.
        new_name = request.form["name"]
        description = request.form["description"]
        layout = request.form["format"]
        # Renaming the album?
        if new_name != album:
            # rename_album returns falsy when the target name is taken.
            ok = Photo.rename_album(album, new_name)
            if not ok:
                flash("Failed to rename album: already exists?")
                return redirect(url_for(".edit_album", album=album))
            album = new_name
        # Update album settings.
        Photo.edit_album(album, dict(
            description=description,
            format=layout,
        ))
        return redirect(url_for(".albums"))
    g.info["album"] = album
    g.info["album_info"] = Photo.get_album(album)
    g.info["photos"] = photos
    return template("photos/edit_album.html")
@mod.route("/arrange_albums", methods=["GET", "POST"])
@login_required
def arrange_albums():
    """Rearrange the photo album order."""
    albums = Photo.list_albums()
    if not albums:
        flash("There are no albums yet.")
        return redirect(url_for(".albums"))
    if request.method == "POST":
        # The client posts the new ordering as a ";"-separated string.
        new_order = request.form.get("order", "").split(";")
        Photo.order_albums(new_order)
        flash("The albums have been rearranged!")
        return redirect(url_for(".albums"))
    g.info["albums"] = albums
    return template("photos/arrange_albums.html")
@mod.route("/edit_captions/<album>", methods=["GET", "POST"])
@login_required
def bulk_captions(album):
    """Bulk edit captions and titles in an album."""
    photos = Photo.list_photos(album)
    if photos is None:
        flash("That album doesn't exist.")
        return redirect(url_for(".albums"))
    if request.method == "POST":
        # Do it.
        for photo in photos:
            # Form fields are namespaced per photo as "<key>:caption" and
            # "<key>:description"; photos missing either field are skipped.
            caption_key = "{}:caption".format(photo["key"])
            desc_key = "{}:description".format(photo["key"])
            if caption_key in request.form and desc_key in request.form:
                caption = request.form[caption_key]
                description = request.form[desc_key]
                Photo.edit_photo(photo['key'], dict(caption=caption, description=description))
        flash("The photos have been updated.")
        return redirect(url_for(".albums"))
    g.info["album"] = album
    g.info["photos"] = photos
    return template("photos/edit_captions.html")
@mod.route("/delete_album/<album>", methods=["GET", "POST"])
@login_required
def delete_album(album):
    """Delete an entire album: GET confirms, POST deletes every photo."""
    photos = Photo.list_photos(album)
    if photos is None:
        flash("That album doesn't exist.")
        return redirect(url_for(".albums"))
    if request.method == "POST":
        # Confirmed: delete each photo the album contains.
        for entry in photos:
            Photo.delete_photo(entry["key"])
        flash("The album has been deleted.")
        return redirect(url_for(".albums"))
    # GET: render the confirmation page.
    g.info["album"] = album
    return template("photos/delete_album.html")
@mod.route("/arrange_photos/<album>", methods=["GET", "POST"])
@login_required
def arrange_photos(album):
    """Rearrange the photos in an album."""
    photos = Photo.list_photos(album)
    if photos is None:
        flash("That album doesn't exist.")
        return redirect(url_for(".albums"))
    if request.method == "POST":
        order = request.form.get("order", "").split(";")
        Photo.order_photos(album, order)
        # Bugfix: the message previously said "albums" (copy/paste from
        # arrange_albums); this view reorders the photos within one album.
        flash("The photos have been rearranged!")
        return redirect(url_for(".album_index", name=album))
    g.info["album"] = album
    g.info["photos"] = photos
    return template("photos/arrange_photos.html")
| kirsle/rophako | rophako/modules/photo/__init__.py | Python | gpl-2.0 | 11,484 |
from flask import Flask
from flask.ext.bootstrap import Bootstrap
from flask.ext.mail import Mail
from flask.ext.moment import Moment
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
from flask.ext.pagedown import PageDown
from config import config
# Flask extension instances, created unbound here and attached to a concrete
# application inside create_app() (application-factory pattern).
bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
db = SQLAlchemy()
pagedown = PageDown()
login_manager = LoginManager()
# 'strong' session protection -- see Flask-Login docs for exact semantics.
login_manager.session_protection = 'strong'
# Endpoint anonymous users are redirected to by @login_required.
login_manager.login_view = 'auth.login'
def create_app(config_name):
    """Application factory: build and configure a Flask app instance.

    config_name: key into the `config` mapping imported from config.py.
    Returns the configured Flask application.
    """
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    # Bind the module-level extension singletons to this app instance.
    bootstrap.init_app(app)
    mail.init_app(app)
    moment.init_app(app)
    db.init_app(app)
    login_manager.init_app(app)
    pagedown.init_app(app)
    # Blueprints are imported lazily here to avoid circular imports.
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix='/auth')
    return app
| fakdora/flaksy-upto-login | app/__init__.py | Python | mit | 1,012 |
import Image, ImageFilter, ImageChops, ImageStat
import time
def estimate(file, s=5):
    """Estimates the amount of focus of an image file.
    Returns a real number: lower values indicate better focus.
    """
    img = Image.open(file).convert("L")
    width, height = img.size
    # Work on a 100x100 window around the image centre only.
    centre_box = (width / 2 - 50, height / 2 - 50,
                  width / 2 + 50, height / 2 + 50)
    img = img.crop(centre_box)
    smoothed = img.filter(ImageFilter.MedianFilter(s))
    # Difference between the image and its median-filtered version.
    diff = ImageChops.subtract(img, smoothed, 1, 100)
    return ImageStat.Stat(diff).stddev[0]
if __name__ == "__main__":
t = time.time()
print eval_focus("preview.jpg")
print time.time()-t
| cgart/photobooth | piggyphoto/focus.py | Python | mit | 584 |
import argparse
import os.path
import read
import analyze
import numpy as np
if __name__ == '__main__':
    argparser = argparse.ArgumentParser()
    argparser.add_argument("state", help="2-letter code for the US state, or 'US' for all states")
    argparser.add_argument("--online", dest='online', const=True, default=False, action='store_const', help="Use FIA website")
    args = argparser.parse_args()
    args.state = [args.state]
    if args.state == ['US']:
        args.state = ['AL', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'FL', 'GA', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MD', 'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ', 'NY', 'NC', 'ND', 'OH', 'OK', 'OR', 'PA', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VT', 'VA', 'WV', 'WI']
    for state in args.state:
        # `args.online` is already a plain bool (store_const), so forward
        # it directly instead of branching on it.
        plots = read.parse(state, online=args.online)
        read.cluster_prep_file(plots, state)
        read.clean(state, b=True)
| OneStone2/mcmc_growth | update.py | Python | mit | 1,008 |
#!/usr/bin/env python
# Copyright 2016 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, platform, subprocess
def test(root_dir, debug, spy, env):
    """Install the language runtime and run its test suite via subprocesses.

    root_dir: directory containing install.py and test.py.
    debug:    truthy -> pass --debug to both scripts.
    spy:      truthy -> run test.py with --spy instead of the plain pass.
    env:      environment dict forwarded to every subprocess.
    Raises subprocess.CalledProcessError when any script fails.
    """
    # Limit parallelism to 2 jobs when running under Travis CI.
    threads = ['-j', '2'] if 'TRAVIS' in env else []
    terra = ['--with-terra', env['TERRA_DIR']] if 'TERRA_DIR' in env else []
    debug_flag = ['--debug'] if debug else []
    inner_flag = ['--extra=-flegion-inner', '--extra=0'] if 'DISABLE_INNER' in env else []
    subprocess.check_call(
        ['time', './install.py', '--rdir=auto'] + threads + terra + debug_flag,
        env = env,
        cwd = root_dir)
    if not spy:
        subprocess.check_call(
            ['time', './test.py', '-q'] + threads + debug_flag + inner_flag,
            env = env,
            cwd = root_dir)
    if spy:
        subprocess.check_call(
            ['time', './test.py', '-q', '--spy'] + threads + inner_flag,
            env = env,
            cwd = root_dir)
if __name__ == '__main__':
    # Directory layout: <legion>/language (this script) and <legion>/runtime.
    root_dir = os.path.realpath(os.path.dirname(__file__))
    legion_dir = os.path.dirname(root_dir)
    runtime_dir = os.path.join(legion_dir, 'runtime')
    env = dict(os.environ.iteritems())  # NOTE: iteritems() is Python 2 only
    env.update({
        'LG_RT_DIR': runtime_dir,
        'LUAJIT_URL': 'http://legion.stanford.edu/~eslaught/mirror/LuaJIT-2.0.4.tar.gz',
    })
    # reduce output spewage by default
    if 'MAKEFLAGS' not in env:
        env['MAKEFLAGS'] = 's'
    # DEBUG is required in the environment; TEST_SPY is optional.
    test(root_dir, env['DEBUG'], 'TEST_SPY' in env and env['TEST_SPY'], env)
| chuckatkins/legion | language/travis.py | Python | apache-2.0 | 1,989 |
from django.conf.urls import url
from apps.followers.api.followers import AddFollowerView, RemoveFollowerView, ProfileView, \
FollowedByListView, FollowingListView
# URL routes for the followers app; all endpoints use class-based views.
urlpatterns = \
    [
        # A user's public profile page.
        url(r'^profile/(?P<pk>[0-9]+)/$', ProfileView.as_view(), name="profile"),
        # Start / stop following the given user.
        url(r'^follow/(?P<follower_id>[0-9]+)/$', AddFollowerView.as_view(), name="followers-add"),
        url(r'^unfollow/(?P<follower_id>[0-9]+)/$', RemoveFollowerView.as_view(), name="followers-remove"),
        # Listings of who follows a user and whom the user follows.
        url(r'^(?P<user_id>[0-9]+)/followed_by/$', FollowedByListView.as_view(), name="followers-by-list"),
        url(r'^(?P<user_id>[0-9]+)/following/$', FollowingListView.as_view(), name="following-list"),
    ]
| kamilgregorczyk/instalike | apps/followers/urls/followers.py | Python | mit | 698 |
# -*- coding: utf-8 -*-
#
# doto documentation build configuration file, created by
# sphinx-quickstart on Sat Jan 11 16:24:07 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Bugfix: `import os` was duplicated here.
# Prefer the Read the Docs theme when it is installed; otherwise fall back
# to Sphinx's built-in default theme.
try:
    import sphinx_rtd_theme
    html_theme = "sphinx_rtd_theme"
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
    sphinx_rtd_theme = None
    html_theme = "default"
# import juliadoc
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
]
# 'juliadoc.julia',
# 'juliadoc.jlhelp',
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'doto'
copyright = u'2014, Benjamin Zaitlen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE(review): keep version/release in sync with the package metadata --
# confirm which location is canonical.
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'julia'
# html_theme_path = [juliadoc.get_theme_dir()]
# html_sidebars = juliadoc.default_sidebars()
#html_theme = 'bootstrap'
#html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# -- HTML output: remaining optional settings ------------------------------

# A shorter title for the navigation bar; defaults to html_title.
#html_short_title = None

# Image file (relative to this directory) placed at the top of the sidebar.
#html_logo = None

# Favicon for the docs: a Windows .ico file of 16x16 or 32x32 pixels,
# looked up within the static path.
#html_favicon = None

# Paths holding custom static files (style sheets etc.), relative to this
# directory.  They are copied after the builtin static files, so a file
# named "default.css" overrides the builtin one.
#html_static_path = ['_static']

# Extra paths with custom files (robots.txt, .htaccess, ...) copied
# directly to the documentation root.
#html_extra_path = []

# strftime format for the 'Last updated on:' stamp at each page bottom;
# disabled when empty.
#html_last_updated_fmt = '%b %d, %Y'

# Convert quotes and dashes to typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, mapping document names to template names.
#html_sidebars = {}

# Extra templates rendered to pages, mapping page names to template names.
#html_additional_pages = {}

# Index generation toggles: module index, general index, split index.
#html_domain_indices = True
#html_use_index = True
#html_split_index = False

# Link to the reST sources from the generated pages.
#html_show_sourcelink = True

# Footer notices: "Created using Sphinx" and the "(C) Copyright ..." line.
#html_show_sphinx = True
#html_show_copyright = True

# Base URL from which the finished HTML is served; when set, an OpenSearch
# description file is output and every page links to it.
#html_use_opensearch = ''

# File name suffix for generated HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Base name for the HTML help builder's output file.
htmlhelp_basename = 'dotodoc'


# -- Options for LaTeX output ----------------------------------------------

latex_elements = {
    # 'papersize': 'letterpaper',   # 'letterpaper' or 'a4paper'
    # 'pointsize': '10pt',          # '10pt', '11pt' or '12pt'
    # 'preamble': '',               # extra LaTeX preamble
}

# One tuple per LaTeX document:
# (source start file, target name, title, author, documentclass).
latex_documents = [
    ('index', 'doto.tex', u'doto Documentation',
     u'Benjamin Zaitlen', 'manual'),
]

# Image placed at the top of the LaTeX title page.
#latex_logo = None

# For "manual" documents, make toplevel headings parts instead of chapters.
#latex_use_parts = False

# Page references after internal links / URL addresses after external links.
#latex_show_pagerefs = False
#latex_show_urls = False

# Documents appended as an appendix to all manuals.
#latex_appendices = []

# Module index generation.
#latex_domain_indices = True


# -- Options for manual page output ----------------------------------------

# One tuple per manual page:
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'doto', u'doto Documentation',
     [u'Benjamin Zaitlen'], 1)
]

# Show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output --------------------------------------------

# One tuple per Texinfo document:
# (source start file, target name, title, author, dir menu entry,
#  description, category).
texinfo_documents = [
    ('index', 'doto', u'doto Documentation',
     u'Benjamin Zaitlen', 'doto', 'Python API for DigitalOcean',
     'Miscellaneous'),
]

# Documents appended as an appendix to all manuals.
#texinfo_appendices = []

# Module index generation.
#texinfo_domain_indices = True

# URL display style: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# Suppress the @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| waytai/doto | doc/conf.py | Python | mit | 8,681 |
# encoding: utf-8
import ckan.model as model
import ckan.lib.create_test_data as ctd
CreateTestData = ctd.CreateTestData
class FollowerClassesTests(object):
    """Shared assertions for the follower domain classes.

    Subclasses must provide ``FOLLOWER_CLASS`` plus ``follower`` and
    ``followee`` fixtures (created in their ``setup_class``).
    """

    @classmethod
    def teardown_class(cls):
        model.repo.rebuild_db()

    def test_get(self):
        record = self.FOLLOWER_CLASS.get(self.follower.id, self.followee.id)
        assert record.follower_id == self.follower.id, record
        assert record.object_id == self.followee.id, record

    def test_get_returns_none_if_couldnt_find_users(self):
        record = self.FOLLOWER_CLASS.get('some-id', 'other-id')
        assert record is None, record

    def test_is_following(self):
        assert self.FOLLOWER_CLASS.is_following(
            self.follower.id, self.followee.id)

    def test_is_following_returns_false_if_user_isnt_following(self):
        # Reversed direction: the followee does not follow the follower.
        assert not self.FOLLOWER_CLASS.is_following(
            self.followee.id, self.follower.id)

    def test_followee_count(self):
        n = self.FOLLOWER_CLASS.followee_count(self.follower.id)
        assert n == 1, n

    def test_followee_list(self):
        results = self.FOLLOWER_CLASS.followee_list(self.follower.id)
        ids = [item.object_id for item in results]
        assert ids == [self.followee.id], ids

    def test_follower_count(self):
        n = self.FOLLOWER_CLASS.follower_count(self.followee.id)
        assert n == 1, n

    def test_follower_list(self):
        results = self.FOLLOWER_CLASS.follower_list(self.followee.id)
        ids = [item.follower_id for item in results]
        assert ids == [self.follower.id], ids
class TestUserFollowingUser(FollowerClassesTests):
    """Run the shared follower tests for user-follows-user relationships."""

    FOLLOWER_CLASS = model.UserFollowingUser

    @classmethod
    def setup_class(cls):
        model.repo.rebuild_db()
        cls.follower = CreateTestData.create_user('follower')
        cls.followee = CreateTestData.create_user('followee')
        cls.FOLLOWER_CLASS(cls.follower.id, cls.followee.id).save()
        cls._create_deleted_models()

    @classmethod
    def _create_deleted_models(cls):
        # Deleted users must be excluded from follower/followee queries, so
        # create relationships in both directions and then delete the user.
        ghost = CreateTestData.create_user('deleted_user')
        cls.FOLLOWER_CLASS(ghost.id, cls.followee.id).save()
        cls.FOLLOWER_CLASS(cls.follower.id, ghost.id).save()
        ghost.delete()
        ghost.save()
class TestUserFollowingDataset(FollowerClassesTests):
    """Run the shared follower tests for user-follows-dataset relationships."""

    FOLLOWER_CLASS = model.UserFollowingDataset

    @classmethod
    def setup_class(cls):
        model.repo.rebuild_db()
        cls.follower = CreateTestData.create_user('follower')
        cls.followee = cls._create_dataset('followee')
        cls.FOLLOWER_CLASS(cls.follower.id, cls.followee.id).save()
        cls._create_deleted_models()

    @classmethod
    def _create_deleted_models(cls):
        # Deleted users and datasets must not show up in follower queries.
        deleted_user = CreateTestData.create_user('deleted_user')
        cls.FOLLOWER_CLASS(deleted_user.id, cls.followee.id).save()
        deleted_user.delete()
        deleted_user.save()
        deleted_dataset = cls._create_dataset('deleted_dataset')
        cls.FOLLOWER_CLASS(cls.follower.id, deleted_dataset.id).save()
        deleted_dataset.delete()
        deleted_dataset.save()

    @classmethod
    def _create_dataset(cls, name):
        """Create a dataset named *name* and return its model object.

        Fix: the receiver of a ``@classmethod`` is the class itself, so it
        is named ``cls`` here rather than the misleading ``self``.
        """
        CreateTestData.create_arbitrary({'name': name})
        return model.Package.get(name)
class TestUserFollowingGroup(FollowerClassesTests):
    """Run the shared follower tests for user-follows-group relationships."""

    FOLLOWER_CLASS = model.UserFollowingGroup

    @classmethod
    def setup_class(cls):
        model.repo.rebuild_db()
        # Group changes require an open revision, committed below.
        model.repo.new_revision()
        cls.follower = CreateTestData.create_user('follower')
        cls.followee = cls._create_group('followee')
        cls.FOLLOWER_CLASS(cls.follower.id, cls.followee.id).save()
        cls._create_deleted_models()
        model.repo.commit_and_remove()

    @classmethod
    def _create_deleted_models(cls):
        # Deleted users and groups must not show up in follower queries.
        deleted_user = CreateTestData.create_user('deleted_user')
        cls.FOLLOWER_CLASS(deleted_user.id, cls.followee.id).save()
        deleted_user.delete()
        deleted_user.save()
        deleted_group = cls._create_group('deleted_group')
        cls.FOLLOWER_CLASS(cls.follower.id, deleted_group.id).save()
        deleted_group.delete()
        deleted_group.save()

    @classmethod
    def _create_group(cls, name):
        """Create and save a group named *name*, returning it.

        Fix: the receiver of a ``@classmethod`` is the class itself, so it
        is named ``cls`` rather than the misleading ``self``.
        """
        group = model.Group(name)
        group.save()
        return group
| NicoVarg99/daf-recipes | ckan/ckan/ckan/ckan/tests/legacy/models/test_follower.py | Python | gpl-3.0 | 4,502 |
# Search for lines that contain an e-mail-like token: an at sign between
# non-whitespace runs, starting with a letter or digit and ending with a
# letter.
import re

# Fixes: the pattern is now a raw string so the backslash escapes (\S) are
# not subject to Python string-escape processing, and the file is opened
# with a context manager so the handle is always closed.
with open('mbox-short.txt') as hand:
    for line in hand:
        line = line.rstrip()
        x = re.findall(r'[a-zA-Z0-9]\S+@\S+[a-zA-Z]', line)
        if len(x) > 0:
            print(x)
| mkhuthir/learnPython | Book_pythonlearn_com/18_regex/re07.py | Python | mit | 279 |
from learn_theano.utils.s3_download import S3
import subprocess
import os
import glob
def get_nottingham_midi_folder():
    """Download (if needed) and unzip the Nottingham MIDI dataset.

    Returns the path of the ``Nottingham`` directory inside the S3 cache
    folder.  The download/unzip step is skipped when the folder already
    exists.
    """
    s3 = S3()
    resulting_folder = os.path.join(s3.cache_folder(), "Nottingham")
    if os.path.isdir(resulting_folder):
        return resulting_folder
    print("Unzipping Nottingham midi dataset...")
    # Fix: reuse the S3 client created above instead of constructing a
    # second one just for the download.
    zipped = s3.download('datasets/Nottingham.zip')
    # Fix: pass the command as an argument list so no shell is involved and
    # paths containing spaces or shell metacharacters cannot break (or be
    # injected into) the command line.
    subprocess.check_call(["unzip", zipped, "-d", s3.cache_folder()])
    assert os.path.isdir(resulting_folder)
    return resulting_folder
def get_nottingham_dataset():
    """Return [train, valid, test] lists of .mid file paths."""
    root = get_nottingham_midi_folder()
    splits = ('train', 'valid', 'test')
    return [glob.glob(os.path.join(root, split, '*.mid')) for split in splits]
if __name__ == "__main__":
    # Manual smoke test: fetch/unpack the dataset and print where it lives.
    f = get_nottingham_midi_folder()
    print(f)
| consciousnesss/learn_theano | learn_theano/utils/midi_nottingham_dataset.py | Python | apache-2.0 | 814 |
import contextlib
import sys
from django.db import models, connection
from django.utils import formats
class GoProjectReview(models.Model):
    """A review submitted for the Go project listing."""

    email = models.CharField(max_length=200)
    text = models.TextField()
    date = models.DateTimeField()
    notes = models.TextField()
    resolved = models.BooleanField(default=False)

    class Meta:
        verbose_name = "Go Project Review"
        verbose_name_plural = "Go Project Reviews"

    def __unicode__(self):
        short_date = formats.date_format(self.date, "SHORT_DATETIME_FORMAT")
        return "{0} - {1}".format(short_date, self.email)
class GoProjectRequest(models.Model):
    """A request to add a project (identified by SCM URL) to the listing."""

    email = models.CharField(max_length=200)
    scm_url = models.CharField(max_length=250)
    text = models.TextField()
    date = models.DateTimeField()
    notes = models.TextField()
    resolved = models.BooleanField(default=False)

    class Meta:
        verbose_name = "Go Project Request"
        verbose_name_plural = "Go Project Requests"

    def __unicode__(self):
        return self.scm_url
class GoPage(models.Model):
    """A static CMS-style page addressed by its unique URL slug."""

    url_name = models.CharField(max_length=250, unique=True)
    name = models.CharField(max_length=250)
    content = models.TextField()

    class Meta:
        verbose_name = "Page"
        verbose_name_plural = "Pages"

    def __unicode__(self):
        return self.name
class GoProjectDesc(models.Model):
    """Top-level description of a tracked Go project."""

    name = models.CharField(max_length=250)
    full_name = models.CharField(max_length=250, unique=True)
    scm_url = models.CharField(max_length=250)
    trend = models.IntegerField(default=0)
    update_lock = models.BooleanField(default=False)
    update_date = models.DateTimeField(null=True)

    class Meta:
        verbose_name = "Go Project Description"
        verbose_name_plural = "Go Project Descriptions"

    def __unicode__(self):
        return self.full_name
class GoProjectCommit(models.Model):
    """A single commit recorded for a tracked project."""

    project_desc = models.ForeignKey(GoProjectDesc)

    commit = models.CharField(max_length=250)
    tag = models.CharField(max_length=250)
    commit_msg = models.TextField()
    author = models.CharField(max_length=250)
    date = models.DateTimeField()
    changes_count = models.IntegerField(default=0)

    class Meta:
        verbose_name = "Go Project Commit"
        verbose_name_plural = "Go Project Commits"

    def __unicode__(self):
        # Abbreviate to the conventional short commit hash.
        return self.commit[:8]
class GoProjectLog(models.Model):
    """An API-change log entry attached to a commit."""

    project_commit = models.ForeignKey(GoProjectCommit)

    modification = models.BooleanField(default=False)
    api_change = models.CharField(max_length=250)
    package_name = models.CharField(max_length=250)

    class Meta:
        verbose_name = "Go Project Log"
        verbose_name_plural = "Go Project Logs"

    def __unicode__(self):
        return "{0}/{1}".format(self.package_name, self.api_change)
@contextlib.contextmanager
def db_lock(tables):
    """Context manager that WRITE-locks the given models' tables (MySQL only).

    Yields a cursor on the default connection; ``UNLOCK TABLES`` is always
    issued on exit.  Raises if the configured database backend is not MySQL.

    NOTE(review): the ``LOCK TABLES`` execute below is commented out, so at
    present nothing is ever actually locked even though the unlock always
    runs -- confirm whether this is intentional debugging state.
    """
    def lock_tables(tables):
        table_names = [model._meta.db_table for model in tables]
        # Builds: LOCK TABLES <t1> WRITE, <t2> WRITE, ...
        # (Fix: the original wrapped the finished string in a pointless
        # second ', '.join([...]) over a one-element list.)
        statement = 'LOCK TABLES %s' % ', '.join(
            '%s WRITE' % table for table in table_names)
        cursor = connection.cursor()
        print >> sys.stderr, "Locking..."
        #cursor.execute(statement)
        return cursor

    if connection.settings_dict['ENGINE'] != 'django.db.backends.mysql':
        raise Exception("Database lock is probably not supported!")
    cursor = lock_tables(tables)
    try:
        yield cursor
    finally:
        print >> sys.stderr, "Unlocking..."
        cursor.execute("UNLOCK TABLES")
| fridex/gofed-web | goview/models.py | Python | gpl-2.0 | 3,482 |
# Copyright 2021 IBM Corp. All Rights Reserved.
# (C) Copyright 2021 Inova Development Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Define the classes and instances to mock a WEMServer as the basis for
test mocks.
This model is defined in a single multi-level dictionary and the
WbemServerMock class uses that dictionary to populate the mock
cim_repository with qualifiers, classes, and instances for the model
This was modified from an equivalent test tool in pywbem
NOTE: There is a version of this functionality wbemserver_mock_v0.py
that is used to run tests against python versions < 3 because the
mock script using setup does not exist for these version of python.
This file defines the class that implements building the mock wbem
server from a dictionary and a default dictionary to build the WBEM
server.
It requires a class such as the wbemserver_mock.py class to actually
call this class to define a complete pywbemcli mock startup script.
This class does not incluede the script startup method to all users to
define alternate wbem server dictionaries than the default and build
the corresponding mock WBEM server for tests.
"""
from __future__ import print_function, absolute_import
import os
from pywbem import ValueMapping, CIMInstance, CIMInstanceName, CIMError, \
CIM_ERR_ALREADY_EXISTS
from pywbem_mock import FakedWBEMConnection, DMTFCIMSchema, \
CIMNamespaceProvider, CIMIndicationFilterProvider, \
CIMListenerDestinationProvider, CIMIndicationSubscriptionProvider
# DMTF schema version (major, minor, update) compiled into the mock
# repository by these tests.
DMTF_TEST_SCHEMA_VER = (2, 49, 0)
# Location of DMTF schema directory used by all tests.
# This directory is permanent and should not be removed.
TESTSUITE_SCHEMA_DIR = os.path.join('tests', 'schema')
# The following dictionary represents the data required to build a
# set of classes and instances for a mock WbemServer. This dictionary defines
# the following elements of a wbem server:
#
# dmtf_schema: The DMTF schema version (ex. (2, 49, 0)) of the schema that
# will be installed and the directory into which it will be installed
#
# system_name: The name of the system (used by the CIM_ObjectManager)
#
# object_manager: A dictionary that defines variable elements for the
# CIM_ObjectManager class.
#
# interop_namespace: The interop namespace. Note that if the interop
# namespace is defined in the WbemServerMock init method that overrides any
# value in this dictionary
#
# class-names: Lists of leaf classes organized by namespace that are to
# be used as the basis to build classes in the mock CIM repository
#
# class_mof: element that defines specific classes that are to be included
# in the model. This is largely to allow building trivial subclasses for
# components of profiles
#
# registered_profiles: The Organization, profile name, profile version for any
# registered profiles that are to be built
#
# referenced_profiles: Definition of CIM_ReferencedProfile associations
# between CIM_RegisteredProfiles. This is used to test get_central_instances
#
# central-instances: Specific central instances that are built
#
# element-conforms-to: Set of relations that define the associations between
# the registered profiles and central classes.
#
DEFAULT_WBEM_SERVER_MOCK_DEFAULT_DICT = {
    # Defines the DMTF schema from which qualifier declarations and classes
    # are to be retrieved.
    'dmtf_schema': {'version': DMTF_TEST_SCHEMA_VER,
                    'dir': TESTSUITE_SCHEMA_DIR},
    # TODO: Relook at this since it is just a class compile based on a mof file
    # Other schema definitions and the mof from them to be inserted.
    # This is used just to install one piece of mof today.
    'pg_schema': {'interop':
                  {'dir': os.path.join(TESTSUITE_SCHEMA_DIR, 'OpenPegasus'),
                   'files': ['PG_Namespace.mof']}},
    # Definition of the interop namespace name.
    # NOTE(review): the key spelling 'interop-namspace' (sic, missing 'e') is
    # read back with the same spelling by WbemServerMock.__init__, so it must
    # not be "fixed" here in isolation.
    'interop-namspace': 'interop',
    # Class names for leaf classes from the dmtf_schema to be installed in the
    # mockserver organized by namespace. The namespaces will be built if they
    # do not already exist before the build_class method is called.
    'class-names': {'interop': ['CIM_Namespace',
                                'CIM_ObjectManager',
                                'CIM_RegisteredProfile',
                                'CIM_ElementConformsToProfile',
                                'CIM_ReferencedProfile',
                                'CIM_ComputerSystem',
                                'CIM_CIMOMStatisticalData',
                                'CIM_ListenerDestinationCIMXML',
                                'CIM_IndicationFilter',
                                'CIM_IndicationSubscription'],
                    'root/cimv2': ['CIM_ElementConformsToProfile',
                                   'CIM_ComputerSystem']},
    # class MOF that must be compiled in the environment by namespace
    'class-mof': {'root/cimv2': ["class MCK_StorageComputerSystem: "
                                 "CIM_ComputerSystem{};", ]},
    # A name for the system and properties for the object manager that
    # will be used to build the object manager instance
    'system_name': 'Mock_WBEMServerTest',
    'object_manager': {'Name': 'FakeObjectManager',
                       'ElementName': 'Pegasus',
                       'Description': 'Pegasus CIM Server Version 2.15.0'
                                      ' Released',
                       'GatherStatisticalData': False},
    # User providers that are to be registered.
    'user_providers': ['namespaceprovider',
                       'subscriptionproviders'],
    'registered_profiles': [('DMTF', 'Indications', '1.1.0'),
                            ('DMTF', 'Profile Registration', '1.0.0'),
                            ('SNIA', 'Server', '1.2.0'),
                            ('SNIA', 'Server', '1.1.0'),
                            ('SNIA', 'SMI-S', '1.2.0'),
                            ('SNIA', 'Array', '1.4.0'),
                            ('SNIA', 'Software', '1.4.0'),
                            ('DMTF', 'Component', '1.4.0'), ],
    'referenced_profiles': [
        (('SNIA', 'Server', '1.2.0'), ('DMTF', 'Indications', '1.1.0')),
        (('SNIA', 'Server', '1.2.0'), ('SNIA', 'Array', '1.4.0')),
        (('SNIA', 'Array', '1.4.0'), ('DMTF', 'Component', '1.4.0')),
        (('SNIA', 'Array', '1.4.0'), ('SNIA', 'Software', '1.4.0')),
    ],
    # List of CIMInstances to install by namespace. Each entry is a CIM
    # instance with classname,and properties. All properties required to build
    # the path must be defined. No other properties are required for this test.
    # (namespace, instance)
    'central-instances': {
        'interop': [],  # TODO add CIMComputerSystem profile
        'root/cimv2':
            [CIMInstance(
                'MCK_StorageComputerSystem',
                properties={'Name': "10.1.2.3",
                            'CreationClassName': "MCK_StorageComputerSystem",
                            'NameFormat': "IP"})]},
    # TODO/Future: Add definition of scoping instance path
    'scoping-instances': [],
    # Define the relationships between a central instance and a specific
    # profile by namespace where the namespace is the location of the
    # central instance.
    # Two elements in list for each element conforms to definition.
    #    1. a specific profile including org, name, and version
    #    2. Components to define an instance including:
    #       Classname,
    #       Keybindings of CIMInstance name
    'element_conforms_to_profile':
        {'interop': [],
         'root/cimv2': [
             (('SNIA', 'Server', '1.2.0'),
              ("MCK_StorageComputerSystem", {'Name': "10.1.2.3",
                                             'CreationClassName':
                                                 "MCK_StorageComputerSystem"})), ],
         },
}
class WbemServerMock(object):
    # pylint: disable=useless-object-inheritance, too-many-instance-attributes
    """
    Class that mocks the classes and methods used by the pywbem
    WBEMServer class so that the WBEMServer class will produce valid data
    for the server CIM_ObjectManager, CIM_Namespace, CIM_RegisteredProfile
    instances.

    This can be used to test the WbemServer class but is also required for
    other tests such as the Subscription manager since that class is based
    on getting data from the WbemServer class (ex. namespace)

    It allows building the instance data for a particular server either
    from user defined input or from standard data predefined for pywbem
    tests
    """

    def __init__(self, conn, server, interop_ns=None, server_mock_data=None,
                 verbose=None):
        # pylint: disable=too-many-arguments, too-many-locals
        """
        Build the mock repository with the classes and instances defined for
        the WBEM server in accord with the dictionary defined in
        server_mock_data or in the default mock configuration dictionary
        DEFAULT_WBEM_SERVER_MOCK_DEFAULT_DICT.

        Parameters:

          conn (:class:`~pywbem.WBEMConnection`):
            The connection already defined by pywbemcli

          server (:class:`pywbem.WBEMServer`):
            The WBEM server instance already defined by pywbemcli. This
            is normally provided by the initiating call from pywbemcli
            to execute this script

          interop_ns (:term:`string`):
            Interop namespace. Overrides the interop namespace defined in
            the server_mock_data dictionary if it exists. This
            is normally provided by the initiating call from pywbemcli
            to execute this script

          server_mock_data (:class:`py:dict`):
            Dictionary that defines the characteristics of the mock. The
            default is DEFAULT_WBEM_SERVER_MOCK_DEFAULT_DICT defined above.

          verbose (:class:`py:bool`):
            If True, display progress messages and the repository contents
            as each build step completes.
        """
        self.verbose = verbose
        self.conn = conn
        self.wbem_server = server

        # Default to the config dictionary defined in
        # DEFAULT_WBEM_SERVER_MOCK_DEFAULT_DICT above.
        self.server_mock_data = server_mock_data or \
            DEFAULT_WBEM_SERVER_MOCK_DEFAULT_DICT

        FakedWBEMConnection._reset_logging_config()

        # Step 0: establish the interop namespace.
        # NOTE: the dictionary key spelling 'interop-namspace' (sic) is the
        # spelling used by the default dictionary; keep them in sync.
        self.interop_ns = interop_ns or \
            self.server_mock_data['interop-namspace']

        # Step 1: build classes for all namespaces based on class-names dict
        class_names = self.server_mock_data['class-names']
        for namespace, clns in class_names.items():
            self.build_classes(clns, namespace)
        self.display("Built classes")

        # Step 2: install user providers
        # TODO/Future: Install user providers from configuration definition
        # For now, install the Namespace provider. The issue is knowing
        # exactly what input parameters are required for each user provider.
        for provider in self.server_mock_data['user_providers']:
            if provider == "namespaceprovider":
                ns_provider = CIMNamespaceProvider(conn.cimrepository)
                conn.register_provider(ns_provider, namespaces=self.interop_ns)
            elif provider == 'subscriptionproviders':
                reg_provider = CIMIndicationFilterProvider(conn.cimrepository)
                conn.register_provider(reg_provider, namespaces=self.interop_ns)
                reg_provider = CIMListenerDestinationProvider(
                    conn.cimrepository)
                conn.register_provider(reg_provider, namespaces=self.interop_ns)
                reg_provider = CIMIndicationSubscriptionProvider(
                    conn.cimrepository)
                conn.register_provider(reg_provider, namespaces=self.interop_ns)

        # NOTE: The wbemserver is not usable until the instances for at
        # least object manager and namespaces have been inserted. Any attempt
        # to display the instance objects before that will fail because the
        # enumerate namespaces will be inconsistent.

        # Step 3: build the object manager.
        # Build CIM_ObjectManager instance into the interop namespace since
        # this is required to build namespace instances
        # Build the dictionary of values for the CIM_ObjectManager
        # TODO: Move this so definition is completely in hands of the
        # dictionary
        object_mgr_data = self.server_mock_data['object_manager']
        omdict = {
            "SystemCreationClassName": "CIM_ComputerSystem",
            "CreationClassName": "CIM_ObjectManager",
            "SystemName": self.server_mock_data['system_name'],
            "Name": object_mgr_data['Name'],
            "ElementName": object_mgr_data['ElementName'],
            "Description": object_mgr_data['Description'],
            "GatherStatisticalData": object_mgr_data['GatherStatisticalData']}

        self.build_obj_mgr_inst(omdict)
        self.display("Built object manager object")

        # Step 4: Build the registered profile and referenced_profile instances
        self.build_reg_profile_insts(
            self.server_mock_data['registered_profiles'])

        self.build_referenced_profile_insts(
            self.server_mock_data['referenced_profiles'])
        self.display("Built profile instances")

        # Step 5: build the defined central instances
        for ns, insts in self.server_mock_data['central-instances'].items():
            self.build_central_instances(ns, insts)

        # Step 6: build the defined element-conforms-to-profile associations
        # build element_conforms_to_profile insts from dictionary
        for ns, items in self.server_mock_data[
                'element_conforms_to_profile'].items():
            for item in items:
                profile_name = item[0]
                central_inst_path = CIMInstanceName(
                    item[1][0],
                    keybindings=item[1][1],
                    host=conn.host,
                    namespace=ns)
                prof_insts = self.wbem_server.get_selected_profiles(
                    registered_org=profile_name[0],
                    registered_name=profile_name[1],
                    registered_version=profile_name[2])
                assert len(prof_insts) == 1

                self.build_ECTP_inst(prof_insts[0].path, central_inst_path)
        self.display("Built central instances and element_conforms_to_Profile")

    def __str__(self):
        return 'object_manager_name={!r}, interop_ns={!r}, system_name=' \
               '{!r}, dmtf_schema_ver={!r}, schema_dir={!r}, wbem_server={}' \
               .format(self.server_mock_data['object_manager']['Name'],
                       self.interop_ns,
                       self.server_mock_data['system_name'],
                       self.server_mock_data['dmtf_schema']['version'],
                       self.server_mock_data['dmtf_schema']['dir'],
                       getattr(self, 'wbem_server', None))

    def __repr__(self):
        """
        Return a representation of the class object
        with all attributes, that is suitable for debugging.
        """
        return 'WBEMServerMock(object_manager_name={!r}, interop_ns={!r}, ' \
               'system_name={!r}, dmtf_schema_ver={!r}, schema_dir={!r}, ' \
               'wbem_server={!r}, registered_profiles={!r})' \
               .format(self.server_mock_data['object_manager']['Name'],
                       self.interop_ns,
                       self.server_mock_data['system_name'],
                       self.server_mock_data['dmtf_schema']['version'],
                       self.server_mock_data['dmtf_schema']['dir'],
                       getattr(self, 'wbem_server', None),
                       self.server_mock_data['registered_profiles'])

    def display(self, txt):
        """Display the txt and current repository. Diagnostic only"""
        if self.verbose:
            print(txt)
            self.conn.display_repository()

    def build_classes(self, classes, namespace):
        """
        Build the schema qualifier declarations, and the class objects in the
        defined namespace of the CIM repository from a DMTF schema.
        This requires only that the leaf objects be defined in a mof
        include file since the compiler finds the files for qualifiers
        and dependent classes.
        """
        try:
            self.conn.add_namespace(namespace)
        except CIMError as er:
            # BUG FIX: previously every CIMError was silently swallowed here
            # (the guard was followed by 'pass').  An already-existing
            # namespace is expected and ignored; any other error propagates.
            if er.status_code != CIM_ERR_ALREADY_EXISTS:
                raise

        # Compile the leaf classes into the CIM repository.
        # This test not required to use the class in the test environment.
        # However, if it is ever used as template for code that could
        # execute on pywbem version 0.x, this test is required.
        if hasattr(self.conn, 'compile_schema_classes'):
            # Using pywbem 1.x
            schema = DMTFCIMSchema(
                self.server_mock_data['dmtf_schema']['version'],
                self.server_mock_data['dmtf_schema']['dir'],
                use_experimental=False,
                verbose=self.verbose)
            # pylint: disable=no-member
            self.conn.compile_schema_classes(
                classes,
                schema.schema_pragma_file,
                namespace=namespace,
                verbose=self.verbose)
        else:
            # Using pywbem 0.x
            self.conn.compile_dmtf_schema(
                self.server_mock_data['dmtf_schema']['version'],
                self.server_mock_data['dmtf_schema']['dir'],
                class_names=classes,
                namespace=namespace,
                verbose=self.verbose)

        # Build the pg_schema elements
        # TODO: This should be separate method.
        if namespace in self.server_mock_data['pg_schema']:
            pg_schema_ns = self.server_mock_data['pg_schema'][namespace]
            filenames = pg_schema_ns['files']
            pg_dir = pg_schema_ns['dir']

            for fn in filenames:
                filepath = os.path.join(pg_dir, fn)
                self.conn.compile_mof_file(
                    filepath, namespace=namespace,
                    search_paths=[pg_dir],
                    verbose=self.verbose)

        # Compile the mof defined in the 'class-mof' definitions
        if namespace in self.server_mock_data['class-mof']:
            mofs = self.server_mock_data['class-mof'][namespace]
            for mof in mofs:
                self.conn.compile_mof_string(mof, namespace=namespace,
                                             verbose=self.verbose)

    def inst_from_classname(self, class_name, namespace=None,
                            property_list=None,
                            property_values=None,
                            include_missing_properties=True,
                            include_path=True):
        # pylint: disable=too-many-arguments
        """
        Build instance from classname using class_name property to get class
        from a repository.
        """
        cls = self.conn.GetClass(class_name,
                                 namespace=namespace,
                                 LocalOnly=False,
                                 IncludeQualifiers=True,
                                 IncludeClassOrigin=True,
                                 PropertyList=property_list)

        return CIMInstance.from_class(
            cls,
            namespace=namespace,
            property_values=property_values,
            include_missing_properties=include_missing_properties,
            include_path=include_path)

    def add_inst_from_def(self, class_name, namespace=None,
                          property_values=None,
                          include_missing_properties=True,
                          include_path=True):
        """
        Build and insert into the environment a complete instance given the
        classname and a dictionary defining the properties of the instance.
        """
        # pylint: disable=too-many-arguments
        new_inst = self.inst_from_classname(
            class_name,
            namespace=namespace,
            property_values=property_values,
            include_missing_properties=include_missing_properties,
            include_path=include_path)

        self.conn.CreateInstance(new_inst, namespace=namespace)
        return new_inst

    def build_obj_mgr_inst(self, object_manager_values_dict):
        """
        Build a CIMObjectManager instance for the mock wbem server using
        fixed data defined in this method and data from the init parameter
        mock data. Add this instance to the repository
        """
        ominst = self.add_inst_from_def(
            "CIM_ObjectManager",
            namespace=self.interop_ns,
            property_values=object_manager_values_dict,
            include_missing_properties=False,
            include_path=True)

        rtn_ominsts = self.conn.EnumerateInstances("CIM_ObjectManager",
                                                   namespace=self.interop_ns)
        assert len(rtn_ominsts) == 1, \
            "Expected 1 ObjectManager instance, got {!r}".format(rtn_ominsts)

        return ominst

    def build_reg_profile_insts(self, profiles):
        """
        Build and install in repository the registered profiles defined by
        the profiles parameter. A dictionary of tuples where each tuple
        contains RegisteredOrganization, RegisteredName, RegisteredVersion

        Parameters:
          conn:
          profiles (dict of lists where each list contains org, name, version
             for a profiles)
        """
        # Map ValueMap to Value
        org_vm = ValueMapping.for_property(self.conn, self.interop_ns,
                                           'CIM_RegisteredProfile',
                                           'RegisteredOrganization')

        # This is a workaround hack to get ValueMap from Value
        org_vm_dict = {}   # reverse mapping dictionary (valueMap from Value)
        for value in range(0, 22):
            org_vm_dict[org_vm.tovalues(value)] = value

        for profile in profiles:
            instance_id = '{}+{}+{}'.format(profile[0], profile[1], profile[2])
            reg_prof_dict = {'RegisteredOrganization': org_vm_dict[profile[0]],
                             'RegisteredName': profile[1],
                             'RegisteredVersion': profile[2],
                             'InstanceID': instance_id}
            self.add_inst_from_def("CIM_RegisteredProfile",
                                   namespace=self.interop_ns,
                                   property_values=reg_prof_dict,
                                   include_missing_properties=False,
                                   include_path=True)

        rtn_rpinsts = self.conn.EnumerateInstances("CIM_RegisteredProfile",
                                                   namespace=self.interop_ns)
        assert rtn_rpinsts, \
            "Expected 1 or more RegisteredProfile instances, got none"

    def build_ECTP_inst(self, profile_path, element_path):
        """
        Build an instance of CIM_ElementConformsToProfile and insert into
        repository
        """
        class_name = 'CIM_ElementConformsToProfile'
        element_conforms_dict = {'ConformantStandard': profile_path,
                                 'ManagedElement': element_path}

        inst = self.add_inst_from_def(class_name,
                                      namespace=self.interop_ns,
                                      property_values=element_conforms_dict,
                                      include_missing_properties=False,
                                      include_path=True)

        assert self.conn.EnumerateInstances(class_name,
                                            namespace=self.interop_ns)
        assert self.conn.GetInstance(inst.path)

    def build_referenced_profile_insts(self, referenced_profiles):
        """
        Build and install in repository the referenced profile instances
        defined by the referenced_profiles parameter. A dictionary of tuples
        where each tuple contains Antecedent and Dependent reference in terms
        of the profile name as a tuple (org, name, version).

        Parameters:
          profiles (dict of tuples where each tuple defines the antecedent
          and dependent)
        """
        class_name = 'CIM_ReferencedProfile'
        for profile_name in referenced_profiles:
            antecedent = profile_name[0]
            dependent = profile_name[1]
            antecedent_inst = self.wbem_server.get_selected_profiles(
                registered_org=antecedent[0],
                registered_name=antecedent[1],
                registered_version=antecedent[2])
            dependent_inst = self.wbem_server.get_selected_profiles(
                registered_org=dependent[0],
                registered_name=dependent[1],
                registered_version=dependent[2])

            assert len(antecedent_inst) == 1, \
                "Antecedent: {0}".format(antecedent)
            assert len(dependent_inst) == 1, \
                "Dependent: {0}".format(dependent)

            ref_profile_dict = {'Antecedent': antecedent_inst[0].path,
                                'Dependent': dependent_inst[0].path}

            inst = self.add_inst_from_def(class_name,
                                          namespace=self.interop_ns,
                                          property_values=ref_profile_dict,
                                          include_missing_properties=False,
                                          include_path=True)

            assert self.conn.EnumerateInstances(class_name,
                                                namespace=self.interop_ns)
            assert self.conn.GetInstance(inst.path)

    def build_central_instances(self, namespace, central_instances):
        """
        Build the central_instances from the definitions provided in the list
        central_instances where each definition is a python CIMInstance object
        and add them to the repository. This method adds the path to each
        instance before creating it.
        """
        for inst in central_instances:
            cls = self.conn.GetClass(inst.classname, namespace=namespace,
                                     LocalOnly=False, IncludeQualifiers=True,
                                     IncludeClassOrigin=True)
            inst.path = CIMInstanceName.from_instance(
                cls, inst, namespace=namespace, strict=True)

            self.conn.CreateInstance(inst, namespace=namespace)
| pywbem/pywbemtools | tests/unit/pywbemcli/testmock/wbemserver_mock_class.py | Python | apache-2.0 | 27,276 |
#!/usr/bin/env python
"""
@package mi.dataset.parser
@file marine-integrations/mi/dataset/parser/phsen_abcdef_dclpy
@author Nick Almonte
@brief Parser for the phsen_abcdef_dcl dataset driver
Release notes:
initial release
"""
import re
from mi.core.log import get_logger
from mi.core.common import BaseEnum
from mi.core.instrument.dataset_data_particle import DataParticle, DataParticleKey
from mi.core.exceptions import RecoverableSampleException
from mi.dataset.dataset_parser import SimpleParser
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.parser.common_regexes import ONE_OR_MORE_WHITESPACE_REGEX
from mi.dataset.parser.utilities import convert_to_signed_int_16_bit, dcl_time_to_ntp, \
time_1904_to_ntp
__author__ = 'Nick Almonte'
__license__ = 'Apache 2.0'
METADATA_PARTICLE_CLASS_KEY = 'metadata_particle_class'
# The key for the data particle class
DATA_PARTICLE_CLASS_KEY = 'data_particle_class'
log = get_logger()
def _calculate_working_record_checksum(working_record):
"""
Calculates the checksum of the argument ascii-hex string
@retval int - modulo integer checksum value of argument ascii-hex string
"""
checksum = 0
# strip off the leading * and ID characters of the log line (3 characters) and
# strip off the trailing Checksum characters (2 characters)
star_and_checksum_stripped_working_record = working_record[3:-2]
working_record_length = len(star_and_checksum_stripped_working_record)
for x in range(0, working_record_length, 2):
value = star_and_checksum_stripped_working_record[x:x+2]
checksum += int(value, 16)
modulo_checksum = checksum % 256
return modulo_checksum
class DataParticleType(BaseEnum):
    """
    The data particle types that a phsen_abcdef_dcl parser may generate
    """
    # Stream names for recovered (flash-downloaded) data
    METADATA_RECOVERED = 'phsen_abcdef_dcl_metadata_recovered'
    INSTRUMENT_RECOVERED = 'phsen_abcdef_dcl_instrument_recovered'
    # Stream names for telemetered (real-time DCL) data
    METADATA_TELEMETERED = 'phsen_abcdef_dcl_metadata'
    INSTRUMENT_TELEMETERED = 'phsen_abcdef_dcl_instrument'
class StateKey(BaseEnum):
    """Keys used to persist parser state."""
    POSITION = 'position'  # hold the current file position
    # NOTE(review): not referenced in this module's visible code — presumably
    # marks whether the start of data has been reached; confirm usage.
    START_OF_DATA = 'start_of_data'
class PhsenAbcdefDclMetadataDataParticle(DataParticle):
    """
    Builds phsen_abcdef_dcl metadata (control record) particle values.

    raw_data is a tuple of (dcl_controller_timestamp, working_record) where
    working_record is the aggregated ascii-hex control record, including
    the leading '*' character.
    """

    def _build_parsed_values(self):
        """
        Extracts PHSEN ABCDEF DCL Metadata data from raw_data.
        @returns result a list of dictionaries of particle data
        """
        # raw_data[0] (the DCL controller timestamp) is consumed by the
        # parser to compute the port timestamp; only the record is parsed
        # here.
        working_record = self.raw_data[1]

        # Per the IDD, voltage_battery data is optional and not guaranteed
        # to be included in every CONTROL data record. Nominal size of a
        # metadata string without the voltage_battery data is 39 (including
        # the '*'). Voltage data adds 4 ascii characters, so records of 41+
        # characters contain voltage data, anything smaller does not.
        have_voltage_battery_data = len(working_record) >= 41

        key = PhsenAbcdefDclMetadataDataParticleKey

        unique_id_int = int(working_record[1:3], 16)     # 2 ascii-hex chars
        record_type_int = int(working_record[5:7], 16)   # 2 ascii-hex chars
        record_time_int = int(working_record[7:15], 16)  # 8 ascii-hex chars

        # Instrument timestamp is the internal_timestamp.
        # NOTE(review): the instrument particle converts with
        # time_1904_to_ntp(record_time) directly, while this adds
        # time_1904_to_ntp(1904) to the raw seconds — preserved as-is,
        # confirm against the IDD.
        self.set_internal_timestamp(
            timestamp=record_time_int + time_1904_to_ntp(1904))

        # FLAGS: 4 ascii-hex chars -> 16 individual bits, LSB first
        flags_int = int(working_record[15:19], 16)
        flag_bits = [(flags_int >> bit) & 0x1 for bit in range(16)]
        # Bit positions 0..15 map to these keys, in order
        flag_keys = [
            key.CLOCK_ACTIVE, key.RECORDING_ACTIVE, key.RECORD_END_ON_TIME,
            key.RECORD_MEMORY_FULL, key.RECORD_END_ON_ERROR,
            key.DATA_DOWNLOAD_OK, key.FLASH_MEMORY_OPEN,
            key.BATTERY_LOW_PRESTART, key.BATTERY_LOW_MEASUREMENT,
            key.BATTERY_LOW_BLANK, key.BATTERY_LOW_EXTERNAL,
            key.EXTERNAL_DEVICE1_FAULT, key.EXTERNAL_DEVICE2_FAULT,
            key.EXTERNAL_DEVICE3_FAULT, key.FLASH_ERASED,
            key.POWER_ON_INVALID,
        ]

        num_data_records_int = int(working_record[19:25], 16)   # 6 chars
        num_error_records_int = int(working_record[25:31], 16)  # 6 chars
        num_bytes_stored_int = int(working_record[31:37], 16)   # 6 chars

        calculated_checksum = _calculate_working_record_checksum(working_record)

        # The checksum field follows whichever field is last, so its offset
        # depends on whether the optional voltage data is present.
        if have_voltage_battery_data:
            voltage_battery_int = int(working_record[37:41], 16)
            passed_checksum_int = int(working_record[41:43], 16)
        else:
            voltage_battery_int = None
            passed_checksum_int = int(working_record[37:39], 16)

        # Per IDD, report 0 when the checksum embedded in the record does
        # not match the calculated checksum, 1 when it does.
        checksum_final = 1 if passed_checksum_int == calculated_checksum else 0

        # ASSEMBLE THE RESULTANT PARTICLE..
        fields = [
            (key.UNIQUE_ID, unique_id_int),
            (key.RECORD_TYPE, record_type_int),
        ]
        fields.extend(zip(flag_keys, flag_bits))
        fields.extend([
            (key.NUM_DATA_RECORDS, num_data_records_int),
            (key.NUM_ERROR_RECORDS, num_error_records_int),
            (key.NUM_BYTES_STORED, num_bytes_stored_int),
            (key.VOLTAGE_BATTERY, voltage_battery_int),
            (key.PASSED_CHECKSUM, checksum_final),
        ])
        return [{DataParticleKey.VALUE_ID: value_id,
                 DataParticleKey.VALUE: value}
                for value_id, value in fields]
class PhsenAbcdefDclMetadataDataParticleKey(BaseEnum):
    """
    Output stream keys for the phsen_abcdef_dcl metadata (control) particle.
    """
    # NOTE(review): DCL_CONTROLLER_TIMESTAMP and RECORD_TIME are declared
    # but are not emitted by _build_parsed_values — confirm intent.
    DCL_CONTROLLER_TIMESTAMP = 'dcl_controller_timestamp'
    UNIQUE_ID = 'unique_id'
    RECORD_TYPE = 'record_type'
    RECORD_TIME = 'record_time'
    # The 16 status flag bits decoded from the record's flags field
    CLOCK_ACTIVE = 'clock_active'
    RECORDING_ACTIVE = 'recording_active'
    RECORD_END_ON_TIME = 'record_end_on_time'
    RECORD_MEMORY_FULL = 'record_memory_full'
    RECORD_END_ON_ERROR = 'record_end_on_error'
    DATA_DOWNLOAD_OK = 'data_download_ok'
    FLASH_MEMORY_OPEN = 'flash_memory_open'
    BATTERY_LOW_PRESTART = 'battery_low_prestart'
    BATTERY_LOW_MEASUREMENT = 'battery_low_measurement'
    BATTERY_LOW_BLANK = 'battery_low_blank'
    BATTERY_LOW_EXTERNAL = 'battery_low_external'
    EXTERNAL_DEVICE1_FAULT = 'external_device1_fault'
    EXTERNAL_DEVICE2_FAULT = 'external_device2_fault'
    EXTERNAL_DEVICE3_FAULT = 'external_device3_fault'
    FLASH_ERASED = 'flash_erased'
    POWER_ON_INVALID = 'power_on_invalid'
    # Counters and optional battery voltage from the record tail
    NUM_DATA_RECORDS = 'num_data_records'
    NUM_ERROR_RECORDS = 'num_error_records'
    NUM_BYTES_STORED = 'num_bytes_stored'
    VOLTAGE_BATTERY = 'voltage_battery'
    PASSED_CHECKSUM = 'passed_checksum'
class PhsenAbcdefDclInstrumentDataParticle(DataParticle):
    """
    Builds phsen_abcdef_dcl instrument particle values from an aggregated
    ascii-hex pH record (raw_data is a (dcl_timestamp, record) tuple).
    """

    # Each measurement is 4 ascii-hex characters (one signed 16-bit value)
    measurement_num_of_chars = 4

    def _unpack_measurements(self, chunk):
        """
        Convert a run of 4-char ascii-hex fields into signed 16-bit ints.
        Shared by the light and reference-light extraction methods (the
        original duplicated this loop in both).
        @param chunk: ascii-hex substring holding whole measurements
        @returns list of int measurement values
        """
        width = self.measurement_num_of_chars
        return [convert_to_signed_int_16_bit(chunk[i:i + width])
                for i in range(0, len(chunk), width)]

    def _create_light_measurements_array(self, working_record):
        """
        Creates a light measurement array from raw data for a PHSEN DCL
        Instrument record.
        @returns list of 92 light measurements. From the IDD: (an) array
            of 92 light measurements (23 sets of 4 measurements)
        """
        return self._unpack_measurements(working_record[83:-14])

    def _create_reference_light_measurements_array(self, working_record):
        """
        Creates a reference light measurement array from raw data for a
        PHSEN DCL Instrument record.
        @returns list of 16 measurements. From the IDD: (an) array of 16
            measurements (4 sets of 4 measurements)
        """
        return self._unpack_measurements(working_record[19:-382])

    def _build_parsed_values(self):
        """
        Extracts PHSEN ABCDEF DCL Instrument data from the raw_data tuple.
        @returns result a list of dictionaries of particle data
        """
        # raw_data[0] (the DCL controller timestamp) is consumed by the
        # parser for the port timestamp; only the record is parsed here.
        working_record = self.raw_data[1]
        key = PhsenAbcdefDclInstrumentDataParticleKey

        unique_id_int = int(working_record[1:3], 16)     # 2 ascii-hex chars
        record_type_int = int(working_record[5:7], 16)   # 2 ascii-hex chars
        record_time_int = int(working_record[7:15], 16)  # 8 ascii-hex chars

        # Instrument time (1904-epoch seconds) is the internal timestamp;
        # reuse the already-converted value rather than re-parsing the hex.
        self.set_internal_timestamp(timestamp=time_1904_to_ntp(record_time_int))

        thermistor_start_int = int(working_record[15:19], 16)
        reference_light_measurements = \
            self._create_reference_light_measurements_array(working_record)
        light_measurements = self._create_light_measurements_array(working_record)
        voltage_battery_int = int(working_record[455:459], 16)
        thermistor_end_int = int(working_record[459:463], 16)
        passed_checksum_int = int(working_record[463:465], 16)

        calculated_checksum = _calculate_working_record_checksum(working_record)
        # Per IDD, report 0 when the checksum embedded in the record does
        # not match the calculated checksum, 1 when it does.
        checksum_final = 1 if passed_checksum_int == calculated_checksum else 0

        # ASSEMBLE THE RESULTANT PARTICLE..
        fields = [
            (key.UNIQUE_ID, unique_id_int),
            (key.RECORD_TYPE, record_type_int),
            (key.THERMISTOR_START, thermistor_start_int),
            (key.REFERENCE_LIGHT_MEASUREMENTS, reference_light_measurements),
            (key.LIGHT_MEASUREMENTS, light_measurements),
            (key.VOLTAGE_BATTERY, voltage_battery_int),
            (key.THERMISTOR_END, thermistor_end_int),
            (key.PASSED_CHECKSUM, checksum_final),
        ]
        return [{DataParticleKey.VALUE_ID: value_id,
                 DataParticleKey.VALUE: value}
                for value_id, value in fields]
class PhsenAbcdefDclInstrumentDataParticleKey(BaseEnum):
    """
    Output stream keys for the phsen_abcdef_dcl instrument particle.
    """
    # NOTE(review): DCL_CONTROLLER_TIMESTAMP and RECORD_TIME are declared
    # but are not emitted by _build_parsed_values — confirm intent.
    DCL_CONTROLLER_TIMESTAMP = 'dcl_controller_timestamp'
    UNIQUE_ID = 'unique_id'
    RECORD_TYPE = 'record_type'
    RECORD_TIME = 'record_time'
    THERMISTOR_START = 'thermistor_start'
    REFERENCE_LIGHT_MEASUREMENTS = 'reference_light_measurements'
    LIGHT_MEASUREMENTS = 'light_measurements'
    VOLTAGE_BATTERY = 'voltage_battery'
    THERMISTOR_END = 'thermistor_end'
    PASSED_CHECKSUM = 'passed_checksum'
class PhsenAbcdefDclMetadataRecoveredDataParticle(PhsenAbcdefDclMetadataDataParticle):
    """Metadata (control record) particle for the recovered stream."""
    _data_particle_type = DataParticleType.METADATA_RECOVERED
class PhsenAbcdefDclMetadataTelemeteredDataParticle(PhsenAbcdefDclMetadataDataParticle):
    """Metadata (control record) particle for the telemetered stream."""
    _data_particle_type = DataParticleType.METADATA_TELEMETERED
class PhsenAbcdefDclInstrumentRecoveredDataParticle(PhsenAbcdefDclInstrumentDataParticle):
    """Instrument (pH record) particle for the recovered stream."""
    _data_particle_type = DataParticleType.INSTRUMENT_RECOVERED
class PhsenAbcdefDclInstrumentTelemeteredDataParticle(PhsenAbcdefDclInstrumentDataParticle):
    """Instrument (pH record) particle for the telemetered stream."""
    _data_particle_type = DataParticleType.INSTRUMENT_TELEMETERED
class DataTypeEnum(BaseEnum):
    """Classification of an aggregated record by its type field."""
    UNKNOWN = 0     # record type not recognized
    INSTRUMENT = 1  # pH instrument data record (type 0x0A)
    CONTROL = 2     # control/status record
class PhsenAbcdefDclParser(SimpleParser):
    """
    Parser for phsen_abcdef_dcl log files.

    Instrument and control records span multiple DCL log lines, each
    prefixed with a DCL controller timestamp.  Lines are aggregated into a
    working record (started by a '*', terminated by a DCL status line
    containing '[') and each complete record is turned into either an
    instrument or a metadata particle.
    """

    # Control record type codes (2 ascii-hex chars, case-sensitive), from
    # the SAMI_error_info_control_records spreadsheet.  A set membership
    # test replaces the original regex alternation, which was fragile
    # (partial-match semantics of re.search).
    CONTROL_RECORD_TYPES = frozenset([
        '80', '81', '83', '85', '86', '87', 'BE', 'BF', 'C0', 'C1',
        'C2', 'C3', 'C4', 'C5', 'C6', 'FE', 'FF'])

    def __init__(self,
                 config,
                 stream_handle,
                 exception_callback):
        # NOTE(review): these compiled patterns are retained for backward
        # compatibility but are no longer used by parse_file below.
        self._whitespace_regex = re.compile(ONE_OR_MORE_WHITESPACE_REGEX)
        self._instrument_data_regex = re.compile(r'\*')

        particle_classes_dict = config.get(DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT)
        self._instrument_data_particle_class = particle_classes_dict.get('data_particle_class_key')
        self._metadata_particle_class = particle_classes_dict.get('metadata_particle_class_key')

        super(PhsenAbcdefDclParser, self).__init__(config, stream_handle, exception_callback)

        self.working_record = ""      # record text aggregated so far
        self.in_record = False        # True once the first '*' has been seen
        self.latest_dcl_time = ""     # DCL timestamp of the last stripped line
        self.result_particle_list = []

    def _strip_logfile_line(self, logfile_line):
        """
        Strip trailing newline/linefeed characters and the leading DCL
        timestamp from a log line (recording the timestamp as a side
        effect of _strip_time).
        """
        return self._strip_time(logfile_line.rstrip('\r\n'))

    def _strip_time(self, logfile_line):
        """
        Remove the leading 24-character DCL timestamp from a log line,
        remembering it in case it is the last DCL time recorded before the
        next record begins.
        """
        # save off this DCL time for the eventual port timestamp
        self.latest_dcl_time = logfile_line[:23]
        return logfile_line[24:]

    def _process_instrument_data(self, working_record):
        """
        Determine which particle a complete record represents, validate its
        length, and extract the corresponding sample into _record_buffer.

        @param working_record: full aggregated ascii-hex record (with '*')
        """
        log.debug("PhsenAbcdefDclParser._process_instrument_data(): aggregate working_record size %s is %s",
                  len(working_record), working_record)

        # Record lengths below include the leading '*' character
        instrument_record_length = 465
        control_record_lengths = (39, 43)  # without / with voltage_battery

        data_type = self._determine_data_type(working_record)

        # DCL controller timestamp is the port_timestamp
        port_timestamp = dcl_time_to_ntp(self.latest_dcl_time)

        if data_type is DataTypeEnum.UNKNOWN:
            log.warning("PhsenAbcdefDclParser._process_instrument_data(): "
                        "Throwing RecoverableSampleException, Record is neither instrument or control")
            self._exception_callback(RecoverableSampleException("PhsenAbcdefDclParser._process_instrument_data(): "
                                                                "Data Type is neither Control or Instrument"))
            return

        # The tuple makes the DCL time available when the particle's
        # _build_parsed_values method is later invoked.
        particle_data = (self.latest_dcl_time, working_record)

        if data_type is DataTypeEnum.INSTRUMENT:
            # Per the IDD, if the candidate data is not the proper size,
            # throw a recoverable exception
            if len(working_record) == instrument_record_length:
                particle = self._extract_sample(self._instrument_data_particle_class,
                                                None,
                                                particle_data,
                                                port_timestamp=port_timestamp)
                self._record_buffer.append(particle)
            else:
                log.warning("PhsenAbcdefDclParser._process_instrument_data(): "
                            "Size of data record is not the length of an instrument data record")
                self._exception_callback(RecoverableSampleException(
                    "PhsenAbcdefDclParser._process_instrument_data(): "
                    "Throwing RecoverableSampleException, Size of data "
                    "record is not the length of an instrument data record"))
        else:  # DataTypeEnum.CONTROL
            # Per the IDD, if the candidate data is not the proper size,
            # throw a recoverable exception
            if len(working_record) in control_record_lengths:
                particle = self._extract_sample(self._metadata_particle_class,
                                                None,
                                                particle_data,
                                                port_timestamp=port_timestamp)
                self._record_buffer.append(particle)
            else:
                log.warning("PhsenAbcdefDclParser._process_instrument_data(): "
                            "Size of data record is not the length of a control data record")
                self._exception_callback(RecoverableSampleException(
                    "PhsenAbcdefDclParser._process_instrument_data(): "
                    "Throwing RecoverableSampleException, Size of data "
                    "record is not the length of a control data record"))

    @staticmethod
    def _determine_data_type(working_record):
        """
        Classify a record as INSTRUMENT (type 0x0A), CONTROL (one of the
        known control type codes) or UNKNOWN, from its 2-char type field.
        """
        type_ascii_hex = working_record[5:7]
        # Type checks, per values defined in the IDD
        if int(type_ascii_hex, 16) == 10:
            return DataTypeEnum.INSTRUMENT
        if type_ascii_hex in PhsenAbcdefDclParser.CONTROL_RECORD_TYPES:
            return DataTypeEnum.CONTROL
        return DataTypeEnum.UNKNOWN

    def parse_file(self):
        """
        Parse the data file line by line, aggregating record fragments and
        building a particle from each complete record.
        """
        for line in self._stream_handle:
            if '[' in line:
                # A DCL logger status line terminates the record currently
                # being aggregated (if any); before the first '*' there is
                # nothing to flush.
                if self.in_record and self.working_record:
                    # PROCESS WORKING STRING TO CREATE A PARTICLE
                    self._process_instrument_data(self.working_record)
                    self.working_record = ""
            elif '*' in line:
                # A '*' marks the first piece of a new instrument/control
                # record: strip trailing newlines and the leading DCL
                # date/time (saving the DCL time).
                stripped_logfile_line = self._strip_logfile_line(line)
                if self.working_record:
                    # the working record should be empty when a new '*' is
                    # found; it must contain bad or start-of-day data
                    self.working_record = ""
                    log.warning("PhsenAbcdefDclParser.parse_file(): "
                                "found a new star but working_record is non-zero length, "
                                "throwing a RecoverableSample exception")
                    self._exception_callback(RecoverableSampleException(
                        "PhsenAbcdefDclParser.parse_file(): "
                        "found a new record to parse but "
                        "working_record is non-zero length, "
                        "throwing a RecoverableSample exception"))
                self.working_record += stripped_logfile_line
                self.in_record = True
            else:
                # Continuation of the current record: strip and append.
                self.working_record += self._strip_logfile_line(line)

        # Per the IDD, a single record can span multiple files when written
        # as the day changes.  Since the architecture cannot parse one
        # particle from multiple files, report any leftover fragment.
        if self.working_record:
            log.warning("PhsenAbcdefDclParser.parse_file(): "
                        "working_record is non-zero length, throwing a RecoverableSample exception")
            self._exception_callback(RecoverableSampleException("PhsenAbcdefDclParser.parse_file(): "
                                                                "working_record is non-zero length, "
                                                                "throwing a RecoverableSample exception"))
| janeen666/mi-instrument | mi/dataset/parser/phsen_abcdef_dcl.py | Python | bsd-2-clause | 31,949 |
import math
"""
Project Euler - Problem 9
"""
def triplet_product(triplet, target=1000):
    """
    Scale a Pythagorean triplet so its terms sum to ``target`` and return
    the product of the scaled terms.

    Replaces the original brute-force search over multipliers 1..299 with
    the direct observation that an integer multiple of the triplet sums to
    ``target`` exactly when ``target`` is divisible by the triplet's sum
    (this also lifts the 299-multiplier cap).

    @param triplet: base Pythagorean triplet, e.g. [3, 4, 5]
    @param target: desired sum of the scaled triplet (default 1000)
    @return: product of the scaled terms, or None when no integer multiple
        of the triplet sums to ``target``
    """
    base_sum = sum(triplet)
    if base_sum and target % base_sum == 0:
        k = target // base_sum
        a, b, c = [k * side for side in triplet]
        return a * b * c
    return None
if __name__ == '__main__':
    # Only 8-15-17 has a sum (40) that divides 1000; the others print None.
    print(triplet_product([3, 4, 5]))
    print(triplet_product([5, 12, 13]))
    print(triplet_product([6, 8, 10]))
    print(triplet_product([7, 24, 25]))
    print(triplet_product([8, 15, 17]))
import os
import pdb
import socket
import sys
import threading
import time

from collections import OrderedDict as OD
from queue import Empty, Queue

from .server import proxy
class SocketIO:
def __init__(self, ip_addr, port, fname, fsz=0, send=True):
#self.DEV_TX_PORT = 8888
#self.DEV_RX_PORT = 8889
self.fsz = fsz
self.ip_addr = ip_addr
self.port = port
self.send = send
self.fname = fname
def create_socket(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((self.ip_addr, self.port))
except socket.error:
s.close()
print('could not connect')
return
if s is None:
print('could not open socket')
return
return s
def open_file(self):
if type(self.fname) == str:
return open(self.fname, 'rb' if self.send else 'wb')
elif type(self.fname) == Queue:
return self.fname
def close_file(self, f):
if type(self.fname) == str:
f.close()
def write_data(self, f, d):
if type(f) == Queue:
f.put(d)
else:
f.write(d)
def read_data(self, f, sz1):
if type(f) == Queue:
try:
data = f.get_nowait()
return data.encode('ascii')
except:
pass
else:
return f.read(sz1)
def data_io_thread(self):
fsz = self.fsz
s = self.create_socket()
if not s:
return
f = self.open_file()
if not f:
s.close()
return
sz1 = 512
szo = 0
while True:
tmp = fsz - szo
tmp = sz1 if tmp > sz1 else tmp
if self.send:
d = self.read_data(f, sz1)
if d == None:
break
s.send(d)
else:
d = s.recv(tmp)
self.write_data(f, d)
szo += len(d)
self.caller_func.progress = int(100.*szo/fsz)
update_progress(self.caller_func.progress)
if szo >= fsz:
break
self.close_file(f)
s.close()
def data_io(self, caller_func=None):
if not caller_func:
return '0'
if hasattr(caller_func, 't'):
if caller_func.t.is_alive():
return '0'
if self.fsz:
self.caller_func = caller_func
self.caller_func.progress = 0
caller_func.t = threading.Thread(target=self.data_io_thread)
caller_func.t.start()
return '0x%X' % self.fsz
def get_fsz(fname=None, fsz=0):
    """
    Normalize a transfer-size argument.

    A string ``fsz`` is parsed as hex when '0x'-prefixed, decimal
    otherwise; when no size is given and ``fname`` is a path, the file's
    size on disk is used.
    @return the size as an int (0 when nothing could be determined)
    """
    if fsz and isinstance(fsz, str):
        base = 16 if fsz.lower().startswith('0x') else 10
        fsz = int(fsz, base)
    if not fsz and isinstance(fname, str):
        fsz = os.path.getsize(fname)
    return fsz
def update_progress(progress):
    """
    Render a one-line textual progress bar (one '#' per 10 percent) on
    stdout, overwriting the current line; prints a final newline at 100%.
    Output errors are deliberately ignored (stdout may be unavailable).
    """
    bar = '#' * int(progress / 10)
    line = '\r[%s] %d%%' % (bar, progress)
    try:
        sys.stdout.write(line)
        sys.stdout.flush()
    except:
        pass
    if progress == 100:
        print()
def chunks(l, n):
    """
    Yield successive n-sized chunks from the bytes-like object ``l``,
    each decoded as ascii and prefixed with '0x'.
    """
    offset = 0
    while offset < len(l):
        yield '0x' + l[offset:offset + n].decode('ascii')
        offset += n
def send_data(ip_addr, port, fname, fsz=''):
    '''
    Send the contents of a file (or Queue) to the device.
    @param ip_addr - device ip address
    @param port - device tcp port
    @param fname - file name (or Queue data source)
    @param fsz - optional size override; computed from fname when empty
    @return hex-string size when the transfer starts, None on error
    '''
    size = fsz if fsz else get_fsz(fname, fsz)
    if not size:
        return None
    sender = SocketIO(ip_addr, port, fname, fsz=size, send=True)
    return sender.data_io(caller_func=send_data)
def recv_data(ip_addr, port, fname, fsz):
    '''
    Receive data from the device and write it to a file (or Queue).
    @param ip_addr - device ip address
    @param port - device tcp port
    @param fname - destination file name (or Queue data sink)
    @param fsz - expected transfer size; computed from fname when falsy
    @return hex-string size when the transfer starts, None on error
    '''
    size = fsz if fsz else get_fsz(fname, fsz)
    if not size:
        return None
    receiver = SocketIO(ip_addr, port, fname, fsz=size, send=False)
    return receiver.data_io(caller_func=recv_data)
| ivanovev/start | util/socketio.py | Python | gpl-3.0 | 4,348 |
import numpy as np
from rampwf.score_types.soft_accuracy import SoftAccuracy
# Score matrices fed to SoftAccuracy.  score_matrix_1 is the identity, so
# scoring reduces to plain (soft) accuracy; score_matrix_2 additionally
# awards partial credit off the diagonal.
score_matrix_1 = np.array([
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 1],
])
score_matrix_2 = np.array([
    [1, 0.5, 0],
    [0.3, 1, 0.3],
    [0, 0.5, 1],
])
# Ground-truth probability vectors: two hard labels and one 50/50 split.
y_true_proba_1 = np.array([1, 0, 0])
y_true_proba_2 = np.array([0, 1, 0])
y_true_proba_3 = np.array([0.5, 0.5, 0])
# Predictions, including unnormalized, negative and all-zero edge cases.
y_proba_1 = np.array([1, 0, 0])
y_proba_2 = np.array([0, 1, 0])
y_proba_3 = np.array([0, 0.1, 0])
y_proba_4 = np.array([-1, 0.1, -2])
y_proba_5 = np.array([0, 0, 0])
y_proba_6 = np.array([0.5, -1, 3])
def test_soft_accuracy():
    """Check SoftAccuracy over hard, soft and degenerate probability inputs."""
    # Identity score matrix: score is the (soft) accuracy of the prediction.
    score_1 = SoftAccuracy(score_matrix=score_matrix_1)
    assert score_1(np.array([y_true_proba_1]), np.array([y_proba_1])) == 1
    assert score_1(np.array([y_true_proba_1]), np.array([y_proba_2])) == 0
    assert score_1(np.array([y_true_proba_1]), np.array([y_proba_3])) == 0
    assert score_1(np.array([y_true_proba_1]), np.array([y_proba_4])) == 0
    assert score_1(np.array([y_true_proba_2]), np.array([y_proba_1])) == 0
    assert score_1(np.array([y_true_proba_2]), np.array([y_proba_2])) == 1
    assert score_1(np.array([y_true_proba_2]), np.array([y_proba_3])) == 1
    assert score_1(np.array([y_true_proba_2]), np.array([y_proba_4])) == 1
    assert score_1(np.array([y_true_proba_3]), np.array([y_proba_1])) == 0.5
    assert score_1(np.array([y_true_proba_3]), np.array([y_proba_2])) == 0.5
    assert score_1(np.array([y_true_proba_1]), np.array([y_proba_5])) == 0.0
    assert score_1(np.array([y_true_proba_2]), np.array([y_proba_5])) == 0.0
    assert score_1(np.array([y_true_proba_3]), np.array([y_proba_5])) == 0
    assert score_1(np.array([y_true_proba_1]), np.array([y_proba_6])) == 1 / 3
    assert score_1(np.array([y_true_proba_2]), np.array([y_proba_6])) == 0.0
    assert score_1(np.array([y_true_proba_3]), np.array([y_proba_6])) == 1 / 6
    assert score_1(np.array([y_true_proba_3]), np.array([y_proba_3])) == 0.5
    assert score_1(np.array([y_true_proba_3]), np.array([y_proba_4])) == 0.5
    # Partial-credit matrix: off-diagonal predictions earn fractional score.
    score_2 = SoftAccuracy(score_matrix=score_matrix_2)
    assert score_2(np.array([y_true_proba_1]), np.array([y_proba_1])) == 1
    assert score_2(np.array([y_true_proba_1]), np.array([y_proba_2])) == 0.5
    assert score_2(np.array([y_true_proba_1]), np.array([y_proba_3])) == 0.5
    assert score_2(np.array([y_true_proba_1]), np.array([y_proba_4])) == 0.5
    assert score_2(np.array([y_true_proba_2]), np.array([y_proba_1])) == 0.3
    assert score_2(np.array([y_true_proba_2]), np.array([y_proba_2])) == 1
    assert score_2(np.array([y_true_proba_2]), np.array([y_proba_3])) == 1
    assert score_2(np.array([y_true_proba_2]), np.array([y_proba_4])) == 1
    assert score_2(np.array([y_true_proba_3]), np.array([y_proba_1])) == 0.65
    assert score_2(np.array([y_true_proba_3]), np.array([y_proba_2])) == 0.75
    assert score_2(np.array([y_true_proba_1]), np.array([y_proba_5])) == 0.0
    assert score_2(np.array([y_true_proba_2]), np.array([y_proba_5])) == 0.0
    assert score_2(np.array([y_true_proba_3]), np.array([y_proba_5])) == 0.0
    assert score_2(np.array([y_true_proba_1]), np.array([y_proba_6])) == 1 / 3
    assert score_2(np.array([y_true_proba_2]), np.array([y_proba_6])) == 0.3
    assert score_2(np.array([y_true_proba_3]), np.array([y_proba_3])) == 0.75
    assert score_2(np.array([y_true_proba_3]), np.array([y_proba_4])) == 0.75
| paris-saclay-cds/ramp-workflow | rampwf/score_types/tests/test_soft_accuracy.py | Python | bsd-3-clause | 3,379 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MainWindow.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1280, 849)
MainWindow.setStyleSheet("QMainWindow {\n"
" background-color: #ffffff;\n"
" font-family: \'Roboto\', sans-serif\n"
"}\n"
"\n"
"QMenuBar {\n"
" background-color: #ffffff;\n"
" color: #ffffff;\n"
" font-family: \'Roboto\', sans-serif\n"
"}\n"
"\n"
"QMenuBar::item {\n"
" color: #2c7fb8;\n"
" border-radius: 4px\n"
"}\n"
"\n"
"QMenuBar::item:selected {\n"
" color: #2c7fb8\n"
"}\n"
"\n"
"QPushButton {\n"
" background-color: #FFFFFF;\n"
"border-top-left-radius: 5px;\n"
"border-top-right-radius: 5px;\n"
"border-bottom-left-radius: 5px;\n"
"border-bottom-right-radius: 5px;\n"
"border: 1.5px solid #C4C4C3;\n"
"border-bottom-color: #C2C7CB; /* same as the pane color */\n"
"min-width: 10ex;\n"
"padding: 2px;\n"
"font-family: \'Roboto\', sans-serif;\n"
"font-weight: 500;\n"
"font-size: 15px;\n"
"color: #2c7fb8;\n"
"}\n"
"\n"
"QWidget {\n"
" background-color: #ffffff\n"
"}\n"
"\n"
"QTabWidget {\n"
"background-color: #ffffff\n"
"}\n"
"\n"
"QLabel {\n"
"color: #2c7fb8;\n"
"font-family: \'Roboto\', sans-serif\n"
"}\n"
"\n"
"QDoubleSpinBox {\n"
" background-color: #FFFFFF;\n"
" color: #2c7fb8;\n"
" font-size: 15px;\n"
"border: 1.5px solid #C4C4C3;\n"
"border-bottom-color: #C2C7CB; /* same as the pane color */\n"
"border-top-left-radius: 5px;\n"
"border-top-right-radius: 5px;\n"
"border-bottom-left-radius: 5px;\n"
"border-bottom-right-radius: 5px;\n"
"min-width: 10ex;\n"
"padding: 2px;\n"
"font-family: \'Roboto\', sans-serif\n"
"}\n"
"\n"
"QSpinBox {\n"
" background-color: #FFFFFF;\n"
" color: #2c7fb8;\n"
" font-size: 15px;\n"
"border: 1.5px solid #C4C4C3;\n"
"border-bottom-color: #C2C7CB; /* same as the pane color */\n"
"border-top-left-radius: 5px;\n"
"border-top-right-radius: 5px;\n"
"border-bottom-left-radius: 5px;\n"
"border-bottom-right-radius: 5px;\n"
"min-width: 10ex;\n"
"padding: 2px;\n"
"font-family: \'Roboto\', sans-serif\n"
"}\n"
"\n"
"QComboBox {\n"
"background-color: #FFFFFF;\n"
"color: #2c7fb8;\n"
"font-size: 15px;\n"
"border: 2px solid #C4C4C3;\n"
"border-bottom-color: #C2C7CB; /* same as the pane color */\n"
"border-top-left-radius: 5px;\n"
"border-top-right-radius: 5px;\n"
"border-bottom-left-radius: 5px;\n"
"border-bottom-right-radius: 5px;\n"
"min-width: 8ex;\n"
"padding: 2px;\n"
"font-family: \'Roboto\', sans-serif\n"
"}\n"
"\n"
"QTabWidget::pane { /* The tab widget frame */\n"
"border-top: 2px solid #C2C7CB;\n"
"}\n"
"QTabWidget::tab-bar {\n"
"left: 5px; /* move to the right by 5px */\n"
"}\n"
"/* Style the tab using the tab sub-control. Note that it reads QTabBar _not_ QTabWidget */\n"
"QTabBar::tab {\n"
"background: #FFFFFF;\n"
"color: #2c7fb8;\n"
"border: 2px solid #FFFFFF;\n"
"border-bottom-color: #C2C7CB; /* same as the pane color */\n"
"border-top-left-radius: 4px;\n"
"border-top-right-radius: 4px;\n"
"min-width: 8ex;\n"
"padding: 2px;\n"
"font-family: \'Roboto\', sans-serif\n"
"}\n"
"QTabBar::tab:selected, QTabBar::tab:hover {\n"
"background: #edf8b1;\n"
"color: #2c7fb8;\n"
"font-family: \'Roboto\';\n"
"font-weight: 600\n"
"}\n"
"QTabBar::tab:selected {\n"
"border-color: #edf8b1;\n"
"border-bottom-color: #C2C7CB; /* same as pane color */\n"
"}\n"
"QTabBar::tab:!selected {\n"
"margin-top: 2px; /* make non-selected tabs look smaller */\n"
"}\n"
"QTableWidget {\n"
" background-color: #FFFFFF;\n"
"border-top-left-radius: 5px;\n"
"border-top-right-radius: 5px;\n"
"border-bottom-left-radius: 5px;\n"
"border-bottom-right-radius: 5px;\n"
"border: 1.5px solid #C4C4C3;\n"
"border-bottom-color: #C2C7CB; /* same as the pane color */\n"
"min-width: 10ex;\n"
"padding: 2px;\n"
"font-family: \'Roboto\', sans-serif;\n"
"font-weight: 500;\n"
"font-size: 15px;\n"
"color: #2c7fb8;\n"
"}\n"
"\n"
"QGraphicsView {\n"
"font-family: \'Robot\', sans-serif;\n"
"font-size: 15px\n"
"}")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.line = QtWidgets.QFrame(self.centralwidget)
self.line.setFrameShadow(QtWidgets.QFrame.Raised)
self.line.setLineWidth(3)
self.line.setFrameShape(QtWidgets.QFrame.VLine)
self.line.setObjectName("line")
self.gridLayout.addWidget(self.line, 0, 1, 1, 1)
self.verticalLayout_5 = QtWidgets.QVBoxLayout()
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.graphicsViewMain = PlotWidget(self.centralwidget)
self.graphicsViewMain.setFrameShadow(QtWidgets.QFrame.Raised)
self.graphicsViewMain.setLineWidth(2)
self.graphicsViewMain.setObjectName("graphicsViewMain")
self.verticalLayout_5.addWidget(self.graphicsViewMain)
self.graphicsViewOverview = QtWidgets.QGraphicsView(self.centralwidget)
self.graphicsViewOverview.setMaximumSize(QtCore.QSize(16777215, 150))
self.graphicsViewOverview.setObjectName("graphicsViewOverview")
self.verticalLayout_5.addWidget(self.graphicsViewOverview)
self.gridLayout.addLayout(self.verticalLayout_5, 0, 2, 1, 1)
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setMaximumSize(QtCore.QSize(270, 16777215))
self.tabWidget.setStyleSheet("")
self.tabWidget.setObjectName("tabWidget")
self.tabScansettings = QtWidgets.QWidget()
self.tabScansettings.setObjectName("tabScansettings")
self.verticalLayoutWidget_3 = QtWidgets.QWidget(self.tabScansettings)
self.verticalLayoutWidget_3.setGeometry(QtCore.QRect(10, 0, 251, 671))
self.verticalLayoutWidget_3.setObjectName("verticalLayoutWidget_3")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_3)
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.line_6 = QtWidgets.QFrame(self.verticalLayoutWidget_3)
self.line_6.setFrameShape(QtWidgets.QFrame.HLine)
self.line_6.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_6.setObjectName("line_6")
self.verticalLayout_3.addWidget(self.line_6)
self.label_16 = QtWidgets.QLabel(self.verticalLayoutWidget_3)
self.label_16.setMaximumSize(QtCore.QSize(16777215, 30))
font = QtGui.QFont()
font.setFamily("Roboto,sans-serif")
self.label_16.setFont(font)
self.label_16.setStyleSheet("")
self.label_16.setAlignment(QtCore.Qt.AlignCenter)
self.label_16.setObjectName("label_16")
self.verticalLayout_3.addWidget(self.label_16)
self.formLayout_3 = QtWidgets.QFormLayout()
self.formLayout_3.setObjectName("formLayout_3")
self.label_12 = QtWidgets.QLabel(self.verticalLayoutWidget_3)
font = QtGui.QFont()
font.setFamily("Roboto,sans-serif")
self.label_12.setFont(font)
self.label_12.setObjectName("label_12")
self.formLayout_3.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_12)
self.labelScanNum = QtWidgets.QLabel(self.verticalLayoutWidget_3)
self.labelScanNum.setMinimumSize(QtCore.QSize(100, 0))
self.labelScanNum.setMaximumSize(QtCore.QSize(100, 30))
self.labelScanNum.setFrameShape(QtWidgets.QFrame.Box)
self.labelScanNum.setLineWidth(1)
self.labelScanNum.setText("")
self.labelScanNum.setObjectName("labelScanNum")
self.formLayout_3.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.labelScanNum)
self.label_13 = QtWidgets.QLabel(self.verticalLayoutWidget_3)
font = QtGui.QFont()
font.setFamily("Roboto,sans-serif")
self.label_13.setFont(font)
self.label_13.setObjectName("label_13")
self.formLayout_3.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_13)
self.labelShotCount = QtWidgets.QLabel(self.verticalLayoutWidget_3)
self.labelShotCount.setMinimumSize(QtCore.QSize(100, 0))
self.labelShotCount.setMaximumSize(QtCore.QSize(100, 30))
self.labelShotCount.setFrameShape(QtWidgets.QFrame.Box)
self.labelShotCount.setText("")
self.labelShotCount.setObjectName("labelShotCount")
self.formLayout_3.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.labelShotCount)
self.label_14 = QtWidgets.QLabel(self.verticalLayoutWidget_3)
font = QtGui.QFont()
font.setFamily("Roboto,sans-serif")
self.label_14.setFont(font)
self.label_14.setObjectName("label_14")
self.formLayout_3.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_14)
self.labelTuning = QtWidgets.QLabel(self.verticalLayoutWidget_3)
self.labelTuning.setMinimumSize(QtCore.QSize(100, 0))
self.labelTuning.setMaximumSize(QtCore.QSize(100, 30))
self.labelTuning.setFrameShape(QtWidgets.QFrame.Box)
self.labelTuning.setText("")
self.labelTuning.setObjectName("labelTuning")
self.formLayout_3.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.labelTuning)
self.label_15 = QtWidgets.QLabel(self.verticalLayoutWidget_3)
font = QtGui.QFont()
font.setFamily("Roboto,sans-serif")
self.label_15.setFont(font)
self.label_15.setObjectName("label_15")
self.formLayout_3.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_15)
self.labelAttenuation = QtWidgets.QLabel(self.verticalLayoutWidget_3)
self.labelAttenuation.setMinimumSize(QtCore.QSize(100, 0))
self.labelAttenuation.setMaximumSize(QtCore.QSize(100, 30))
self.labelAttenuation.setFrameShape(QtWidgets.QFrame.Box)
self.labelAttenuation.setText("")
self.labelAttenuation.setObjectName("labelAttenuation")
self.formLayout_3.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.labelAttenuation)
self.verticalLayout_3.addLayout(self.formLayout_3)
self.tabWidget.addTab(self.tabScansettings, "")
self.tabFIDsettings = QtWidgets.QWidget()
self.tabFIDsettings.setObjectName("tabFIDsettings")
self.verticalLayoutWidget_2 = QtWidgets.QWidget(self.tabFIDsettings)
self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(0, 0, 282, 750))
self.verticalLayoutWidget_2.setObjectName("verticalLayoutWidget_2")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.line_3 = QtWidgets.QFrame(self.verticalLayoutWidget_2)
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.verticalLayout_2.addWidget(self.line_3)
self.label_6 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
self.label_6.setMaximumSize(QtCore.QSize(16777215, 30))
font = QtGui.QFont()
font.setFamily("Roboto,sans-serif")
self.label_6.setFont(font)
self.label_6.setAlignment(QtCore.Qt.AlignCenter)
self.label_6.setObjectName("label_6")
self.verticalLayout_2.addWidget(self.label_6)
self.formLayout_2 = QtWidgets.QFormLayout()
self.formLayout_2.setObjectName("formLayout_2")
self.label_7 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
font = QtGui.QFont()
font.setFamily("Roboto,sans-serif")
self.label_7.setFont(font)
self.label_7.setObjectName("label_7")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_7)
self.comboBoxWindowFunction = QtWidgets.QComboBox(self.verticalLayoutWidget_2)
self.comboBoxWindowFunction.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.comboBoxWindowFunction.setObjectName("comboBoxWindowFunction")
self.comboBoxWindowFunction.addItem("")
self.comboBoxWindowFunction.addItem("")
self.comboBoxWindowFunction.addItem("")
self.comboBoxWindowFunction.addItem("")
self.comboBoxWindowFunction.addItem("")
self.comboBoxWindowFunction.addItem("")
self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.comboBoxWindowFunction)
self.label_8 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
font = QtGui.QFont()
font.setFamily("Roboto,sans-serif")
self.label_8.setFont(font)
self.label_8.setObjectName("label_8")
self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_8)
self.spinBoxExpFilter = QtWidgets.QSpinBox(self.verticalLayoutWidget_2)
self.spinBoxExpFilter.setToolTip("")
self.spinBoxExpFilter.setToolTipDuration(2)
self.spinBoxExpFilter.setMaximum(1000)
self.spinBoxExpFilter.setSingleStep(10)
self.spinBoxExpFilter.setObjectName("spinBoxExpFilter")
self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.spinBoxExpFilter)
self.label_9 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
font = QtGui.QFont()
font.setFamily("Roboto,sans-serif")
self.label_9.setFont(font)
self.label_9.setObjectName("label_9")
self.formLayout_2.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_9)
self.spinBoxHighPass = QtWidgets.QSpinBox(self.verticalLayoutWidget_2)
self.spinBoxHighPass.setMaximum(1000)
self.spinBoxHighPass.setSingleStep(5)
self.spinBoxHighPass.setProperty("value", 0)
self.spinBoxHighPass.setObjectName("spinBoxHighPass")
self.formLayout_2.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.spinBoxHighPass)
self.label_18 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
font = QtGui.QFont()
font.setFamily("Roboto,sans-serif")
self.label_18.setFont(font)
self.label_18.setObjectName("label_18")
self.formLayout_2.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_18)
self.spinBoxLowPass = QtWidgets.QSpinBox(self.verticalLayoutWidget_2)
self.spinBoxLowPass.setMaximum(1000)
self.spinBoxLowPass.setSingleStep(5)
self.spinBoxLowPass.setProperty("value", 0)
self.spinBoxLowPass.setObjectName("spinBoxLowPass")
self.formLayout_2.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.spinBoxLowPass)
self.label_10 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
font = QtGui.QFont()
font.setFamily("Roboto,sans-serif")
self.label_10.setFont(font)
self.label_10.setObjectName("label_10")
self.formLayout_2.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.label_10)
self.spinBoxDelay = QtWidgets.QSpinBox(self.verticalLayoutWidget_2)
self.spinBoxDelay.setSuffix("")
self.spinBoxDelay.setMaximum(1000)
self.spinBoxDelay.setObjectName("spinBoxDelay")
self.formLayout_2.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.spinBoxDelay)
self.spinBoxScanID = QtWidgets.QSpinBox(self.verticalLayoutWidget_2)
self.spinBoxScanID.setMaximum(10000000)
self.spinBoxScanID.setObjectName("spinBoxScanID")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.spinBoxScanID)
self.label_19 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
font = QtGui.QFont()
font.setFamily("Roboto,sans-serif")
self.label_19.setFont(font)
self.label_19.setObjectName("label_19")
self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_19)
self.verticalLayout_2.addLayout(self.formLayout_2)
self.line_4 = QtWidgets.QFrame(self.verticalLayoutWidget_2)
self.line_4.setFrameShape(QtWidgets.QFrame.HLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.verticalLayout_2.addWidget(self.line_4)
self.label_11 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
self.label_11.setMaximumSize(QtCore.QSize(16777215, 30))
font = QtGui.QFont()
font.setFamily("Roboto,sans-serif")
self.label_11.setFont(font)
self.label_11.setAlignment(QtCore.Qt.AlignCenter)
self.label_11.setObjectName("label_11")
self.verticalLayout_2.addWidget(self.label_11)
self.graphicsViewFID = PlotWidget(self.verticalLayoutWidget_2)
self.graphicsViewFID.setMinimumSize(QtCore.QSize(0, 180))
self.graphicsViewFID.setMaximumSize(QtCore.QSize(16777215, 180))
self.graphicsViewFID.setFrameShadow(QtWidgets.QFrame.Raised)
self.graphicsViewFID.setLineWidth(2)
self.graphicsViewFID.setObjectName("graphicsViewFID")
self.verticalLayout_2.addWidget(self.graphicsViewFID)
self.pushButtonPickDoppler = QtWidgets.QPushButton(self.verticalLayoutWidget_2)
self.pushButtonPickDoppler.setObjectName("pushButtonPickDoppler")
self.verticalLayout_2.addWidget(self.pushButtonPickDoppler)
self.pushButtonAutofit = QtWidgets.QPushButton(self.verticalLayoutWidget_2)
self.pushButtonAutofit.setObjectName("pushButtonAutofit")
self.verticalLayout_2.addWidget(self.pushButtonAutofit)
self.formLayout_4 = QtWidgets.QFormLayout()
self.formLayout_4.setObjectName("formLayout_4")
self.label_17 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
font = QtGui.QFont()
font.setFamily("Roboto,sans-serif")
self.label_17.setFont(font)
self.label_17.setObjectName("label_17")
self.formLayout_4.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_17)
self.labelCenterFrequency = QtWidgets.QLabel(self.verticalLayoutWidget_2)
self.labelCenterFrequency.setMinimumSize(QtCore.QSize(100, 0))
self.labelCenterFrequency.setMaximumSize(QtCore.QSize(100, 30))
self.labelCenterFrequency.setFrameShape(QtWidgets.QFrame.Box)
self.labelCenterFrequency.setText("")
self.labelCenterFrequency.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse|QtCore.Qt.TextSelectableByKeyboard|QtCore.Qt.TextSelectableByMouse)
self.labelCenterFrequency.setObjectName("labelCenterFrequency")
self.formLayout_4.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.labelCenterFrequency)
self.labelDopplerSplitting = QtWidgets.QLabel(self.verticalLayoutWidget_2)
self.labelDopplerSplitting.setMinimumSize(QtCore.QSize(100, 0))
self.labelDopplerSplitting.setMaximumSize(QtCore.QSize(100, 30))
self.labelDopplerSplitting.setFrameShape(QtWidgets.QFrame.Box)
self.labelDopplerSplitting.setText("")
self.labelDopplerSplitting.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse|QtCore.Qt.TextSelectableByKeyboard|QtCore.Qt.TextSelectableByMouse)
self.labelDopplerSplitting.setObjectName("labelDopplerSplitting")
self.formLayout_4.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.labelDopplerSplitting)
self.label_20 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
font = QtGui.QFont()
font.setFamily("Roboto,sans-serif")
self.label_20.setFont(font)
self.label_20.setObjectName("label_20")
self.formLayout_4.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_20)
self.labelFWHM = QtWidgets.QLabel(self.verticalLayoutWidget_2)
self.labelFWHM.setMinimumSize(QtCore.QSize(100, 0))
self.labelFWHM.setMaximumSize(QtCore.QSize(100, 30))
self.labelFWHM.setFrameShape(QtWidgets.QFrame.Box)
self.labelFWHM.setText("")
self.labelFWHM.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse|QtCore.Qt.TextSelectableByKeyboard|QtCore.Qt.TextSelectableByMouse)
self.labelFWHM.setObjectName("labelFWHM")
self.formLayout_4.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.labelFWHM)
self.label_21 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
font = QtGui.QFont()
font.setFamily("Roboto,sans-serif")
self.label_21.setFont(font)
self.label_21.setObjectName("label_21")
self.formLayout_4.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_21)
self.verticalLayout_2.addLayout(self.formLayout_4)
self.tabWidget.addTab(self.tabFIDsettings, "")
self.tabPeaksettings = QtWidgets.QWidget()
self.tabPeaksettings.setObjectName("tabPeaksettings")
self.verticalLayoutWidget = QtWidgets.QWidget(self.tabPeaksettings)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 261, 681))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.line_5 = QtWidgets.QFrame(self.verticalLayoutWidget)
self.line_5.setFrameShape(QtWidgets.QFrame.HLine)
self.line_5.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_5.setObjectName("line_5")
self.verticalLayout.addWidget(self.line_5)
self.label = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label.setMaximumSize(QtCore.QSize(16777215, 30))
font = QtGui.QFont()
font.setFamily("Roboto,sans-serif")
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.formLayout = QtWidgets.QFormLayout()
self.formLayout.setObjectName("formLayout")
self.label_3 = QtWidgets.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setFamily("Roboto,sans-serif")
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_3)
self.label_4 = QtWidgets.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setFamily("Roboto,sans-serif")
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_4)
self.label_5 = QtWidgets.QLabel(self.verticalLayoutWidget)
font = QtGui.QFont()
font.setFamily("Roboto,sans-serif")
self.label_5.setFont(font)
self.label_5.setObjectName("label_5")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_5)
self.checkBoxDetectPeaks = QtWidgets.QCheckBox(self.verticalLayoutWidget)
self.checkBoxDetectPeaks.setText("")
self.checkBoxDetectPeaks.setObjectName("checkBoxDetectPeaks")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.checkBoxDetectPeaks)
self.doubleSpinBoxPeakSNRThres = QtWidgets.QDoubleSpinBox(self.verticalLayoutWidget)
self.doubleSpinBoxPeakSNRThres.setMaximum(10.0)
self.doubleSpinBoxPeakSNRThres.setSingleStep(0.01)
self.doubleSpinBoxPeakSNRThres.setProperty("value", 0.3)
self.doubleSpinBoxPeakSNRThres.setObjectName("doubleSpinBoxPeakSNRThres")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.doubleSpinBoxPeakSNRThres)
self.doubleSpinBoxPeakMinDist = QtWidgets.QDoubleSpinBox(self.verticalLayoutWidget)
self.doubleSpinBoxPeakMinDist.setProperty("value", 1.0)
self.doubleSpinBoxPeakMinDist.setObjectName("doubleSpinBoxPeakMinDist")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.doubleSpinBoxPeakMinDist)
self.verticalLayout.addLayout(self.formLayout)
self.line_2 = QtWidgets.QFrame(self.verticalLayoutWidget)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.verticalLayout.addWidget(self.line_2)
self.label_2 = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label_2.setMaximumSize(QtCore.QSize(16777215, 30))
font = QtGui.QFont()
font.setFamily("Roboto,sans-serif")
self.label_2.setFont(font)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.verticalLayout.addWidget(self.label_2)
self.tableWidgetPeakTable = QtWidgets.QTableWidget(self.verticalLayoutWidget)
self.tableWidgetPeakTable.setObjectName("tableWidgetPeakTable")
self.tableWidgetPeakTable.setColumnCount(0)
self.tableWidgetPeakTable.setRowCount(0)
self.verticalLayout.addWidget(self.tableWidgetPeakTable)
self.tabWidget.addTab(self.tabPeaksettings, "")
self.gridLayout.addWidget(self.tabWidget, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1280, 22))
self.menubar.setStyleSheet("QMenuBar {\n"
" background-color: qlineargradient(x1:0, y1:0, x2:0, y2:1,\n"
" stop:0 lightgray, stop:1 darkgray);\n"
"}\n"
"\n"
"QMenuBar::item {\n"
" spacing: 3px; /* spacing between menu bar items */\n"
" padding: 1px 4px;\n"
" background: transparent;\n"
" border-radius: 4px;\n"
"}\n"
"\n"
"QMenuBar::item:selected { /* when selected using mouse or keyboard */\n"
" background: #a8a8a8;\n"
"}\n"
"\n"
"QMenuBar::item:pressed {\n"
" background: #888888;\n"
"}")
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuLoad = QtWidgets.QMenu(self.menuFile)
self.menuLoad.setObjectName("menuLoad")
self.menuExport = QtWidgets.QMenu(self.menuFile)
self.menuExport.setObjectName("menuExport")
self.menuFitting = QtWidgets.QMenu(self.menubar)
self.menuFitting.setObjectName("menuFitting")
self.menuOverlays = QtWidgets.QMenu(self.menubar)
self.menuOverlays.setObjectName("menuOverlays")
self.menuLoad_overlay = QtWidgets.QMenu(self.menuOverlays)
self.menuLoad_overlay.setObjectName("menuLoad_overlay")
MainWindow.setMenuBar(self.menubar)
self.statusBar = QtWidgets.QStatusBar(MainWindow)
self.statusBar.setObjectName("statusBar")
MainWindow.setStatusBar(self.statusBar)
self.actionLoad_spectrum = QtWidgets.QAction(MainWindow)
self.actionLoad_spectrum.setObjectName("actionLoad_spectrum")
self.actionSave_spectrum = QtWidgets.QAction(MainWindow)
self.actionSave_spectrum.setObjectName("actionSave_spectrum")
self.actionExit = QtWidgets.QAction(MainWindow)
self.actionExit.setObjectName("actionExit")
self.actionDisplay_Test = QtWidgets.QAction(MainWindow)
self.actionDisplay_Test.setObjectName("actionDisplay_Test")
self.actionLoad_FID = QtWidgets.QAction(MainWindow)
self.actionLoad_FID.setObjectName("actionLoad_FID")
self.actionSpectrum = QtWidgets.QAction(MainWindow)
self.actionSpectrum.setObjectName("actionSpectrum")
self.actionFID = QtWidgets.QAction(MainWindow)
self.actionFID.setObjectName("actionFID")
self.actionCAT = QtWidgets.QAction(MainWindow)
self.actionCAT.setObjectName("actionCAT")
self.actionSettings = QtWidgets.QAction(MainWindow)
self.actionSettings.setObjectName("actionSettings")
self.actionCAT_file = QtWidgets.QAction(MainWindow)
self.actionCAT_file.setObjectName("actionCAT_file")
self.actionPeaks = QtWidgets.QAction(MainWindow)
self.actionPeaks.setObjectName("actionPeaks")
self.actionSave_FID = QtWidgets.QAction(MainWindow)
self.actionSave_FID.setObjectName("actionSave_FID")
self.actionSave_peaks = QtWidgets.QAction(MainWindow)
self.actionSave_peaks.setObjectName("actionSave_peaks")
self.actionFit_Gaussian = QtWidgets.QAction(MainWindow)
self.actionFit_Gaussian.setCheckable(False)
self.actionFit_Gaussian.setObjectName("actionFit_Gaussian")
self.actionPick_Gaussians = QtWidgets.QAction(MainWindow)
self.actionPick_Gaussians.setObjectName("actionPick_Gaussians")
self.actionBatch = QtWidgets.QAction(MainWindow)
self.actionBatch.setObjectName("actionBatch")
self.actionDR_scan = QtWidgets.QAction(MainWindow)
self.actionDR_scan.setObjectName("actionDR_scan")
self.actionManual_FFT_peaks = QtWidgets.QAction(MainWindow)
self.actionManual_FFT_peaks.setObjectName("actionManual_FFT_peaks")
self.actionFFT_fits = QtWidgets.QAction(MainWindow)
self.actionFFT_fits.setObjectName("actionFFT_fits")
self.actionOverlay_settings = QtWidgets.QAction(MainWindow)
self.actionOverlay_settings.setObjectName("actionOverlay_settings")
self.actionLegacy_mmw = QtWidgets.QAction(MainWindow)
self.actionLegacy_mmw.setObjectName("actionLegacy_mmw")
self.actionFTB = QtWidgets.QAction(MainWindow)
self.actionFTB.setObjectName("actionFTB")
self.actionSave_peaks_2 = QtWidgets.QAction(MainWindow)
self.actionSave_peaks_2.setObjectName("actionSave_peaks_2")
self.actionStick_spectrum = QtWidgets.QAction(MainWindow)
self.actionStick_spectrum.setObjectName("actionStick_spectrum")
self.actionCompress_data = QtWidgets.QAction(MainWindow)
self.actionCompress_data.setObjectName("actionCompress_data")
self.actionQtFTMScan = QtWidgets.QAction(MainWindow)
self.actionQtFTMScan.setObjectName("actionQtFTMScan")
self.actionCreate_batch = QtWidgets.QAction(MainWindow)
self.actionCreate_batch.setObjectName("actionCreate_batch")
self.menuLoad.addAction(self.actionSpectrum)
self.menuLoad.addAction(self.actionFID)
self.menuLoad.addSeparator()
self.menuLoad.addAction(self.actionBatch)
self.menuLoad.addAction(self.actionLegacy_mmw)
self.menuExport.addAction(self.actionFTB)
self.menuExport.addAction(self.actionSave_peaks_2)
self.menuExport.addAction(self.actionStick_spectrum)
self.menuFile.addAction(self.actionCreate_batch)
self.menuFile.addAction(self.menuLoad.menuAction())
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionSave_FID)
self.menuFile.addAction(self.actionSave_spectrum)
self.menuFile.addAction(self.menuExport.menuAction())
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionSettings)
self.menuFile.addAction(self.actionExit)
self.menuFitting.addAction(self.actionFit_Gaussian)
self.menuFitting.addAction(self.actionManual_FFT_peaks)
self.menuFitting.addAction(self.actionPick_Gaussians)
self.menuFitting.addSeparator()
self.menuLoad_overlay.addAction(self.actionCAT_file)
self.menuLoad_overlay.addAction(self.actionPeaks)
self.menuOverlays.addAction(self.menuLoad_overlay.menuAction())
self.menuOverlays.addAction(self.actionOverlay_settings)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuFitting.menuAction())
self.menubar.addAction(self.menuOverlays.menuAction())
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "FTSpecViewer"))
self.graphicsViewMain.setStatusTip(_translate("MainWindow", "Main spectrum window"))
self.graphicsViewOverview.setStatusTip(_translate("MainWindow", "Overview window"))
self.label_16.setText(_translate("MainWindow", "Scan Details"))
self.label_12.setText(_translate("MainWindow", "Scan number"))
self.label_13.setText(_translate("MainWindow", "Shot count"))
self.label_14.setText(_translate("MainWindow", "Tuning voltage"))
self.label_15.setText(_translate("MainWindow", "Attenuation"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabScansettings), _translate("MainWindow", "Details"))
self.label_6.setText(_translate("MainWindow", "FID Processing"))
self.label_7.setText(_translate("MainWindow", "Window Function"))
self.comboBoxWindowFunction.setStatusTip(_translate("MainWindow", "Apply a window function to the FID."))
self.comboBoxWindowFunction.setItemText(0, _translate("MainWindow", "none"))
self.comboBoxWindowFunction.setItemText(1, _translate("MainWindow", "blackman"))
self.comboBoxWindowFunction.setItemText(2, _translate("MainWindow", "blackmanharris"))
self.comboBoxWindowFunction.setItemText(3, _translate("MainWindow", "boxcar"))
self.comboBoxWindowFunction.setItemText(4, _translate("MainWindow", "hanning"))
self.comboBoxWindowFunction.setItemText(5, _translate("MainWindow", "bartlett"))
self.label_8.setText(_translate("MainWindow", "Exponential Filter"))
self.spinBoxExpFilter.setStatusTip(_translate("MainWindow", "Apply an exponential filter (microseconds)"))
self.label_9.setText(_translate("MainWindow", "High-Pass Filter"))
self.spinBoxHighPass.setStatusTip(_translate("MainWindow", "Apply a high-pass filter (kHz)"))
self.label_18.setText(_translate("MainWindow", "Low-Pass Filter"))
self.spinBoxLowPass.setStatusTip(_translate("MainWindow", "Apply a low-pass filter (kHz)"))
self.label_10.setText(_translate("MainWindow", "Delay"))
self.spinBoxDelay.setStatusTip(_translate("MainWindow", "Delay the FID processing (microseconds)"))
self.label_19.setText(_translate("MainWindow", "Scan ID"))
self.label_11.setText(_translate("MainWindow", "FID trace"))
self.graphicsViewFID.setStatusTip(_translate("MainWindow", "FID trace"))
self.pushButtonPickDoppler.setText(_translate("MainWindow", "Manual Doppler fit"))
self.pushButtonAutofit.setText(_translate("MainWindow", "Autofit"))
self.label_17.setText(_translate("MainWindow", "Center Frequency"))
self.label_20.setText(_translate("MainWindow", "Doppler-Splitting"))
self.label_21.setText(_translate("MainWindow", "FWHM"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabFIDsettings), _translate("MainWindow", "FID details"))
self.label.setText(_translate("MainWindow", "Detection Settings"))
self.label_3.setText(_translate("MainWindow", "Threshold"))
self.label_4.setText(_translate("MainWindow", "Minimum distance"))
self.label_5.setText(_translate("MainWindow", "Detect peaks"))
self.checkBoxDetectPeaks.setStatusTip(_translate("MainWindow", "Check to automatically detect peaks"))
self.doubleSpinBoxPeakSNRThres.setStatusTip(_translate("MainWindow", "Minimum SNR value for peak detection"))
self.doubleSpinBoxPeakMinDist.setStatusTip(_translate("MainWindow", "Minimum distance between detected peaks"))
self.label_2.setText(_translate("MainWindow", "Peak Table"))
self.tableWidgetPeakTable.setStatusTip(_translate("MainWindow", "Table of detected peaks in the spectrum"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabPeaksettings), _translate("MainWindow", "Peaks"))
self.menuFile.setTitle(_translate("MainWindow", "File"))
self.menuLoad.setTitle(_translate("MainWindow", "Load..."))
self.menuExport.setTitle(_translate("MainWindow", "Export peaks..."))
self.menuFitting.setTitle(_translate("MainWindow", "Fitting"))
self.menuOverlays.setTitle(_translate("MainWindow", "Overlays"))
self.menuLoad_overlay.setTitle(_translate("MainWindow", "Load overlay..."))
self.actionLoad_spectrum.setText(_translate("MainWindow", "Load spectrum"))
self.actionSave_spectrum.setText(_translate("MainWindow", "Save spectrum"))
self.actionExit.setText(_translate("MainWindow", "Exit"))
self.actionDisplay_Test.setText(_translate("MainWindow", "Display Test"))
self.actionLoad_FID.setText(_translate("MainWindow", "Load FID"))
self.actionSpectrum.setText(_translate("MainWindow", "Spectrum"))
self.actionFID.setText(_translate("MainWindow", "FID"))
self.actionCAT.setText(_translate("MainWindow", "CAT"))
self.actionSettings.setText(_translate("MainWindow", "Settings"))
self.actionCAT_file.setText(_translate("MainWindow", "CAT file"))
self.actionCAT_file.setStatusTip(_translate("MainWindow", "Load an SPCAT catalog file."))
self.actionPeaks.setText(_translate("MainWindow", "Peaks"))
self.actionPeaks.setStatusTip(_translate("MainWindow", "Load a plain text file containing frequencies and intensities"))
self.actionSave_FID.setText(_translate("MainWindow", "Save FID"))
self.actionSave_peaks.setText(_translate("MainWindow", "Save peaks"))
self.actionFit_Gaussian.setText(_translate("MainWindow", "Autofit FFT peaks"))
self.actionPick_Gaussians.setText(_translate("MainWindow", "Pick Gaussians"))
self.actionBatch.setText(_translate("MainWindow", "Batch"))
self.actionDR_scan.setText(_translate("MainWindow", "DR scan"))
self.actionManual_FFT_peaks.setText(_translate("MainWindow", "Manual FFT peaks"))
self.actionFFT_fits.setText(_translate("MainWindow", "FFT fitting"))
self.actionOverlay_settings.setText(_translate("MainWindow", "Overlay settings"))
self.actionLegacy_mmw.setText(_translate("MainWindow", "Legacy mmw"))
self.actionFTB.setText(_translate("MainWindow", "FTB"))
self.actionSave_peaks_2.setText(_translate("MainWindow", "ASCII"))
self.actionStick_spectrum.setText(_translate("MainWindow", "Stick spectrum"))
self.actionCompress_data.setText(_translate("MainWindow", "Compress data"))
self.actionQtFTMScan.setText(_translate("MainWindow", "QtFTM Scan"))
self.actionCreate_batch.setText(_translate("MainWindow", "Create batch"))
from pyqtgraph import PlotWidget
import fonts_rc
| laserkelvin/FTSpecViewer | python/qtmain.py | Python | gpl-3.0 | 38,680 |
import os
from urllib import parse as urlparse
def fix_catalog_url(url):
    """
    Normalise a THREDDS catalog URL.

    A ``.html`` extension on the URL path is replaced with ``.xml``; a
    path with no extension gets ``catalog.xml`` appended.  Query and
    fragment parts of the URL are preserved untouched.
    """
    from os.path import splitext
    u = urlparse.urlsplit(url)
    name, ext = splitext(u.path)
    if ext == ".html":
        # Rewrite only the path's extension.  A plain str.replace on the
        # whole URL would also hit ".html" occurring elsewhere (an earlier
        # directory component, the query string, ...).
        u = u._replace(path=name + ".xml")
    elif ext == '':
        # No extension: point at the default catalog document.  Plain string
        # concatenation (not os.path.join) keeps the URL separator "/" on
        # every platform, including Windows.
        u = u._replace(path=u.path.rstrip("/") + "/catalog.xml")
    return u.geturl()
def construct_url(url, href):
    """
    Resolve *href* against the catalog URL *url* and return a full URL.

    Absolute paths are joined to the scheme+host of *url*; full HTTP(S)
    links are returned unchanged; anything else is treated as a path
    relative to the directory containing *url*.
    """
    pieces = urlparse.urlsplit(url)
    site_root = pieces.scheme + "://" + pieces.netloc
    parent_dir = urlparse.urljoin(site_root, os.path.split(pieces.path)[0])
    if href[0] == "/":
        # Absolute path on the same host.
        return urlparse.urljoin(site_root, href)
    if href[0:4] == "http":
        # Already a full HTTP(S) link.
        return href
    # Relative path: resolve against the parent directory of *url*.
    return parent_dir + "/" + href
def size_in_bytes(size, unit):
    """
    Convert *size* expressed in *unit* to an integer number of bytes.

    Recognised units are "Kbytes", "Mbytes", "Gbytes" and "Tbytes"
    (decimal multiples); any other unit leaves *size* unscaled.
    """
    factors = {
        "Kbytes": 1000.0,
        "Mbytes": 1e+6,
        "Gbytes": 1e+9,
        "Tbytes": 1e+12,
    }
    # Unknown units fall back to a factor of 1 (size already in bytes).
    return int(size * factors.get(unit, 1))
| bird-house/threddsclient | threddsclient/utils.py | Python | apache-2.0 | 1,127 |
'''
Defines the preferences dialog.
@author: Eitan Isaacson
@organization: Mozilla Foundation
@copyright: Copyright (c) 2006, 2007 Mozilla Foundation
@license: BSD
All rights reserved. This program and the accompanying materials are made
available under the terms of the BSD which accompanies this distribution, and
is available at U{http://www.opensource.org/licenses/bsd-license.php}
'''
import gi
from gi.repository import Gtk as gtk
from gi.repository import Gdk as gdk
from gi.repository import Atk as atk
from gi.repository.Gio import Settings as GSettings
from i18n import _
import node
from tools import parseColorString
class AccerciserPreferencesDialog(gtk.Dialog):
  '''
  Preferences dialog for Accerciser: a notebook with pages for the
  plugin list, the global hotkeys and the highlighter settings.
  '''
  def __init__(self, plugins_view=None, hotkeys_view=None):
    '''
    Build the dialog and populate its notebook pages.
    @param plugins_view: Treeview of plugins.
    @type plugins_view: L{PluginManager._View}
    @param hotkeys_view: Treeview of global hotkeys.
    @type hotkeys_view: L{HotkeyTreeView}
    '''
    gtk.Dialog.__init__(self, _('accerciser Preferences'),
                        buttons=(gtk.STOCK_CLOSE, gtk.ResponseType.CLOSE))
    self.connect('response', self._onResponse)
    self.set_default_size(500,250)
    pages = gtk.Notebook()
    # The dialog's content area is its first child.
    self.get_children()[0].add(pages)
    for view, title in [(plugins_view, _('Plugins')),
                        (hotkeys_view, _('Global Hotkeys'))]:
      if view is None:
        continue
      pages.append_page(self._wrapInScroller(view), gtk.Label(title))
    pages.append_page(_HighlighterView(), gtk.Label(_('Highlighting')))
  def _wrapInScroller(self, view):
    '''
    Put a view inside a scrolled window sized for this dialog.
    @param view: Widget to wrap.
    @return: Scrolled window containing the view.
    '''
    scroller = gtk.ScrolledWindow()
    scroller.set_shadow_type(gtk.ShadowType.IN)
    scroller.set_policy(gtk.PolicyType.AUTOMATIC, gtk.PolicyType.AUTOMATIC)
    scroller.set_size_request(500, 150)
    scroller.add(view)
    return scroller
  def _onResponse(self, dialog, response_id):
    '''
    Callback for dialog responses, always destroy it.
    @param dialog: This dialog.
    @type dialog: L{AccerciserPreferencesDialog}
    @param response_id: Response ID received.
    @type response_id: integer
    '''
    dialog.destroy()
class _HighlighterView(gtk.Alignment):
  '''
  A container widget with the settings for the highlighter: the highlight
  duration and the border and fill colors.  Changes are written both to
  GSettings and to the module-level globals in L{node}.
  '''
  def __init__(self):
    gtk.Alignment.__init__(self)
    self.set_padding(12, 12, 18, 12)
    # Settings backend shared with the rest of Accerciser.
    self.gsettings = GSettings(schema='org.a11y.Accerciser')
    self._buildUI()
  def _buildUI(self):
    '''
    Programmatically build the UI: a duration spin button and two color
    buttons laid out in a table, with ATK label/control relationships.
    '''
    table = gtk.Table(3, 2)
    table.set_col_spacings(6)
    self.add(table)
    labels = [None, None, None]
    controls = [None, None, None]
    labels[0] = gtk.Label(_('Highlight duration:'))
    controls[0] = gtk.SpinButton()
    controls[0].set_range(0.01, 5)
    controls[0].set_digits(2)
    controls[0].set_value(self.gsettings.get_double('highlight-duration'))
    controls[0].set_increments(0.01, 0.1)
    controls[0].connect('value-changed', self._onDurationChanged)
    labels[1] = gtk.Label(_('Border color:'))
    controls[1] = self._ColorButton(node.BORDER_COLOR, node.BORDER_ALPHA)
    controls[1].connect('color-set', self._onColorSet, 'highlight-border')
    controls[1].set_tooltip_text(_('The border color of the highlight box'))
    labels[2] = gtk.Label(_('Fill color:'))
    controls[2] = self._ColorButton(node.FILL_COLOR, node.FILL_ALPHA)
    controls[2].connect('color-set', self._onColorSet, 'highlight-fill')
    controls[2].set_tooltip_text(_('The fill color of the highlight box'))
    for label, control, row in zip(labels, controls, range(3)):
      label.set_alignment(0, 0.5)
      table.attach(label, 0, 1, row, row + 1, gtk.AttachOptions.FILL)
      table.attach(control, 1, 2, row, row + 1, gtk.AttachOptions.FILL)
    # Pair each label with its control at the ATK level so assistive
    # technologies announce them together.
    for label, control in zip(map(lambda x: x.get_accessible(),labels),
                              map(lambda x: x.get_accessible(),controls)):
      label.add_relationship(atk.RelationType.LABEL_FOR, control)
      control.add_relationship(atk.RelationType.LABELLED_BY, label)
  def _onDurationChanged(self, spin_button):
    '''
    Callback for the duration spin button. Update key and the global variable
    in the L{node} module.
    @param spin_button: The spin button that emitted the value-changed signal.
    @type spin_button: gtk.SpinButton
    '''
    # node.HL_DURATION is in milliseconds; the setting is in seconds.
    node.HL_DURATION = int(spin_button.get_value()*1000)
    self.gsettings.set_double('highlight-duration',
                              spin_button.get_value())
  def _onColorSet(self, color_button, key):
    '''
    Callback for a color button. Update gsettings and the global variables
    in the L{node} module.
    @param color_button: The color button that emitted the color-set signal.
    @type color_button: l{_HighlighterView._ColorButton}
    @param key: the key name suffix for this color setting.
    @type key: string
    '''
    if 'fill' in key:
      node.FILL_COLOR = color_button.get_rgb_string()
      node.FILL_ALPHA = color_button.get_alpha_float()
    else:
      node.BORDER_COLOR = color_button.get_rgb_string()
      node.BORDER_ALPHA = color_button.get_alpha_float()
    self.gsettings.set_string(key, color_button.get_rgba_string())
  class _ColorButton(gtk.ColorButton):
    '''
    ColorButton derivative with useful methods for us.
    '''
    def __init__(self, color, alpha):
      color = gdk.color_parse(color)
      gtk.ColorButton.__init__(self)
      self.set_use_alpha(True)
      # GTK alpha is 16 bit (0..0xffff); *alpha* is a 0.0-1.0 float.
      self.set_alpha(int(alpha*0xffff))
      self.set_color(color)
    def get_rgba_string(self):
      '''
      Get the current color and alpha in string format.
      @return: String in the format of #rrggbbaa.
      @rtype: string.
      '''
      color = self.get_color()
      color_val = 0
      # GDK channels are 16 bit; keep only the high byte of each.
      color_val |= color.red >> 8 << 24
      color_val |= color.green >> 8 << 16
      color_val |= color.blue >> 8 << 8
      color_val |= self.get_alpha() >> 8
      # Zero-padded lowercase hex; replaces the old Python 2
      # hex()/replace('L')/rjust construction with identical output.
      return '#%08x' % color_val
    def get_rgb_string(self):
      '''
      Get the current color in string format.
      @return: String in the format of #rrggbb.
      @rtype: string.
      '''
      color = self.get_color()
      color_val = 0
      color_val |= color.red >> 8 << 16
      color_val |= color.green >> 8 << 8
      color_val |= color.blue >> 8
      return '#%06x' % color_val
    def get_alpha_float(self):
      '''
      Get the current alpha as a value from 0.0 to 1.0.
      '''
      return self.get_alpha()/float(0xffff)
| javihernandez/accerciser-mirror | src/lib/accerciser/prefs_dialog.py | Python | bsd-3-clause | 6,660 |
# Copyright (c) 2015 Jose David Fernandez Rodriguez
#
# This file is distributed under the terms of the
# GNU Affero General Public License, version 3
# as published by the Free Software Foundation.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this file. You may obtain a copy of the License at
# http://www.gnu.org/licenses/agpl-3.0.txt
from pyclipper.plot import *
from pyclipper.Clipper import * | jdfr/pyslic3r | pyclipper/all.py | Python | agpl-3.0 | 698 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.