repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
tylereaves/26md | setlist/management/commands/__init__.py | Python | bsd-3-clause | 22 | 0 | __auth | or__ = 'Ty | ler'
|
coala/coala | tests/results/SourcePositionTest.py | Python | agpl-3.0 | 1,906 | 0.000525 | import unittest
from os.path import relpath
from coalib.results.SourcePosition import SourcePosition
from coala_utils.ContextManagers import prepare_file
class SourcePositionTest(unittest.TestCase):
def test_initialization(self):
with self.assertRaises(TypeError):
SourcePosition(None, 0)
with self.assertRaises(ValueError):
SourcePosition('file', None, 1)
# However these should work:
SourcePosition('file', None, None)
SourcePosition('file', 4, None)
SourcePosition('file', 4, 5)
def test_string_conversion(self):
uut = SourcePosition('filename', 1)
self.assertRegex(
repr(uut),
| "<SourcePosition object\\(file='.*filename', line=1, "
'column=None\\) at 0x[0-9a-fA-F]+>')
self.assertEqual(str(uut), 'filename:1')
uut = SourcePosition('None', None)
self.assertRegex(
repr(uut),
"<SourcePosition object\\(file='.*None', line=None, column=None\\) "
'at 0x[0-9a-fA-F]+>')
self.assertEqual(str(uut), 'None')
uut = Source | Position('filename', 3, 2)
self.assertEqual(str(uut), 'filename:3:2')
def test_json(self):
with prepare_file([''], None) as (_, filename):
uut = SourcePosition(filename, 1)
self.assertEqual(uut.__json__(use_relpath=True)
['file'], relpath(filename))
def assert_equal(self, first, second):
self.assertGreaterEqual(first, second)
self.assertEqual(first, second)
self.assertLessEqual(first, second)
def assert_ordering(self, greater, lesser):
self.assertGreater(greater, lesser)
self.assertGreaterEqual(greater, lesser)
self.assertNotEqual(greater, lesser)
self.assertLessEqual(lesser, greater)
self.assertLess(lesser, greater)
|
pytorch/fairseq | examples/discriminative_reranking_nmt/tasks/discriminative_reranking_task.py | Python | mit | 17,731 | 0.001184 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import itertools
import logging
import os
import numpy as np
import torch
from fairseq import metrics
from fairseq.data import (
ConcatDataset,
ConcatSentencesDataset,
data_utils,
Dictionary,
IdDataset,
indexed_dataset,
NestedDictionaryDataset,
NumSamplesDataset,
NumelDataset,
PrependTokenDataset,
RawLabelDataset,
RightPadDataset,
SortDataset,
TruncateDataset,
TokenBlockDataset,
)
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
from omegaconf import II, MISSING
EVAL_BLEU_ORDER = 4
TARGET_METRIC_CHOICES = ChoiceEnum(["bleu", "ter"])
logger = logging.getLogger(__name__)
@dataclass
class DiscriminativeRerankingNMTConfig(FairseqDataclass):
data: str = field(default=MISSING, metadata={"help": "path to data directory"})
num_data_splits: int = field(
default=1, metadata={"help": "total number of data splits"}
)
no_shuffle: bool = field(
default=False, metadata={"help": "do not shuffle training data"}
)
max_positions: int = field(
default=512, metadata={"help": "number of positional embeddings to learn"}
)
include_src: bool = field(
default=False, metadata={"help": "include source sentence"}
)
mt_beam: int = field(default=50, metadata={"help": "beam size of input hypotheses"})
eval_target_metric: bool = field(
default=False,
metadata={"help": "evaluation with the target metric during validation"},
)
target_metric: TARGET_METRIC_CHOICES = field(
default="bleu", metadata={"help": "name of the target metric to optimize for"}
)
train_subset: str = field(
default=II("dataset.train_subset"),
metadata={"help": "data subset to use for training (e.g. train, valid, test)"},
)
seed: int = field(
default=II("common.seed"),
metadata={"help": "pseudo random number generator seed"},
)
class RerankerScorer(object):
"""Scores the target for a given (source (optional), target) input."""
def __init__(self, args, mt_beam):
self.mt_beam = mt_beam
@torch.no_grad()
def generate(self, models, sample, **kwargs):
"""Score a batch of translations."""
net_input = sample["net_input"]
assert len(models) == 1, "does not support model ensemble"
model = models[0]
bs = net_input["src_tokens"].shape[0]
assert (
model.joint_classification == "none" or bs % self.mt_beam == 0
), f"invalid batch size ({bs}) for joint classification with beam size ({self.mt_beam})"
model.eval()
logits = model(**net_input)
batch_out = model.sentence_forward(logits, net_input["src_tokens"])
if model.joint_classification == "sent":
batch_out = model.joint_forward(
batch_out.view(self.mt_beam, bs // self.mt_beam, -1)
)
scores = model.classification_forward(
batch_out.view(bs, 1, -1)
) # input: B x T x C
return scores
@register_task(
"discriminative_reranking_nmt", dataclass=DiscriminativeRerankingNMTConfig
)
class DiscriminativeRerankingNMTTask(FairseqTask):
"""
Translation rerank task.
The input can be either (src, tgt) sentence pairs or tgt sentence only.
"""
cfg: DiscriminativeRerankingNMTConfig
def __init__(self, cfg: DiscriminativeRerankingNMTConfig, data_dictionary=None):
super().__init__(cfg)
self.dictionary = data_ | dictionary
self._max_positions = cfg.max_positions
# args.tokens_per_sample = self._max_positions
# self.num_classes = 1 # for model
@classmethod
def load_dictionary(cls, cfg, filename):
"""Load the dictionary from the filename"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol("<mask>") # for loading pretrained XLMR model
return diction | ary
@classmethod
def setup_task(cls, cfg: DiscriminativeRerankingNMTConfig, **kwargs):
# load data dictionary (assume joint dictionary)
data_path = cfg.data
data_dict = cls.load_dictionary(
cfg, os.path.join(data_path, "input_src/dict.txt")
)
logger.info("[input] src dictionary: {} types".format(len(data_dict)))
return DiscriminativeRerankingNMTTask(cfg, data_dict)
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
"""Load a given dataset split (e.g., train, valid, test)."""
if self.cfg.data.endswith("1"):
data_shard = (epoch - 1) % self.cfg.num_data_splits + 1
data_path = self.cfg.data[:-1] + str(data_shard)
else:
data_path = self.cfg.data
def get_path(type, data_split):
return os.path.join(data_path, str(type), data_split)
def make_dataset(type, dictionary, data_split, combine):
split_path = get_path(type, data_split)
dataset = data_utils.load_indexed_dataset(
split_path,
dictionary,
combine=combine,
)
return dataset
def load_split(data_split, metric):
input_src = None
if self.cfg.include_src:
input_src = make_dataset(
"input_src", self.dictionary, data_split, combine=False
)
assert input_src is not None, "could not find dataset: {}".format(
get_path("input_src", data_split)
)
input_tgt = make_dataset(
"input_tgt", self.dictionary, data_split, combine=False
)
assert input_tgt is not None, "could not find dataset: {}".format(
get_path("input_tgt", data_split)
)
label_path = f"{get_path(metric, data_split)}.{metric}"
assert os.path.exists(label_path), f"could not find dataset: {label_path}"
np_labels = np.loadtxt(label_path)
if self.cfg.target_metric == "ter":
np_labels = -np_labels
label = RawLabelDataset(np_labels)
return input_src, input_tgt, label
src_datasets = []
tgt_datasets = []
label_datasets = []
if split == self.cfg.train_subset:
for k in itertools.count():
split_k = "train" + (str(k) if k > 0 else "")
prefix = os.path.join(data_path, "input_tgt", split_k)
if not indexed_dataset.dataset_exists(prefix, impl=None):
if k > 0:
break
else:
raise FileNotFoundError(f"Dataset not found: {prefix}")
input_src, input_tgt, label = load_split(
split_k, self.cfg.target_metric
)
src_datasets.append(input_src)
tgt_datasets.append(input_tgt)
label_datasets.append(label)
else:
input_src, input_tgt, label = load_split(split, self.cfg.target_metric)
src_datasets.append(input_src)
tgt_datasets.append(input_tgt)
label_datasets.append(label)
if len(tgt_datasets) == 1:
input_tgt, label = tgt_datasets[0], label_datasets[0]
if self.cfg.include_src:
input_src = src_datasets[0]
else:
input_tgt = ConcatDataset(tgt_datasets)
label = ConcatDataset(label_datasets)
if self.cfg.include_src:
input_src = ConcatDataset(src_datasets)
input_tgt = TruncateDataset(input_tgt, self.cfg.max_positions)
if self.cfg.include_src:
input_src = PrependTokenDataset(input_src, self.dictionary.bos())
input_src = TruncateDataset(input_src, self.cfg.max_positions)
src_lengths = NumelDatas |
mtils/ems | examples/qt4/widgets/test_imageeditor.py | Python | mit | 341 | 0.005865 | #coding=utf-8
from | ems.qt4.application import MainApplication
from ems.qt4.gui.widgets.imageeditor import ImageEditor
import sys
if __name__ == '__main__':
import sys
app = MainApplication(sys.argv)
fileName = sys.argv[1] if len(sys.argv) > 1 else None
dlg = ImageEditor.toDialog(fileName)
dlg.show()
app | .exec_() |
palaniyappanBala/rekall | rekall-core/rekall/config.py | Python | gpl-2.0 | 8,986 | 0.00089 | #!/usr/bin/python
# Rekall
# Copyright (C) 2012 Michael Cohen <scudette@gmail.com>
# Copyright 2013 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""This is the Rekall configuration system.
Rekall maintains a persistent file with global settings in the user's home
directory. This makes it easy for users to retain commonly used Rekall
parameters.
Note that the configuration file is only used in interactive mode. When used as
a library the configuration file has no effect.
"""
__author__ = "Michael Cohen <scudette@gmail.com>"
import collections
import logging
import yaml
import os
import sys
import tempfile
from rekall import constants
class CommandMetadata(object):
"""A class that carried a plugin's configuration.
A plugin is responsible for declaring its metadata by calling this
configuration object's methods from the args() class method.
There are two things that plugin must declare:
add_*_arg(): Calling these functions declares an argument for this
plugin. See the documentation for that method for details.
add_metadata(): This method provides additional metadata about this plugin.
"""
def __init__(self, plugin_cls=None):
self.args = collections.OrderedDict()
self.requirements = set()
self.plugin_cls = plugin_cls
if plugin_cls:
plugin_cls.args(self)
self.description = (plugin_cls.__doc__ or
plugin_cls.__init__.__doc__ or "")
def set_description(self, description):
self.description = description
def add_positional_arg(self, name, type="string"):
"""Declare a positional arg."""
self.args[name] = dict(type=type)
def add_argument(self, short_opt, long_opt=None, **options):
"""Add a new argument to the command.
This method is used in the args() class method to add a new command line
arg to the plugin. It is similar to the argparse add_argument() method
but it adds a type parameter which conveys higher level information
about the argument. Currently supported types:
- ArrayIntParser: A list of integers (possibly encoded as he | x strings).
- ArrayStringParser: A list of strings.
- | Float: A float.
- IntParser: An integer (possibly encoded as a hex string).
- Boolean: A flag - true/false.
- ChoiceArray: A comma separated list of strings which must be from the
choices parameter.
"""
if "action" in options:
raise RuntimeError("Action keyword is deprecated.")
if not isinstance(options.get("type", ""), str):
raise RuntimeError("Type must be a string.")
# Is this a positional arg?
positional = options.pop("positional", True)
# For now we support option names with leading --.
if long_opt is None:
long_opt = short_opt
short_opt = ""
if long_opt.startswith("-"):
long_opt = long_opt.lstrip("-")
short_opt = short_opt.lstrip("-")
positional = False
name = long_opt
options["short_opt"] = short_opt
options["positional"] = positional
options["name"] = name
self.args[name] = options
def add_requirement(self, requirement):
"""Add a requirement for this plugin.
Currently supported requirements:
- profile: A profile must exist for this plugin to run.
- physical_address_space: A Physical Address Space (i.e. an image file)
must exist for this plugin to work.
"""
self.requirements.add(requirement)
def Metadata(self):
return dict(requirements=list(self.requirements),
arguments=self.args.values(), name=self.plugin_cls.name,
description=self.description)
def ApplyDefaults(self, args):
"""Update args with the defaults.
If an option in args is None, we update it with the default value for
this option.
"""
for name, options in self.args.iteritems():
if options.get("dest") == "SUPPRESS":
continue
name = name.replace("-", "_")
if args[name] is None:
args[name] = options.get("default")
return args
def GetHomeDir(session):
return (
session.GetParameter("home", cached=False) or
os.environ.get("HOME") or # Unix
os.environ.get("USERPROFILE") or # Windows
tempfile.gettempdir() or # Fallback tmp dir.
".")
# This is the configuration file template which will be created if the user does
# not have an existing file. The aim is not to exhaustively list all possible
# options, rather to ensure that reasonable defaults are specified initially.
DEFAULT_CONFIGURATION = dict(
repository_path=constants.PROFILE_REPOSITORIES,
# This is the path of the cache directory - given relative to the config
# file (or it can be specified as an absolute path).
cache_dir=".rekall_cache",
)
# Global options control the framework's own flags. They are not associated with
# any one plugin.
OPTIONS = CommandMetadata()
def GetConfigFile(session):
"""Gets the configuration stored in the config file.
Searches for the config file in reasonable locations.
Return:
configuration stored in the config file. If the file is not found, returns
an empty configuration.
"""
search_path = [
# Next to the main binary (in case of pyinstaller - rekall.exe).
os.path.join(os.path.dirname(sys.executable), ".rekallrc"),
".rekallrc", # Current directory.
os.path.join(GetHomeDir(session), ".rekallrc"), # Home directory overrides system.
"/etc/rekallrc",
]
for path in search_path:
try:
with open(path, "rb") as fd:
result = yaml.safe_load(fd)
logging.debug("Loaded configuration from %s", path)
# Allow the config file to update the
# environment. This is handy in standalone deployment
# where one can update %HOME% and ensure Rekall does
# not touch the drive.
os.environ.update(result.get("environment", {}))
return result
except (IOError, ValueError):
pass
return {}
def CreateDefaultConfigFile(session):
"""Creates a default config file."""
homedir = GetHomeDir(session)
if homedir:
try:
filename = "%s/.rekallrc" % homedir
with open(filename, "wb") as fd:
yaml.dump(DEFAULT_CONFIGURATION, fd)
logging.info("Created new configuration file %s", filename)
cache_dir = os.path.join(
homedir, DEFAULT_CONFIGURATION["cache_dir"])
os.makedirs(cache_dir)
logging.info("Created new cache directory %s", cache_dir)
return DEFAULT_CONFIGURATION
except (IOError, OSError):
pass
# Can not write it anywhere but at least we start with something sensible.
return DEFAULT_CONFIGURATION
def MergeConfigOptions(state, session):
"""Read the config file and apply the config options to the session."""
config_data = GetConfigFile(session)
# An empty configuration file - we try to initialize a new one.
if not config_data:
config_data = CreateDefaultConfigFile(s |
severin-lemaignan/dialogs | src/dialogs/timescale_manager_test.py | Python | bsd-3-clause | 33,296 | 0.004835 | # coding=utf-8
"""
Created by Chouayakh Mahdi
26/08/2010
The package contains the unit test of timescale_manager function
unit_tests : to perform unit tests
"""
import unittest
import logging
from dialogs.sentence import *
from . import timescale_manager
def print_time(time):
print((time['year'] + '/' + time['month'] + '/' + time['day']))
print((time['hour'] + ':' + time['minute'] + ':' + time['second']))
class TestTimescale(unittest.TestCase):
"""
Function to perform unit tests
"""
def test_01(self):
print('')
print('######################## test 1.1 ##############################')
print("Object of this test : Without indirect complement and without adverb")
print('')
d_time = {'year': '2010', 'month': 'August', 'day': '27', 'hour': '10', 'minute': '0', 'second': '0'}
sentence = Sentence('statement', '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['play'], [], 'future simple',
[NominalGroup(['a'], ['guitar'], [], [], []),
NominalGroup(['a'], ['piano'], [], [], []),
NominalGroup(['a'], ['violon'], [], [], [])],
[],
[], [], 'affirmative', [])])
print('The sentence that we will process is : ')
print("I will play a guitar a piano and a violon.")
print('')
print('The time of speaking sentence is : ')
print_time(d_time)
time = timescale_manager.timescale_sentence(sentence.sv[0].i_cmpl, sentence.sv[0].advrb, d_time)
if time['action_period'] is not None:
print('')
print('The period of the action is : ')
print_time(time['action_period']['time_begin'])
print_time(time['action_period']['time_end'])
if time['effective_time'] is not None:
print('')
print('The effective time of the action is : ')
print_time(time['effective_time'])
rslt = {'action_period': None, 'effective_time': d_time}
self.assertEqual(time, rslt)
print('')
def test_02(self):
print('')
print('######################## test 1.2 ##############################')
print("Object of this test : With just an indirect complement but not for time")
print('')
d_time = {'year': '2010', 'month': 'August', 'day': '27', 'hour': '10', 'minute': '0', 'second': '0'}
sentence = Sentence('statement', '',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup(['the'], ['table'], [], [], [])])],
[], [], 'affirmative', [])])
print('The sentence that we will process is : ')
print("the bottle is on the table")
print('')
print('The time of speaking sentence is : ')
print_time(d_time)
time = timescale_manager.timescale_sentence(sentence.sv[0].i_cmpl, sentence.sv[0].advrb, d_time)
if time['action_period'] is not None:
print('')
print('The period of the action is : ')
print_time(time['action_period']['time_begin'])
print_time(time['action_period']['time_end'])
if time['effective_time'] is not None:
print('')
print('The effective time of the action is : ')
print_time(time['effective_time'])
rslt = {'action_period': None, 'effective_time': d_time}
self.assertEqual(time, rslt)
print('')
def test_03(self):
print('')
print('######################## test 1.3 ##############################')
print("Object of this test : With just an indirect complement but not for time")
print('')
d_time = {'year': '2010', 'month': 'August', 'day': '27', 'hour': '10', 'minute': '0', 'second': '0'}
sentence = Sentence('statement', '',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['prepare'], [], 'present progressive',
[NominalGroup(['the'], ['car'], [], [], []),
NominalGroup(['the'], ['moto'], [],
[NominalGroup(['my'], ['father'], [], [], [])], [])],
[IndirectComplement(['at'], [
NominalGroup(['the'], ['time'], [['same', []]], [], [])])],
[], [], 'negative', [])])
print('The sentence that we will process is : ')
print("you are not preparing the car and the moto of my father at the same time")
print('')
print('The time of speaking sentence is : ')
print_time(d_time)
time = timescale_manager.timescale_sentence(sentence.sv[0].i_cmpl, sentence.sv[0].advrb, d_time)
if time['action_period'] is not None:
print('')
print('The period of the action is : ')
print_time(time['action_period']['time_begin'])
print_time(time['action_period']['time_end'])
if time['effective_time'] is not None:
print('')
print('The effective time of the action is : ')
print_time(time['effective_time'])
rslt = {'action_period': None, 'effective_time': d_time}
self.assertEqual(time, rslt)
print('')
def test_04(self):
print('')
print('############# | ########### test 1.4 ##############################')
print("Object of this test : With an indirect complement and adverb")
print('')
d_time = {'year': '2010', 'month': 'August', 'day': '27', 'hour': '10', 'minute': '0', 'second': '0'}
sentence = Sentence('w_question', 'description',
[NominalGroup(['the'], ['weather'], [], [], [])],
[VerbalGroup(['like'], [], 'present s | imple',
[],
[IndirectComplement(['in'],
[NominalGroup(['the'], ['winter'], [], [], [])])],
[], ['here'], 'affirmative', [])])
print('The sentence that we will process is : ')
print("what is the weather like here in the winter?")
print('')
print('The time of speaking sentence is : ')
print_time(d_time)
time = timescale_manager.timescale_sentence(sentence.sv[0].i_cmpl, sentence.sv[0].advrb, d_time)
if time['action_period'] is not None:
print('')
print('The period of the action is : ')
print_time(time['action_period']['time_begin'])
print_time(time['action_period']['time_end'])
if time['effective_time'] is not None:
print('')
print('The effective time of the action is : ')
print_time(time['effective_time'])
rslt = {'action_period': None, 'effective_time': d_time}
self.assertEqual(time, rslt)
print('')
def test_05(self):
print('')
print('######################## test 1.5 ##############################')
print("Object of this test : Adverb 'now' alone is like we have nothing")
print('')
d_time = {'year': '2010', 'month': 'August', 'day': '27', 'hour': '10', 'minute': '0', 'second': '0'}
sentence = Sentence('yes_no_ |
Eveler/libs | __Python__/ufms_blanks/appy3/fields/search.py | Python | gpl-3.0 | 8,383 | 0.002982 | # ------------------------------------------------------------------------------
# This file is part of Appy, a framework for building applications in the Python
# language. Copyright (C) 2007 Gaetan Delannay
# Appy is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
# Appy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# Appy. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------------------------
from appy.px import Px
from appy.gen import utils as gutils
from appy.gen.indexer import defaultIndexes
from appy.shared import utils as sutils
from .group import Group
# ------------------------------------------------------------------------------
class Search:
'''Used for specifying a search for a given class.'''
def __init__(self, name, group=None, sortBy='', sortOrder='asc', limit=None,
default=False, colspan=1, translated=None, show=True,
translatedDescr=None, **fields):
self.name = name
# Searches may be visually grouped in the portlet.
self.group = Group.get(group)
self.sortBy = sortBy
self.sortOrder = sortOrder
self.limit = limit
# If this search is the default one, it will be triggered by clicking
# on main link.
self.default = default
self.colspan = colspan
# If a translated name or description is already given here, we will
# use it instead of trying to translate from labels.
self.translated = translated
self.translatedDescr = translatedDescr
# Condition for showing or not this search
self.show = show
# In the dict below, keys are indexed field names or names of standard
# indexes, and values are search values.
self.fields = fields
@staticmethod
def getIndexName(fieldName, usage='search'):
'''Gets the name of the technical index that corresponds to field named
p_fieldName. Indexes can be used for searching (p_usage="search") or
for sorting (usage="sort"). The method returns None if the field
named p_fieldName can't be used for p_usage.'''
if fieldName == 'title':
if usage == 'search': return 'Title'
else: return 'SortableTitle'
# Indeed, for field 'title', Appy has a specific index
# 'SortableTitle', because index 'Title' is a TextIndex
# (for searchability) and can't be used for sorting.
elif fieldName == 'state': return 'State'
elif fieldName == 'created': return 'Created'
elif fieldName == 'modified': return 'Modified'
elif fieldName in defaultIndexes: return fieldName
else:
return 'get%s%s'% (fieldName[0].upper(),fieldName[1:])
@staticmethod
def getSearchValue(fieldName, fieldValue, klass):
'''Returns a transformed p_fieldValue for producing a valid search
value as required for searching in the index corresponding to
p_fieldName.'''
field = getattr(klass, fieldName, None)
if (field and (field.getIndexType() == 'TextIndex')) or \
(fieldName == 'SearchableText'):
# For TextIndex indexes. We must split p_fieldValue into keywords.
res = gutils.Keywords(fieldValue).get()
elif isinstance(fieldValue, str) and fieldValue.endswith('*'):
v = fieldValue[:-1]
# Warning: 'z' is higher than 'Z'!
res = {'query':(v,v+'z'), 'range':'min:max'}
elif type(fieldValue) in sutils.sequenceTypes:
if fieldValue and isinstance(fieldValue[0], str):
# We have a list of string values (ie: we need to
# search v1 or v2 or...)
res = fieldValue
else:
# We have a range of (int, float, DateTime...) values
minv, maxv = fieldValue
rangev = 'minmax'
queryv = fieldValue
if minv == None:
rangev = 'max'
queryv = maxv
elif maxv == No | ne:
rangev = 'min'
queryv = minv
res = {'query':queryv, | 'range':rangev}
else:
res = fieldValue
return res
def updateSearchCriteria(self, criteria, klass, advanced=False):
'''This method updates dict p_criteria with all the search criteria
corresponding to this Search instance. If p_advanced is True,
p_criteria correspond to an advanced search, to be stored in the
session: in this case we need to keep the Appy names for parameters
sortBy and sortOrder (and not "resolve" them to Zope's sort_on and
sort_order).'''
# Put search criteria in p_criteria
for fieldName, fieldValue in self.fields.items():
# Management of searches restricted to objects linked through a
# Ref field: not implemented yet.
if fieldName == '_ref': continue
# Make the correspondence between the name of the field and the
# name of the corresponding index, excepted if advanced is True: in
# that case, the correspondence will be done later.
if not advanced:
attrName = Search.getIndexName(fieldName)
# Express the field value in the way needed by the index
criteria[attrName] = Search.getSearchValue(fieldName,
fieldValue, klass)
else:
criteria[fieldName]= fieldValue
# Add a sort order if specified
if self.sortBy:
if not advanced:
criteria['sort_on'] = Search.getIndexName(self.sortBy,
usage='sort')
if self.sortOrder == 'desc': criteria['sort_order'] = 'reverse'
else: criteria['sort_order'] = None
else:
criteria['sortBy'] = self.sortBy
criteria['sortOrder'] = self.sortOrder
def isShowable(self, klass, tool):
'''Is this Search instance (defined in p_klass) showable?'''
if self.show.__class__.__name__ == 'staticmethod':
return gutils.callMethod(tool, self.show, klass=klass)
return self.show
class UiSearch:
'''Instances of this class are generated on-the-fly for manipulating a
Search from the User Interface.'''
# PX for rendering a search.
pxView = Px('''
<div class="portletSearch">
<a href=":'%s?className=%s&search=%s' % \
(queryUrl, className, search.name)"
class=":(search.name == currentSearch) and 'current' or ''"
title=":search.translatedDescr">:search.translated</a>
</div>''')
def __init__(self, search, className, tool):
self.search = search
self.name = search.name
self.type = 'search'
self.colspan = search.colspan
if search.translated:
self.translated = search.translated
self.translatedDescr = search.translatedDescr
else:
# The label may be specific in some special cases.
labelDescr = ''
if search.name == 'allSearch':
label = '%s_plural' % className
elif search.name == 'customSearch':
label = 'search_results'
else:
label = '%s_search_%s' % (className, search.name)
labelDescr = label + '_descr'
self.translated = tool.translate(label)
if labelDescr:
|
ad-dycost/mindhouse | manage_pressure/watching.py | Python | gpl-3.0 | 576 | 0.013889 | #!/usr/bin/env python
# - * - mode: python; coding: utf-8 - * -
# Copyright (C) 2013 Andrey Degtyarev <ad.dycost@gmail.com>
# This program is distributed licensed under the GNU General Public License v.3
# as published by the Free Softwa | re Foundation.
import manage_pressure.constants, manage_pressure.work_device, time
def control(motor_id, pressure_1_id, pressure_2_id):
devices = manage_pressure.work_device.WorkDevice(motor_id, pressure_1_id, pressure_2_id)
while 1:
devi | ces.check()
devices.action()
time.sleep(manage_pressure.constants.TIME_REQUEST_DEVICE)
|
zygmuntz/evaluating-recommenders | plot_ndcgs.py | Python | bsd-3-clause | 885 | 0.055367 | "plot average NDCGs"
import cPickle as pickle
from collections import OrderedDict
from matplotlib import pyplot as plt
from os import listdir
from os. | path import isfile, join
# minimum number of users we | have scores for to average
min_users = 10
input_dir = 'ndcgs/'
# a list of input files
input_files = [ join( input_dir, f ) for f in listdir( input_dir ) if isfile( join( input_dir, f )) and f.endswith( '.pkl' ) ]
for i_f in input_files:
print i_f
#
ndcgs = [ pickle.load( open( i_f, 'rb' ))['ndcgs'] for i_f in input_files ]
for i in range( len( ndcgs )):
assert( sorted( ndcgs[i].keys()) == ndcgs[i].keys())
mean_ndcgs = [
OrderedDict( { k: sum( v ) / len( v ) for k, v in x.items() if len( v ) >= min_users } )
for x in ndcgs ]
colors = [ 'g', 'b', 'r', 'k', 'y' ]
for i, n in enumerate( mean_ndcgs ):
plt.plot( n.keys(), n.values(), colors[i] )
plt.show()
|
vonwenm/pbft | sfslite-1.2/Attic/python/ex1/tst.py | Python | gpl-2.0 | 165 | 0.054545 |
import ex1
import async
import socket
sock = socket.socket ()
fd = sock.fileno ()
x = async.arpc.axprt_s | tream (fd)
cl = async.arpc. | aclnt (x, ex1.foo_prog_1 ())
|
opendatakosovo/relate-with-it | importer/kosovo_importer.py | Python | mit | 1,479 | 0.005409 | from abstract_importer import AbstractImporter
from slugify import slugify
class KosovoImporter(AbstractImporter):
def __init__(self):
pass
def get_csv_filename(self):
return "importer/data/kosovo/kosovo-budget-expenditures-2014.csv"
def get_region(self):
return 'Kosovo'
def get_dataset(self):
return 'Budget Expenditure (2014)'
def build_docs(self, row):
# In this case, it's because in the CSV doc there is a column for each year...
year = row[3]
# Clean expense string so that is is numerical (e.g. turn blank string to 0).
cost = row[2].replace(',', '')
if not cost.strip():
cost = 0
# Create doc.
doc = {
'region': {
'name': self.get_region(),
'slug': slugify(self.get_region(), to_lower=True)
},
'dataset': {
'name': self.get_dataset(),
'slug': | slugify(self.get_dataset(), to_lower=True)
},
'activity': {
'type': row[0],
'description': row[1]
},
'cost': float(cost),
'year': int(year)
}
# Console output to provide user with feedback on status of importing pro | cess.
print '%s - %s: %s (%s %i)' % (doc['activity']['type'], doc['activity']['description'], doc['cost'], doc['region']['name'], doc['year'])
return [doc] |
googleads/google-ads-python | google/ads/googleads/v9/services/services/customer_feed_service/transports/grpc.py | Python | apache-2.0 | 12,306 | 0.000975 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v9.resources.types import customer_feed
from google.ads.goo | gleads.v9.services.types import customer_feed_service
from .base import CustomerFeedServiceTransport, DEFAULT_CLIENT_INFO
class CustomerFeedServiceGrpcTransport(CustomerFeedServiceTransport):
"""gRPC backend transport f | or CustomerFeedService.
Service to manage customer feeds.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = google.auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
address (Optionsl[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
scopes (Optional[Sequence[str]]): A optional list of s |
ashawnbandy-te-tfb/FrameworkBenchmarks | frameworks/Python/web2py/app/standard/modules/database.py | Python | bsd-3-clause | 2,330 | 0.003863 | # -*- coding: utf-8 -*-
import os
from operator import itemgetter
from gluon.storage import Storage
from gluon.dal import DAL, Field, Row
DBHOST = os.environ.get('DBHOST', 'localhost')
DATABASE_URI = 'mysql://benchmarkdbuser:benchmarkdbpass@%s:3306/hello_world' % DBHOST
class Dal(object):
def __init__(self, table=None, pool_size=8):
self.db = DAL(DATABASE_URI, migrate_enabled=False, pool_size=pool_size)
if table == 'World':
self.db.define_table('World', Field('randomNumber', 'integer'))
elif table == 'Fortune':
self.db.define_table('Fortune', Field('message'))
def get_world(self, wid):
# Setting `cacheable=True` improves performance by foregoing the creation
# of some non-essential attributes. It does *not* actually cache the
# database results (it simply produces a Rows object that *could be* cached).
return self.db(self.db.World.id == wid).select(cacheable=True)[0].as_dict()
def update_world(self, wid, randomNumber):
self.db(self.db.World.id == wid).update(randomNumber=randomNumber)
def get_fortunes(self, new_message):
fortunes = self.db(self.db.Fortune).select(cacheable=True)
fortunes.records.append(Row(new_message))
return fortunes.sort(itemgetter('message'))
class RawDal(Dal):
def __init__(self):
super(RawDal, self).__init__()
self.world_updates = []
def get_world(self, wid):
return self.db.executesql('SELECT * FROM Worl | d WHERE id = %s',
placeholders=[wid], as_dict=True)[0]
def update_world(self, wid, randomNumber):
self.world_updates.extend([randomNumber, wid])
def flush_world_updates(self):
query = ';'.join('UPDATE World SET randomNumber=%s WHERE id=%s'
for _ in xrange(len(self.worl | d_updates) / 2))
self.db.executesql(query, placeholders=self.world_updates)
def get_fortunes(self, new_message):
fortunes = self.db.executesql('SELECT * FROM Fortune', as_dict=True)
fortunes.append(new_message)
return sorted(fortunes, key=itemgetter('message'))
def num_queries(queries):
try:
num = int(queries)
return 1 if num < 1 else 500 if num > 500 else num
except ValueError:
return 1
|
Shevraar/jovabot | modules/addressbook/paginebianche.py | Python | gpl-2.0 | 1,262 | 0.000792 | import logging
import sys
import requests
from bs4 import BeautifulSoup
# White pages url
WHITE_PAGES_URL = "http://www.paginebianch | e.it/execute.cgi"
class wp_response(object) | :
def __init__(self, name, tel, addr):
self.name = name
self.tel = tel
self.addr = addr
def search_wp(name, location):
payload = {'btt': '1', 'qs': name, 'dv': location}
try:
r = requests.get(WHITE_PAGES_URL, params=payload)
logging.info('requesting url {0}'.format(r.url))
return parse_response(r.text)
except Exception as e:
logging.exception('search failed {0}'.format(e))
return None
def parse_response(text):
soup = BeautifulSoup(text, 'html.parser')
for d in soup.find_all("div", "vcard"):
try:
name = d.find('h2', 'rgs').a['title']
phone = d.find("div", "tel").find("span", "value").text
address = d.find("div", "address").div.text
except:
logging.exception(d.find('h2', 'rgs'))
continue
yield wp_response(name, phone, address)
def test():
name = sys.argv[1]
loc = sys.argv[2]
for o in search_wp(name, loc):
logging.debug(o.__dict__)
if __name__ == "__main__":
test()
|
xahhy/Django-vod | epg/models.py | Python | lgpl-3.0 | 1,604 | 0.000633 | from django.db import models
class Channel(models.Model):
channel_id = models.CharField(max_length=50, unique=True)
channel_name = models.CharField(max_length=50, null=True, blank=True)
rtmp_url = models.CharField(max_length=100, null=True, blank=True)
active = models.IntegerField(null=True, blank=True)
start = models.IntegerField(null=True, blank=True)
PID = models.IntegerField(null=True, blank=True)
PGID = models.IntegerField(null=True, blank=True)
client_ip = models.CharField(max_length=50, null=True, blank=True)
sort = models.IntegerField(null=False, blank=True, default=0)
class Meta:
managed = False
db_table = 'channel'
verbose_name = '频道'
verbose_name_plural = '频道管理'
def __str__(self):
return s | elf.channel_name + '(' + self.channel_id + ')'
class Program(models.Model):
channel = models.ForeignKey(Channel, to_field='channel_id', null=True)
start_time = models.DateTimeField(auto_now_add=False, null=True, blank=True)
end_time = models.DateTimeField(auto_now_add=False, null=True, blank=True)
url = models.CharField(max_lengt | h=50, null=True, blank=True)
title = models.CharField(max_length=50, null=True, blank=True)
finished = models.IntegerField(null=True, blank=True, default=0)
event_id = models.IntegerField(null=True, blank=True)
class Meta:
managed = False
db_table = 'program'
verbose_name = '节目'
verbose_name_plural = '节目管理'
def __str__(self):
return str(self.channel) + ':' + self.title
|
JacobSheehy/pressureNETAnalysis | readings/serializers.py | Python | gpl-3.0 | 1,723 | 0 | from rest_framework import serializers
from customers import choices as customers_choices
from customers.models import Customer
from readings.models import Reading, Condition
class ReadingListSerializer(serializers.ModelSerializer):
class Meta:
model = Reading
fields = (
'reading',
'daterecorded',
'latitude',
'longitude',
)
class ReadingLiveSerializer(serializers.ModelSerializer):
class Meta:
model = Reading
fields = (
'reading',
'latitude',
'longitude',
'daterecorded',
'user_id',
'tzoffset',
'sharing',
'provider',
'client_key',
'location_accuracy',
'reading_accuracy',
'observation_type',
'observation_unit',
)
def get_fields(self):
fields = super(ReadingLiveSerializer, self).get_fields()
api_key = self.context['view'].request.GET.get('api_key', '')
customer = Customer.objects.get(api_key=api_key)
if customer.customer_type == customers_choices.CUSTOMER_PUBLIC:
del fields['user_id']
return fields
class ConditionListSerializer(serializers.ModelSerializer):
class Meta:
model = Condition
fields = (
'latitude',
'longitude',
'altitude',
'daterecorded',
'general_condition',
'windy',
'fog_thickness',
'precipitation_type',
'precipitation_amo | unt',
'precipitation_unit',
'thunderstorm_intensity',
'u | ser_comment',
)
|
framasoft/searx | searx/engines/kickass.py | Python | agpl-3.0 | 3,732 | 0.000536 | """
Kickass Torrent (Videos, Music, Files)
@website https://kickass.so
@provide-api no (nothing found)
@using-api no
@results HTML (using search portal)
@stable yes (HTML can change)
@parse url, title, content, seed, leech, magnetlink
"""
from urlparse import urljoin
from cgi import escape
from urllib import quote
from lxml import html
from operator import itemgetter
from searx.engines.xpath import extract_text
# engine dependent config
categories = ['videos', 'music', 'files']
paging = True
# search-url
url = 'https://kickass.to/'
search_url = url + 'search/{search_term}/{pageno}/'
# specific xpath variables
magnet_xpath = './/a[@title="Torrent magnet link"]'
torrent_xpath = './/a[@title="Download torrent file"]'
content_xpath = './/span[@class="font11px lightgrey block"]'
# do search-request
def request(query, params):
params['url'] = search_url.format(search_term=quote(query),
pageno=params['pageno'])
return params
# get response from search-request
def response(resp):
results = []
dom = html.fromstring(resp.tex | t)
search_res = dom.xpath('//table[@class="data"]//tr')
# return empty array if noth | ing is found
if not search_res:
return []
# parse results
for result in search_res[1:]:
link = result.xpath('.//a[@class="cellMainLink"]')[0]
href = urljoin(url, link.attrib['href'])
title = extract_text(link)
content = escape(extract_text(result.xpath(content_xpath)))
seed = result.xpath('.//td[contains(@class, "green")]/text()')[0]
leech = result.xpath('.//td[contains(@class, "red")]/text()')[0]
filesize = result.xpath('.//td[contains(@class, "nobr")]/text()')[0]
filesize_multiplier = result.xpath('.//td[contains(@class, "nobr")]//span/text()')[0]
files = result.xpath('.//td[contains(@class, "center")][2]/text()')[0]
# convert seed to int if possible
if seed.isdigit():
seed = int(seed)
else:
seed = 0
# convert leech to int if possible
if leech.isdigit():
leech = int(leech)
else:
leech = 0
# convert filesize to byte if possible
try:
filesize = float(filesize)
# convert filesize to byte
if filesize_multiplier == 'TB':
filesize = int(filesize * 1024 * 1024 * 1024 * 1024)
elif filesize_multiplier == 'GB':
filesize = int(filesize * 1024 * 1024 * 1024)
elif filesize_multiplier == 'MB':
filesize = int(filesize * 1024 * 1024)
elif filesize_multiplier == 'KB':
filesize = int(filesize * 1024)
except:
filesize = None
# convert files to int if possible
if files.isdigit():
files = int(files)
else:
files = None
magnetlink = result.xpath(magnet_xpath)[0].attrib['href']
torrentfile = result.xpath(torrent_xpath)[0].attrib['href']
torrentfileurl = quote(torrentfile, safe="%/:=&?~#+!$,;'@()*")
# append result
results.append({'url': href,
'title': title,
'content': content,
'seed': seed,
'leech': leech,
'filesize': filesize,
'files': files,
'magnetlink': magnetlink,
'torrentfile': torrentfileurl,
'template': 'torrent.html'})
# return results sorted by seeder
return sorted(results, key=itemgetter('seed'), reverse=True)
|
ArchiFleKs/magnum | magnum/api/app.py | Python | apache-2.0 | 2,079 | 0 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from oslo_config import cfg
from oslo_log import log
from paste import deploy
import pecan
from magnum.api import config as api_config
from magnum.api import middleware
from magnum.common import config as common_config
from magnum.common import service
import magnum.conf
CONF = magnum.conf.CONF
LOG = log.getLogger( | __name__)
def get_ | pecan_config():
# Set up the pecan configuration
filename = api_config.__file__.replace('.pyc', '.py')
return pecan.configuration.conf_from_file(filename)
def setup_app(config=None):
if not config:
config = get_pecan_config()
app_conf = dict(config.app)
common_config.set_config_defaults()
app = pecan.make_app(
app_conf.pop('root'),
logging=getattr(config, 'logging', {}),
wrap_app=middleware.ParsableErrorMiddleware,
guess_content_type_from_ext=False,
**app_conf
)
return app
def load_app():
cfg_file = None
cfg_path = CONF.api.api_paste_config
if not os.path.isabs(cfg_path):
cfg_file = CONF.find_file(cfg_path)
elif os.path.exists(cfg_path):
cfg_file = cfg_path
if not cfg_file:
raise cfg.ConfigFilesNotFoundError([CONF.api.api_paste_config])
LOG.info("Full WSGI config used: %s", cfg_file)
return deploy.loadapp("config:" + cfg_file)
def app_factory(global_config, **local_conf):
return setup_app()
def build_wsgi_app(argv=None):
service.prepare_service(sys.argv)
return load_app()
|
MostlyOpen/odoo_addons_jcafb | myo_address_cst/models/address.py | Python | agpl-3.0 | 1,235 | 0 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WAR | RANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You s | hould have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import models
class AddressCategory(models.Model):
_inherit = 'myo.address.category'
_defaults = {
'active_log': True,
}
class Address(models.Model):
_inherit = 'myo.address'
_defaults = {
'name': '/',
'automatic_set_name': True,
'active_log': True,
}
|
jamrootz/challenge100 | hundredBeers.py | Python | gpl-3.0 | 7,054 | 0.033598 | #!/usr/bin/python3
'''
Program to parse the beer standings table.
Should categorize beers into a dictionary or other data structure.
Add some new categories: Distribution Regions, Season, Rarity
'''
from html.parser import HTMLParser
import json
import math
beerList = [] #Will contain all the the beer objects
extra_categories = ("Distribution", "Establishments", "Dates", "Availability", "highlight")
class MyHTMLParser(HTMLParser):
inTable = False # Indicates whether data falls within a table
inHeader = False # Indicates whether data falls within a tracked table element
inData = False # Indicates whether data falls within a tracked table element
categories = []
index = 0 # Running count of data elements accessed to be indexed mod len(categories)
beer = []
def __init__(self):
super(MyHTMLParser,self).__init__(convert_charrefs=True)
def handle_starttag(self, tag, attrs):
if "table" == tag:
self.inTable = True
print("Starting Table")
elif tag == "th":
#print("Start header Tag: ", tag)
self.inHeader = True
elif tag == "td":
#print("Start data Tag: ", tag)
self.inData = True
def handle_endtag(self, tag):
if tag == "table":
self.inTable = False
print("Ending Table")
elif tag == "th":
#print("End header Tag: ", tag)
self.inHeader = False
elif tag == "td":
#print("End data Tag: ", tag)
self.inData = False
def handle_data(self, data):
#print("In tag: %d, In table: %d" % (self.inTag, self.inTable) )
if self.inHeader and self.inTable:
#print("Category: ", data)
self.categories.append(data.strip())
elif self.inData and self.inTable:
#print("Data: ", data)
self.beer.append(data.strip())
# after appending the last trait for a beer, create a new beer object
if self.index % len(self.categories) == len(self.categories) -1:
#print("Adding: ", self.beer[(self.index+1)-len(self.categories):self.index+1])
beerList.append( Beer(self.categories, self.beer[(self.index+1)-len(self.categories):self.index+1]) )
self.index += 1
class Beer():
def __init__(self, catList, bList): # pass in list of categories from table headers
self.details = {}
for cat, element in zip(catList, bList):
self.details[cat] = element
for cat in extra_categories:
if cat not in list(self.details.keys()):
self.details[cat] = ""
if self.details["Checkin"] in ("True", "\u2713"):
self.details["Checkin"]= "True"
else: self.details["Checkin"]= "False"
def showDetails(self):
for d in list(self.details.keys()): print(d,": ", self.details[d])
print('\n\n')
def add_beer(sample):
clear_screen()
print("Adding a beer\n\n\n")
categories = list(sample.details.keys())
values = [ input("Enter %s: " % cat) for cat in categories ]
beer = Beer(categories, values)
return beer
def create_webpage(beer2find):
page = open("gh-pages/index.html","w")
page.write("<html>\n<head><title>Beer Challenge</title></head>\n")
page.write("<body><h2 align=center>Beers left to find: %d</h2>\n<table width=100%% align=center border=1px>\n" % len(beer2find) )
for index, beer in enumerate(beer2find):
if beer.details["highlight"] == "True":
page.write("<tr bgcolor=yellow>\n")
else:
page.write("<tr>\n")
page.write("<td>%2d</td>" % (index+1))
page.write("<td> <em>%s</em> <br> <b>%s</b> </td>" % (beer.details["Beer"], beer.details["Brewery"]) )
page.write("<td> %s <br> <b>%s</b> </td>" % (beer.details["Style"], beer.details["State"]) )
page.write("<td width=35%%> Where: %s <br> When: %s </td>" % (beer.details["Distribution"], beer.details["Dates"]) )
page.write("<td width=35%%> Found: %s <br> %s </td>" % (beer.details["Establishments"], beer.details["Availability"]) )
page.write("</tr>\n")
page.write("</table>\n</body>\n</html>")
page.close()
def clear_screen():
print("\n"*60)
def show_options(beer2find):
clear_screen()
print("Which beer do you wish to modify?"+"\n"*3)
for index in range(math.ceil(len(beer2find)/2)):
print( str(index).rjust(15) + ". %(Brewery)-25s - %(Beer)-50s" % beer2find[index].details , end='')
if index + math.ceil(len(beer2find)/2) < len(beer2find):
print( str(index + math.ceil(len(beer2find)/2) ).rjust(15) + ". %(Brewery)-25s - %(Beer)s" % beer2find[(index+ math.ceil(len(beer2find)/2))].details )
opt = inp | ut("\n"*5 + "Enter the number of the beer (q=quit, a=add, u=uncheck, c=clear highlights, b=brewery sort, s=state sort): ")
return opt.lower()
def edit_beer(beer):
clear_screen()
print("Selected %(Brewery)s (%(Beer)s) " % beer.details)
print("\n\n\n\tOptions:\n")
print("\t\t1. Check in")
print("\t\t2. Add D | istribution Details")
print("\t\t3. Add Dates of availability")
print("\t\t4. Add Establishments carrying the beer")
print("\t\t5. Describe abundance of the beer")
print("\t\t6. Toggle Highlight (%(highlight)s)" % beer.details)
print("\t\t7. Done")
choice = input("\n\n\nWhat would you like to do? ")
if choice == "1":
beer.details["Checkin"] = "True"
elif choice == "6":
if beer.details["highlight"] == "True":
beer.details["highlight"] = ""
else: beer.details["highlight"] = "True"
def clear_highlights(beers):
# Set 'highlight' key of each beer detail to "" to clear state
for beer in beers:
beer.details["highlight"] = ""
'''
# If progress.json does not exist, create it from html input
'''
# If progress.json already exists, read and then overwrite it
try:
brewfile = open("progress.json" , 'r' )
brews = brewfile.readlines()
for brew in brews:
jBeer = json.loads(brew)
#keys = ["highlight"]
#values = [""]
keys = []
values = []
for key in jBeer:
keys.append(key)
values.append(jBeer[key])
beerList.append( Beer(keys, values) )
brewfile.close()
except FileNotFoundError:
brewfile = open("beerTable.html" , 'r' )
parser = MyHTMLParser()
parser.feed("".join(brewfile.readlines() ) )
brewfile.close()
#for brew in beerList: brew.showDetails()
opt=None
while not opt == 'q':
# Create list of beers yet to be found
beer2find = [ beer for beer in beerList if beer.details["Checkin"] == "False" ]
opt = show_options(beer2find)
try:
num = int(opt)
if num in range(len(beer2find)):
# edit that beer
edit_beer(beer2find[num])
else:
# Give a warning and/or quit
pass
except ValueError:
# It was not a number so should be a control character
if opt == 'a':
# Add beer routine
beerList.append( add_beer(beer2find[0]) )
elif opt == 'u':
# Uncheck beer routine
pass
elif opt == 'c':
# Unhighlight all selections
clear_highlights(beerList)
elif opt == 'b':
# Sort by brewery routine
beerList = sorted(beerList, key=lambda k: k.details['Brewery'])
elif opt == 's':
# Sort by state routine
beerList = sorted(beerList, key=lambda k: k.details['State'])
elif opt == 'q':
# Quit routine
pass
create_webpage(beer2find)
# Write the updated progress to our json file
brewfile = open("progress.json", "w")
#json.JSONEncoder().encode(beerList)
for brew in beerList:
json.dump(brew.details, brewfile)
brewfile.write("\n")
brewfile.close()
|
agentmilindu/stratos | components/org.apache.stratos.python.cartridge.agent/src/main/python/cartridge.agent/cartridge.agent/extensions/py/ExtensionExecutor.py | Python | apache-2.0 | 2,389 | 0.003349 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy | of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License fo | r the
# specific language governing permissions and limitations
# under the License.
from plugins.contracts import ICartridgeAgentPlugin
import os
import subprocess
from modules.util.log import LogFactory
class ExtensionExecutor(ICartridgeAgentPlugin):
def run_plugin(self, values):
log = LogFactory().get_log(__name__)
event_name = values["EVENT"]
log.debug("Running extension for %s" % event_name)
extension_values = {}
for key in values.keys():
extension_values["STRATOS_" + key] = values[key]
# log.debug("%s => %s" % ("STRATOS_" + key, extension_values["STRATOS_" + key]))
try:
output, errors = ExtensionExecutor.execute_script(event_name + ".sh")
except OSError:
raise RuntimeError("Could not find an extension file for event %s" % event_name)
if len(errors) > 0:
raise RuntimeError("Extension execution failed for script %s: %s" % (event_name, errors))
log.info("%s Extension executed. [output]: %s" % (event_name, output))
@staticmethod
def execute_script(bash_file):
""" Execute the given bash files in the <PCA_HOME>/extensions/bash folder
:param bash_file: name of the bash file to execute
:return: tuple of (output, errors)
"""
working_dir = os.path.abspath(os.path.dirname(__file__)).split("modules")[0]
command = working_dir[:-2] + "bash/" + bash_file
extension_values = os.environ.copy()
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=extension_values)
output, errors = p.communicate()
return output, errors |
adrianmoisey/lint-review | tests/tools/test_shellcheck.py | Python | mit | 2,600 | 0 | from lintreview.review import Problems
from lintreview.review import Comment
from lintreview.tools.shellcheck import Shellcheck
from lintreview.utils import in_path
from unittest import TestCase
from unittest import skipIf
from nose.tools import eq_
shellcheck_missing = not(in_path('shellCheck'))
class Testshellcheck(TestCase):
needs_shellcheck = skipIf(shellcheck_missing, 'Needs shellcheck')
fixtures = [
'tests/fixtures/shellcheck/no_errors.sh',
'tests/fixtures/shellcheck/has_errors.sh',
]
def setUp(self):
self.problems = Problems()
self.tool = Shellcheck(self.problems)
def test_match_file(self):
self.assertTrue(self.tool.match_file('test.sh'))
self.assertTrue(self.tool.match_file('dir/name/test.sh'))
self.assertFalse(self.tool.match_file('dir/name/test.py'))
self.assertFalse(self.tool.match_file('test.py'))
self.assertFalse(self.tool.match_file('test.js'))
@needs_shellcheck
def test_check_dependencies(self):
self.assertTrue(self.tool.check_dependencies())
@needs_shellcheck
def test_process_files__one_file_pass(self):
self.tool.process_files([self.fixtures[0]])
eq_([], self.problems.all(self.fixtures[0]))
@needs_shellcheck
def test_process_files__one_file_fail(self):
self.tool.process_files([self.fixtures[1]])
problems = self.problems.all(self.fixtures[1])
eq_(2, len(problems))
fname = self.fixtures[1]
expected = Comment(
fname,
5,
3,
'a is referenced but not assigned.\nDouble quote to preve | nt '
'globbing and word splitting.')
eq_(expected, problems[0])
expected = Comment(
fname,
5,
5,
'The order of the 2>&1 and the redirect matte | rs. The 2>&1 has to '
'be last.')
eq_(expected, problems[1])
@needs_shellcheck
def test_process_files_two_files(self):
self.tool.process_files(self.fixtures)
eq_([], self.problems.all(self.fixtures[0]))
problems = self.problems.all(self.fixtures[1])
eq_(2, len(problems))
@needs_shellcheck
def test_process_files_with_config(self):
config = {
'shell': 'bash',
'exclude': 'SC2154,SC2069'
}
tool = Shellcheck(self.problems, config)
tool.process_files([self.fixtures[1]])
problems = self.problems.all(self.fixtures[1])
eq_(1, len(problems), 'Changing standards changes error counts')
|
VTabolin/networking-vsphere | networking_vsphere/agent/firewalls/dvs_securitygroup_rpc.py | Python | apache-2.0 | 2,782 | 0 | # Copyright 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from threading import Timer
from oslo_log import log as logging
from networking_vsphere._i18n import _LI
from networking_vsphere.utils.rpc_translator import update_rules
from neutron.agent import securitygroups_rpc
LOG = logging.getLogger(__name__)
class DVSSecurityGroupRpc(securitygroups_rpc.SecurityGroupAgentRpc):
    """Security-group RPC handler for the DVS agent.

    Firewall refreshes are debounced: device ids accumulate in
    ``_devices_to_update`` and are flushed in one batch by a delayed
    ``_refresh_ports`` timer callback.
    """

    def __init__(self, context, plugin_rpc,
                 defer_refresh_firewall=False):
        self.context = context
        self.plugin_rpc = plugin_rpc
        # Device ids queued for the next batched firewall refresh.
        self._devices_to_update = set()
        self.init_firewall(defer_refresh_firewall)

    def prepare_devices_filter(self, device_ids):
        """Create firewall filters for newly plugged devices."""
        if not device_ids:
            return
        LOG.info(_LI("Preparing filters for devices %s"), device_ids)
        if self.use_enhanced_rpc:
            devices_info = self.plugin_rpc.security_group_info_for_devices(
                self.context, list(device_ids))
            devices = update_rules(devices_info)
        else:
            devices = self.plugin_rpc.security_group_rules_for_devices(
                self.context, list(device_ids))
        self.firewall.prepare_port_filter(devices.values())

    def remove_devices_filter(self, device_ids):
        """Remove firewall filters for unplugged devices."""
        if not device_ids:
            return
        LOG.info(_LI("Remove device filter for %r"), device_ids)
        self.firewall.remove_port_filter(device_ids)

    def _refresh_ports(self):
        """Timer callback: refresh filters for every queued device id."""
        # Snapshot the pending set; ``device_ids`` aliases the current set,
        # so the difference is empty and this swaps in a fresh empty set
        # while keeping the snapshot for processing below.
        device_ids = self._devices_to_update
        self._devices_to_update = self._devices_to_update - device_ids
        if not device_ids:
            return
        if self.use_enhanced_rpc:
            devices_info = self.plugin_rpc.security_group_info_for_devices(
                self.context, device_ids)
            devices = update_rules(devices_info)
        else:
            devices = self.plugin_rpc.security_group_rules_for_devices(
                self.context, device_ids)
        self.firewall.update_port_filter(devices.values())

    def refresh_firewall(self, device_ids=None):
        """Queue *device_ids* for a debounced firewall refresh.

        Bug fix: previously ``self._devices_to_update |= device_ids`` ran
        before the truthiness check, so the default ``device_ids=None``
        raised TypeError.  Guard first; also accept any iterable, not
        only a set.
        """
        LOG.info(_LI("Refresh firewall rules"))
        if not device_ids:
            return
        self._devices_to_update |= set(device_ids)
        Timer(2, self._refresh_ports).start()
|
JazzeYoung/VeryDeepAutoEncoder | theano/gpuarray/nerv.py | Python | bsd-3-clause | 6,598 | 0.000152 | from __future__ import absolute_import, print_function, division
import os.path
import theano
from theano import Apply, Variable, tensor
from theano.compile import optdb
from theano.compile.ops import shape_i
from theano.gof import local_optimizer, COp
from theano.scalar import as_scalar, constant
from . import opt
from .basic_ops import (as_gpuarray_variable, GpuAllocEmpty,
infer_context_name)
from .type import gpu_context_type
from .opt_util import alpha_merge, output_merge
try:
from nervanagpu.nervanagpu import GPUTensor, NervanaGPU
nerv = NervanaGPU()
except ImportError:
GPUTensor = None
nerv = None
def to_gputensor(a):
    """Wrap the contiguous gpuarray *a* in a nervanagpu ``GPUTensor`` view.

    No data is copied: the returned tensor aliases ``a``'s device memory.
    """
    flags = a.flags
    # Only contiguous layouts can be described to nervanagpu.
    assert flags.c_contiguous or flags.f_contiguous
    return GPUTensor(a.shape, dtype=a.dtype, base=a,
                     gpudata=a.gpudata + a.offset,
                     strides=a.strides, is_trans=flags.f_contiguous)
def ensure_float(val, name):
    """Return *val* coerced to a float32 scalar Variable.

    Non-Variable inputs are first wrapped in a constant, and 0-d tensors
    are lowered to scalars.  Raises TypeError (tagged with *name*) when
    the result is not a float32 scalar.
    """
    var = val if isinstance(val, Variable) else constant(val)
    if hasattr(var, 'ndim') and var.ndim == 0:
        var = as_scalar(var)
    if not isinstance(var.type, theano.scalar.Scalar):
        raise TypeError("%s: expected a scalar value" % (name,))
    if var.type.dtype != 'float32':
        raise TypeError("%s: type is not float32" % (name,))
    return var
class Gemm16(COp):
    """
    Gemm for float16 using the nervena kernels.

    Computes ``C = alpha * dot(A, B) + beta * C`` on float16 gpuarrays by
    loading pre-built nervanagpu cubin kernels and emitting the C glue code
    from ``gemm16.c``.
    """
    __props__ = ('relu', 'inplace')
    # Declares to theano that this Op handles float16 inputs itself.
    _f16_ok = True
    params_type = gpu_context_type
    # Names of the pre-compiled hgemm cubins shipped with nervanagpu;
    # nn/tn/nt encode the transpose mode, the suffix the tile size.
    KERN_NAMES = ('nn_128x128', 'nn_128x64', 'nn_128x32',
                  'nn_vec_128x128', 'nn_vec_128x64', 'nn_vec_128x32',
                  'tn_128x128', 'tn_128x64', 'tn_128x32',
                  'tn_vec_128x128', 'tn_vec_128x64', 'tn_vec_128x32',
                  'tn_vec_128x16', 'nt_128x128', 'nt_vec_128x128')
    def __init__(self, relu=False, inplace=False):
        COp.__init__(self, ["gemm16.c"], "gemm16")
        self.relu = relu
        # relu = True will require more work in optimizations.
        assert self.relu is False
        self.inplace = inplace
        if self.inplace:
            # Output 0 overwrites input 0 (the C buffer).
            self.destroy_map = {0: [0]}
    def make_node(self, C, alpha, A, B, beta):
        """Build the Apply node; all array inputs must be float16."""
        if GPUTensor is None:
            raise RuntimeError("Can't use Gemm16: nervanagpu not found")
        ctx_name = infer_context_name(C, A, B)
        A = as_gpuarray_variable(A, ctx_name)
        B = as_gpuarray_variable(B, ctx_name)
        C = as_gpuarray_variable(C, ctx_name)
        alpha = ensure_float(alpha, 'alpha')
        beta = ensure_float(beta, 'beta')
        assert C.dtype == A.dtype == B.dtype == 'float16'
        return Apply(self, [C, alpha, A, B, beta], [C.type()])
    def get_params(self, node):
        # The GPU context of the first input is passed to the C code.
        return node.inputs[0].type.context
    def c_headers(self):
        return ['gpuarray/types.h', 'numpy_compat.h', 'gpuarray_helper.h',
                'string.h']
    def c_header_dirs(self):
        return [os.path.dirname(__file__)]
    def get_op_params(self):
        # Exposed to gemm16.c as a preprocessor define.
        return [('GEMM16_INPLACE', '1' if self.inplace else '0')]
    @staticmethod
    def cubin_to_code(name):
        """Embed the named cubin file as a C byte-array literal."""
        fname = 'hgemm_{0}.cubin'.format(name)
        with open(os.path.join(nerv.cubin_path, fname)) as f:
            cubin = f.read()
        bcode = ','.join(hex(ord(c)) for c in cubin)
        return "static const char bin_%s[] = { %s };" % (name, bcode)
    @staticmethod
    def init_gpukernel(name, fail):
        """Return C code that loads kernel *name* from its embedded binary."""
        return """
        bcode = bin_%(name)s;
        sz = sizeof(bin_%(name)s);
        if (GpuKernel_init(&k_%(name)s, c->ctx, 1, &bcode, &sz,
                           "hgemm_%(name)s", 13, types, GA_USE_BINARY, NULL)
            != GA_NO_ERROR) {
            PyErr_SetString(PyExc_RuntimeError, "Could not initialize kernel %(name)s");
            %(fail)s;
        }
        """ % dict(name=name, fail=fail)
    def c_support_code(self):
        # One embedded byte-array per cubin.
        codel = []
        for name in self.KERN_NAMES:
            codel.append(Gemm16.cubin_to_code(name))
        return '\n'.join(codel)
    def c_support_code_struct(self, node, nodename):
        # One GpuKernel struct member per kernel.
        codel = []
        for name in self.KERN_NAMES:
            codel.append("GpuKernel k_{0};".format(name))
        codel.append(super(Gemm16, self).c_support_code_struct(node, nodename))
        return '\n'.join(codel)
    def c_init_code_struct(self, node, nodename, sub):
        """Emit struct-init code that zeroes and then loads every kernel."""
        codel = [super(Gemm16, self).c_init_code_struct(node, nodename, sub)]
        for name in self.KERN_NAMES:
            codel.append("memset(&k_{0}, 0, sizeof(GpuKernel));".format(name))
        codel.append("const char *bcode;")
        codel.append("size_t sz;")
        codel.append("PyGpuContextObject *c = %s;" % (sub['params'],))
        # Argument signature shared by all 13-arg hgemm kernels.
        codel.append("int types[13] = {GA_BUFFER, GA_BUFFER, GA_BUFFER, "
                     "GA_BUFFER, GA_INT, GA_INT, GA_INT, GA_INT, GA_INT, "
                     "GA_INT, GA_FLOAT, GA_FLOAT, GA_INT};")
        for name in self.KERN_NAMES:
            codel.append(self.init_gpukernel(name, sub['fail']))
        return '\n'.join(codel)
    def c_cleanup_code_struct(self, node, nodename):
        # Release every loaded kernel when the struct is destroyed.
        codel = []
        for name in self.KERN_NAMES:
            codel.append("GpuKernel_clear(&k_{0});".format(name))
        return '\n'.join(codel)
@opt.register_opt()
@opt.op_lifter([tensor.Dot])
def local_dot_to_gemm16(node, ctx_name):
    """Rewrite a float16 matrix-matrix ``Dot`` into a Gemm16 Op.

    Only fires when nervanagpu is importable and both operands are 2-d
    float16; the output buffer is allocated uninitialized since beta=0.
    """
    if nerv is None:
        return
    A = node.inputs[0]
    B = node.inputs[1]
    if (A.ndim == 2 and B.ndim == 2 and
            A.dtype == 'float16' and B.dtype == 'float16'):
        fgraph = node.inputs[0].fgraph
        # C has shape (rows of A, cols of B); contents are irrelevant
        # because beta == 0.0 below.
        C = GpuAllocEmpty(dtype='float16', context_name=ctx_name)(
            shape_i(A, 0, fgraph), shape_i(B, 1, fgraph))
        return Gemm16()(C, 1.0, A, B, 0.0)
@opt.register_opt()
@alpha_merge(Gemm16, alpha_in=1, beta_in=4)
def local_gemm16_alpha_merge(node, *inputs):
    # Fold surrounding scalar multiplications into Gemm16's alpha/beta.
    return [Gemm16(relu=node.op.relu)(*inputs)]
@opt.register_opt()
@output_merge(Gemm16, alpha_in=1, beta_in=4, out_in=0)
def local_gemm16_output_merge(node, *inputs):
    # Fold an addition with the output buffer into Gemm16's C/beta inputs.
    return [Gemm16(relu=node.op.relu)(*inputs)]
@local_optimizer([Gemm16], inplace=True)
def local_gemm16_inplace(node):
    """Replace a non-inplace Gemm16 with the destructive variant."""
    if type(node.op) != Gemm16 or node.op.inplace:
        return
    inputs = list(node.inputs)
    C = inputs[0]
    # If the output buffer comes from GpuAllocEmpty but is shared with
    # other consumers, give this node its own fresh allocation so the
    # inplace write cannot clobber the others.
    if (C.owner and
            isinstance(C.owner.op, GpuAllocEmpty) and
            len(C.clients) > 1):
        inputs[0] = C.owner.op(*C.owner.inputs)
    return [Gemm16(relu=node.op.relu, inplace=True)(*inputs)]
# Register the inplace rewrite with the optimizer database at position 70.0
# under the 'fast_run', 'inplace' and 'gpuarray' tags.
optdb.register('local_gemm16_inplace',
               tensor.opt.in2out(local_gemm16_inplace,
                                 name='local_gemm16_inplace'),
               70.0, 'fast_run', 'inplace', 'gpuarray')
|
jcrudy/clinvoc | clinvoc/examples/parser_example.py | Python | mit | 312 | 0.003205 | from clinvoc.icd9 import ICD9CM
# This string describes a set of ICD 9 codes, mixing ranges (with '-'),
# wildcards ('*') and individual codes.
codestring = '745.0-745.3, 745.6*, 746, 747.1-747.49, 747.81, 747.89, 35.8, 35.81, 35.82, 35.83, 35.84'
# Use clinvoc to parse and standardize the above codes into a set.
vocab = ICD9CM()
codeset = vocab.parse(codestring)
# Print the expanded, normalized codes in sorted order.
print(sorted(codeset))
|
afaheem88/tempest_neutron | tempest/tests/fake_credentials.py | Python | apache-2.0 | 1,922 | 0 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import auth
class FakeCredentials(auth.Credentials):
    """Minimal credentials stub that always reports itself as valid."""
    def is_valid(self):
        return True
class FakeKeystoneV2Credentials(auth.KeystoneV2Credentials):
    """Canned credentials for the Keystone Identity V2 API."""

    def __init__(self):
        super(FakeKeystoneV2Credentials, self).__init__(
            username='fake_username',
            password='fake_password',
            tenant_name='fake_tenant_name')
class FakeKeystoneV3Credentials(auth.KeystoneV3Credentials):
    """
    Fake credentials suitable for the Keystone Identity V3 API
    """

    def __init__(self):
        super(FakeKeystoneV3Credentials, self).__init__(
            username='fake_username',
            password='fake_password',
            user_domain_name='fake_domain_name',
            project_name='fake_tenant_name')
class FakeKeystoneV3DomainCredentials(auth.KeystoneV3Credentials):
    """
    Fake credentials suitable for the Keystone Identity V3 API, with no scope
    """

    def __init__(self):
        super(FakeKeystoneV3DomainCredentials, self).__init__(
            username='fake_username',
            password='fake_password',
            user_domain_name='fake_domain_name')
|
akshaybabloo/Car-ND | Term_1/CNN_5/CNN_example_5.py | Python | mit | 7,593 | 0.003688 | """
Applying Convolutional Neural Network in TensorFlow
The structure of this network follows the classic structure of CNNs, which is a mix of convolutional layers and max
pooling, followed by fully-connected layers.
The code you'll be looking at is similar to what you saw in the segment on Deep Neural Network in TensorFlow,
except we restructured the architecture of this network as a CNN.
"""
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
# Parameters
learning_rate = 0.00001
epochs = 10
batch_size = 128
# Number of samples to calculate validation and accuracy
# Decrease this if you're running out of memory to calculate accuracy
test_valid_size = 256
# Network Parameters
n_classes = 10  # MNIST total classes (0-9 digits)
dropout = 0.75  # Dropout, probability to keep units
# Store layers weight & bias
weights = {
    # 5x5 conv, 1 input channel -> 32 feature maps
    'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
    # 5x5 conv, 32 -> 64 feature maps
    'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
    # fully connected: flattened 7x7x64 activations -> 1024 units
    'wd1': tf.Variable(tf.random_normal([7 * 7 * 64, 1024])),
    # output layer: 1024 -> 10 class logits
    'out': tf.Variable(tf.random_normal([1024, n_classes]))}
biases = {
    'bc1': tf.Variable(tf.random_normal([32])),
    'bc2': tf.Variable(tf.random_normal([64])),
    'bd1': tf.Variable(tf.random_normal([1024])),
    'out': tf.Variable(tf.random_normal([n_classes]))}
def conv2d(x, W, b, strides=1):
    """
    .. figure:: convolution-schematic.gif
        :align: center

        Convolution with 3x3 filter. Source: Standford

    The above is an example of a convolution with a 3x3 filter and a stride of 1 being applied to data with a range
    of 0 to 1. The convolution for each 3x3 section is calculated against the weight, ``[[1, 0, 1], [0, 1, 0], [1, 0,
    1]]``, then a bias is added to create the convolved feature on the right. In this case, the bias is zero. In
    TensorFlow, this is all done using ``tf.nn.conv2d()`` and ``tf.nn.bias_add()``.

    The tf.nn.conv2d() function computes the convolution against weight W as shown above.

    In TensorFlow, ``strides`` is an array of 4 elements; the first element in this array indicates the stride for
    batch and last element indicates stride for features. It's good practice to remove the batches or features you
    want to skip from the data set rather than use a stride to skip them. You can always set the first and last
    element to 1 in ``strides`` in order to use all batches and features.

    The middle two elements are the strides for height and width respectively. I've mentioned stride as one number
    because you usually have a square stride where height = width. When someone says they are using a stride of 3,
    they usually mean ``tf.nn.conv2d(x, W, strides=[1, 3, 3, 1])``.

    To make life easier, the code is using ``tf.nn.bias_add()`` to add the bias. Using ``tf.add()`` doesn't work when
    the tensors aren't the same shape.

    Parameters
    ----------
    x : input batch tensor
    W : convolution filter weights
    b : bias, one value per output channel
    strides : int, stride used for both height and width (batch and
        feature strides are fixed at 1)

    Returns
    -------
    ReLU-activated convolution output ('SAME' padding).
    """
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x)
def maxpool2d(x, k=2):
    """
    .. figure:: max-pooling.png
        :align: center

        Max Pooling with 2x2 filter and stride of 2. Source: Wikipedia

    The above is an example of max pooling with a 2x2 filter and stride of 2. The left square is the input and the
    right square is the output. The four 2x2 colors in input represents each time the filter was applied to create
    the max on the right side. For example, ``[[1, 1], [5, 6]]`` becomes 6 and ``[[3, 2], [1, 2]]`` becomes 3.

    The ``tf.nn.max_pool()`` function does exactly what you would expect, it performs max pooling with the ``ksize``
    parameter as the size of the filter.

    Parameters
    ----------
    x : input tensor
    k : int, filter size and stride for both height and width

    Returns
    -------
    Max-pooled tensor ('SAME' padding); spatial size shrinks by factor k.
    """
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')
def conv_net(x, weights, biases, dropout):
    """
    .. figure:: model.png
        :align: center

        Different ways to model. Source: Udacity

    In the code below, we're creating 3 layers alternating between convolutions and max pooling followed by a fully
    connected and output layer. The transformation of each layer to new dimensions are shown in the comments. For
    example, the first layer shapes the images from 28x28x1 to 28x28x32 in the convolution step. Then next step
    applies max pooling, turning each sample into 14x14x32. All the layers are applied from conv1 to output,
    producing 10 class predictions.

    Parameters
    ----------
    x : input batch of 28x28x1 images
    weights : dict of layer weight Variables ('wc1', 'wc2', 'wd1', 'out')
    biases : dict of layer bias Variables ('bc1', 'bc2', 'bd1', 'out')
    dropout : float, keep probability for the fully connected layer

    Returns
    -------
    Logits tensor with one score per class.
    """
    # Layer 1 - 28*28*1 to 14*14*32
    conv1 = conv2d(x, weights['wc1'], biases['bc1'])
    conv1 = maxpool2d(conv1, k=2)
    # Layer 2 - 14*14*32 to 7*7*64
    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
    conv2 = maxpool2d(conv2, k=2)
    # Fully connected layer - 7*7*64 to 1024
    # Flatten to (batch, 7*7*64) so it can be matrix-multiplied.
    fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    fc1 = tf.nn.dropout(fc1, dropout)
    # Output Layer - class prediction - 1024 to 10
    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    return out
def run():
    """
    Train the CNN on MNIST and report per-batch loss/validation accuracy,
    then the final test accuracy.  Runs for ``epochs`` epochs of
    ``batch_size``-sized minibatches; accuracy is computed on at most
    ``test_valid_size`` samples to bound memory use.
    """
    mnist = input_data.read_data_sets("./MNIST_data", one_hot=True, reshape=False)
    # tf Graph input
    x = tf.placeholder(tf.float32, [None, 28, 28, 1])
    y = tf.placeholder(tf.float32, [None, n_classes])
    # Dropout keep-probability; fed as `dropout` while training, 1.0 for eval.
    keep_prob = tf.placeholder(tf.float32)
    # Model
    logits = conv_net(x, weights, biases, keep_prob)
    # Define loss and optimizer
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) \
        .minimize(cost)
    # Accuracy
    correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    # Initializing the variables
    init = tf.global_variables_initializer()
    # Launch the graph
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(epochs):
            for batch in range(mnist.train.num_examples // batch_size):
                batch_x, batch_y = mnist.train.next_batch(batch_size)
                sess.run(optimizer, feed_dict={
                    x: batch_x,
                    y: batch_y,
                    keep_prob: dropout})
                # Calculate batch loss and accuracy
                loss = sess.run(cost, feed_dict={
                    x: batch_x,
                    y: batch_y,
                    keep_prob: 1.})
                valid_acc = sess.run(accuracy, feed_dict={
                    x: mnist.validation.images[:test_valid_size],
                    y: mnist.validation.labels[:test_valid_size],
                    keep_prob: 1.})
                print('Epoch {:>2}, Batch {:>3} -'
                      'Loss: {:>10.4f} Validation Accuracy: {:.6f}'.format(
                          epoch + 1,
                          batch + 1,
                          loss,
                          valid_acc))
        # Calculate Test Accuracy
        test_acc = sess.run(accuracy, feed_dict={
            x: mnist.test.images[:test_valid_size],
            y: mnist.test.labels[:test_valid_size],
            keep_prob: 1.})
        print('Testing Accuracy: {}'.format(test_acc))
if __name__ == '__main__':
run()
|
Hybrid-Cloud/badam | patches_tool/aws_patch/code/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py | Python | apache-2.0 | 107,162 | 0.001194 | #!/usr/bin/env python
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import re
import signal
import sys
import time
import random
import eventlet
eventlet.monkey_patch()
import netaddr
from neutron.plugins.openvswitch.agent import ovs_dvr_neutron_agent
from oslo.config import cfg
from six import moves
from oslo import messaging
from neutron.agent import l2population_rpc
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.agent.linux import polling
from neutron.agent.linux import utils
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.handlers import dvr_rpc
from neutron.common import config as common_config
from neutron.common import constants as q_const
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils as q_utils
from neutron import context
from neutron.extensions import qos
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as p_const
from neutron.plugins.openvswitch.common import config # noqa
from neutron.plugins.ope | nvswitch.common import constants
from neutron.plugins.openvswitch.agent import neutron_bus_client
from neutron.services.qos.agents import qos_rpc
LOG = logging.getLogger(__name__)
# A placeholder for dead vlans.
DEAD_VLAN_TAG = str(q_const.MAX_VLAN_TAG + 1)
Agent_Start_Report_Retry_Interval = 2
class DeviceListRetrieva | lError(exceptions.NeutronException):
message = _("Unable to retrieve port details for devices: %(devices)s "
"because of error: %(error)s")
class AgentError(exceptions.NeutronException):
    """Raised when an RPC call into the agent fails."""
    # NOTE(review): uses ``msg_fmt`` while the sibling exception above uses
    # ``message`` — confirm NeutronException honours ``msg_fmt``.
    msg_fmt = _('Error during following call to agent: %(method)s')
# A class to represent a VIF (i.e., a port that has 'iface-id' and 'vif-mac'
# attributes set).
class LocalVLANMapping:
    """Record of how one network maps onto a local VLAN tag.

    Tracks the local vlan id, the network's type/physical network/
    segmentation id, the VIF ports on the vlan, and the tunnel ofports
    used for flooding.
    """

    def __init__(self, vlan, network_type, physical_network, segmentation_id,
                 vif_ports=None):
        self.vlan = vlan
        self.network_type = network_type
        self.physical_network = physical_network
        self.segmentation_id = segmentation_id
        # Fresh dict per instance when no mapping is supplied (avoids a
        # shared mutable default).
        self.vif_ports = {} if vif_ports is None else vif_ports
        # set of tunnel ports on which packets should be flooded
        self.tun_ofports = set()

    def __str__(self):
        return ("lv-id = %s type = %s phys-net = %s phys-id = %s" %
                (self.vlan, self.network_type, self.physical_network,
                 self.segmentation_id))
class VmPort:
    """Tracks a VM port's MAC address and the set of its known IPs."""

    def __init__(self, mac, ip_address=None):
        self.mac_address = mac
        # Idiom fix: the original duplicated ``set()`` creation in both
        # branches; seed the set with the one known address when given.
        self.ip_address = {ip_address} if ip_address else set()
class VLANBridge(ovs_lib.OVSBridge):
"""Extends OVSBridge for Trunkport."""
    def __init__(self, br_name, int_br, root_helper):
        """Wrap bridge *br_name* and link it to the integration bridge.

        Patch-port names are derived from the bridge name with its first
        three characters dropped: "pvm…" on this bridge, "pin…" on br-int.
        """
        super(VLANBridge, self).__init__(br_name, root_helper)
        self.int_br = int_br
        self.vlan_port_name = "pvm" + br_name[3:]
        self.int_br_port_name = "pin" + br_name[3:]
    def configure(self):
        """Create the patch-port pair to br-int (idempotent) and install
        default drop flows on both ends so nothing crosses until an
        explicit VLAN mapping is programmed via set_mapping()."""
        ports = self.get_port_name_list()
        if self.vlan_port_name not in ports:
            self.add_patch_port(
                self.vlan_port_name, self.int_br_port_name)
        self.vlan_ofport = self.get_port_ofport(self.vlan_port_name)
        ports = self.int_br.get_port_name_list()
        if self.int_br_port_name not in ports:
            self.int_br.add_patch_port(
                self.int_br_port_name, self.vlan_port_name)
        # NOTE(review): queried via self rather than self.int_br — works if
        # the ofport lookup is database-wide rather than bridge-scoped;
        # confirm against ovs_lib.
        self.int_br_ofport = self.get_port_ofport(
            self.int_br_port_name)
        # Reset this bridge's flows for the patch port to a single drop.
        self.delete_flows(in_port=self.vlan_ofport)
        self.add_flow(priority=2,
                      in_port=self.vlan_ofport,
                      actions="drop")
        # Same default-drop on the br-int side.
        self.int_br.delete_flows(in_port=self.int_br_ofport)
        self.int_br.add_flow(priority=2,
                             in_port=self.int_br_ofport,
                             actions="drop")
    def cleanup_bridge(self):
        """Remove this bridge's flows from br-int and (if the bridge still
        exists) from the bridge itself."""
        LOG.debug(_("Cleanup bridge br-int %s"), self.int_br_ofport)
        self.int_br.delete_flows(in_port=self.int_br_ofport)
        if self.bridge_exists(self.br_name):
            self.delete_flows(in_port=self.vlan_ofport)
    def _get_flows(self, br, of_port):
        """Parse ``dump-flows`` output for *of_port* on bridge *br*.

        Returns a set of (from_vlan, to_vlan) pairs, where to_vlan is the
        int from a ``mod_vlan_vid`` action or the string "Untagged" for a
        ``strip_vlan`` action.
        """
        # First line of ofctl output is a header — skip it.
        flow_list = br.run_ofctl("dump-flows",
                                 ["in_port=%s" % of_port]).split("\n")[1:]
        p1 = re.compile('in_port=(\d+),dl_vlan=(\d+).*mod_vlan_vid:(\d+)')
        p2 = re.compile('in_port=(\d+),dl_vlan=(\d+).*strip_vlan')
        f = set()
        for l in flow_list:
            m = p1.search(l)
            if m:
                in_port = m.group(1)
                f_vid = int(m.group(2))
                t_vid = int(m.group(3))
                # NOTE(review): string comparison — relies on of_port being
                # passed as a string; confirm with callers.
                if(in_port == of_port):
                    f.add((f_vid, t_vid))
            m = p2.search(l)
            if m:
                in_port = m.group(1)
                f_vid = int(m.group(2))
                t_vid = "Untagged"
                if(in_port == of_port):
                    f.add((f_vid, t_vid))
        return f
    def init_flow_check(self):
        """Snapshot this bridge's current flows so that, after mappings are
        re-applied, stale ones can be removed by remove_extra_flows()."""
        self.current_flows = self._get_flows(self,
                                             self.vlan_ofport)
        self.new_flows = set()
    def init_int_br_flow_check(self):
        """Same snapshot, but for this bridge's patch port on br-int."""
        self.current_int_br_flows = self._get_flows(self.int_br,
                                                    self.int_br_ofport)
        self.new_int_br_flows = set()
    def _set_mapping(self, vm_flow_vid, int_br_vid, record_vid, action):
        """Install the VLAN translation flows for one mapping.

        Records each flow in the ``new_*`` sets and only programs it when
        it was not already present in the snapshot taken by the
        init_*_flow_check() methods.
        """
        # br-int direction: rewrite the VM-side tag to the local vlan.
        self.new_int_br_flows.add((record_vid, int_br_vid))
        if (record_vid, int_br_vid) not in self.current_int_br_flows:
            self.int_br.add_flow(
                priority=3, in_port=self.int_br_ofport,
                dl_vlan=vm_flow_vid,
                actions="mod_vlan_vid:%s,normal" % int_br_vid)
        else:
            LOG.debug(_("Flow already in place: %s"), (record_vid, int_br_vid))
        # Reverse direction: apply the caller-provided action (retag/strip).
        self.new_flows.add((int_br_vid, record_vid))
        if (int_br_vid, record_vid) not in self.current_flows:
            self.add_flow(priority=3,
                          in_port=self.vlan_ofport,
                          dl_vlan=int_br_vid,
                          actions=action)
        else:
            LOG.debug(_("Flow already in place: %s"), (int_br_vid, record_vid))
    def set_mapping(self, vm_vid, int_br_vid):
        """Map VM-side vlan *vm_vid* (None for untagged) to *int_br_vid*.

        For untagged traffic, 0xffff is used as the dl_vlan match and the
        return flow strips the local tag instead of rewriting it.
        """
        if vm_vid is None:
            self._set_mapping(0xffff, int_br_vid, "Untagged",
                              "strip_vlan,normal")
        else:
            self._set_mapping(vm_vid, int_br_vid, vm_vid,
                              "mod_vlan_vid:%s,normal" % vm_vid)
    def remove_flows(self,vid, local_vlan):
        """Delete both directions of the *vid* <-> *local_vlan* mapping."""
        self.int_br.delete_flows(in_port=self.int_br_ofport,
                                 dl_vlan=vid)
        self.delete_flows(in_port=self.vlan_ofport,
                          dl_vlan=local_vlan)
def remove_extra_flows(self):
remove = self.current_flows - self.new_flows
int_br_remove = self.current_int_br_flows - self.new_int_br_flows
for f in remove:
if f[0] == 'Untagged':
pass
else:
self.delete_flows(in_port=self.vlan_ofport,
dl_vlan=f[0])
for f in int_br_remove:
if f[0] == 'Unt |
jumpstarter-io/keystone | keystone/contrib/oauth1/controllers.py | Python | apache-2.0 | 17,478 | 0 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Extensions supporting OAuth1."""
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from keystone.common import controller
from keystone.common import dependency
from keystone.common import wsgi
from keystone.contrib.oauth1 import core as oauth1
from keystone.contrib.oauth1 import validator
from keystone import exception
from keystone.i18n import _
from keystone.models import token_model
from keystone import notifications
CONF = cfg.CONF
@notifications.internal(notifications.INVALIDATE_USER_OAUTH_CONSUMER_TOKENS,
                        resource_id_arg_index=0)
def _emit_user_oauth_consumer_token_invalidate(payload):
    # This is a special case notification that expects the payload to be a
    # dict containing the user_id and the consumer_id. This is so that the
    # token provider can invalidate any tokens in the token persistence if
    # token persistence is enabled.  The decorator does all the work; the
    # body is intentionally empty.
    pass
@dependency.requires('oauth_api', 'token_provider_api')
class ConsumerCrudV3(controller.V3Controller):
    """CRUD controller for OAuth1 consumers (/OS-OAUTH1/consumers)."""
    collection_name = 'consumers'
    member_name = 'consumer'
    @classmethod
    def base_url(cls, context, path=None):
        """Construct a path and pass it to V3Controller.base_url method."""
        # NOTE(stevemar): Overriding path to /OS-OAUTH1/consumers so that
        # V3Controller.base_url handles setting the self link correctly.
        path = '/OS-OAUTH1/' + cls.collection_name
        return controller.V3Controller.base_url(context, path=path)
    @controller.protected()
    def create_consumer(self, context, consumer):
        """Create a consumer, assigning it a fresh unique id."""
        ref = self._assign_unique_id(self._normalize_dict(consumer))
        initiator = notifications._get_request_audit_info(context)
        consumer_ref = self.oauth_api.create_consumer(ref, initiator)
        return ConsumerCrudV3.wrap_member(context, consumer_ref)
    @controller.protected()
    def update_consumer(self, context, consumer_id, consumer):
        """Update a consumer; the secret may never be changed."""
        self._require_matching_id(consumer_id, consumer)
        ref = self._normalize_dict(consumer)
        self._validate_consumer_ref(ref)
        initiator = notifications._get_request_audit_info(context)
        ref = self.oauth_api.update_consumer(consumer_id, ref, initiator)
        return ConsumerCrudV3.wrap_member(context, ref)
    @controller.protected()
    def list_consumers(self, context):
        ref = self.oauth_api.list_consumers()
        return ConsumerCrudV3.wrap_collection(context, ref)
    @controller.protected()
    def get_consumer(self, context, consumer_id):
        ref = self.oauth_api.get_consumer(consumer_id)
        return ConsumerCrudV3.wrap_member(context, ref)
    @controller.protected()
    def delete_consumer(self, context, consumer_id):
        """Delete a consumer, invalidating tokens it issued for the
        requesting user first."""
        user_token_ref = token_model.KeystoneToken(
            token_id=context['token_id'],
            token_data=self.token_provider_api.validate_token(
                context['token_id']))
        payload = {'user_id': user_token_ref.user_id,
                   'consumer_id': consumer_id}
        _emit_user_oauth_consumer_token_invalidate(payload)
        initiator = notifications._get_request_audit_info(context)
        self.oauth_api.delete_consumer(consumer_id, initiator)
    def _validate_consumer_ref(self, consumer):
        # Reject attempts to rotate the secret through update_consumer().
        if 'secret' in consumer:
            msg = _('Cannot change consumer secret')
            raise exception.ValidationError(message=msg)
@dependency.requires('oauth_api')
class AccessTokenCrudV3(controller.V3Controller):
    """Read/delete controller for a user's OAuth1 access tokens."""
    collection_name = 'access_tokens'
    member_name = 'access_token'
    @classmethod
    def _add_self_referential_link(cls, context, ref):
        # NOTE(lwolf): overriding method to add proper path to self link
        ref.setdefault('links', {})
        path = '/users/%(user_id)s/OS-OAUTH1/access_tokens' % {
            'user_id': cls._get_user_id(ref)
        }
        ref['links']['self'] = cls.base_url(context, path) + '/' + ref['id']
    @controller.protected()
    def get_access_token(self, context, user_id, access_token_id):
        """Return one access token; 404 unless it belongs to *user_id*."""
        access_token = self.oauth_api.get_access_token(access_token_id)
        if access_token['authorizing_user_id'] != user_id:
            raise exception.NotFound()
        access_token = self._format_token_entity(context, access_token)
        return AccessTokenCrudV3.wrap_member(context, access_token)
    @controller.protected()
    def list_access_tokens(self, context, user_id):
        """List a user's access tokens; forbidden for delegated tokens."""
        auth_context = context.get('environment',
                                   {}).get('KEYSTONE_AUTH_CONTEXT', {})
        if auth_context.get('is_delegated_auth'):
            raise exception.Forbidden(
                _('Cannot list request tokens'
                  ' with a token issued via delegation.'))
        refs = self.oauth_api.list_access_tokens(user_id)
        formatted_refs = ([self._format_token_entity(context, x)
                           for x in refs])
        return AccessTokenCrudV3.wrap_collection(context, formatted_refs)
    @controller.protected()
    def delete_access_token(self, context, user_id, access_token_id):
        """Delete an access token, invalidating dependent tokens first."""
        access_token = self.oauth_api.get_access_token(access_token_id)
        consumer_id = access_token['consumer_id']
        payload = {'user_id': user_id, 'consumer_id': consumer_id}
        _emit_user_oauth_consumer_token_invalidate(payload)
        initiator = notifications._get_request_audit_info(context)
        return self.oauth_api.delete_access_token(
            user_id, access_token_id, initiator)
    @staticmethod
    def _get_user_id(entity):
        return entity.get('authorizing_user_id', '')
    def _format_token_entity(self, context, entity):
        """Strip internal fields (role_ids, access_secret) and add a
        'roles' link to the API representation of a token."""
        formatted_entity = entity.copy()
        access_token_id = formatted_entity['id']
        user_id = self._get_user_id(formatted_entity)
        if 'role_ids' in entity:
            formatted_entity.pop('role_ids')
        # Never expose the shared secret through the API.
        if 'access_secret' in entity:
            formatted_entity.pop('access_secret')
        url = ('/users/%(user_id)s/OS-OAUTH1/access_tokens/%(access_token_id)s'
               '/roles' % {'user_id': user_id,
                           'access_token_id': access_token_id})
        formatted_entity.setdefault('links', {})
        formatted_entity['links']['roles'] = (self.base_url(context, url))
        return formatted_entity
@dependency.requires('oauth_api', 'role_api')
class AccessTokenRolesV3(controller.V3Controller):
collection_name = 'roles'
member_name = 'role'
@controller.protected()
def list_access_token_roles(self, context, user_id, access_token_id):
access_token = self.oauth_api.get_access_token(access_token_id)
if access_token['authorizing_user_id'] != user_id:
raise exception.NotFound()
authed_role_ids = access_token['role_ids']
authed_role_ids = jsonutils.loads(authed_role_ids)
refs = ([self._format_role_entity(x) for x in authed_role_ids])
return AccessTokenRolesV3.wrap_collection(context, refs)
@controller.protected()
def get_access_token_role(self, context, user_id,
access_token_id, role_id):
access_token = self.oauth_api.get_access_token(access_token_id)
if access_token['authorizing_user_id'] != user_id:
raise exception.Unauthorized(_('User IDs do not match'))
authed_role_ids = access_token['role_ids']
authed_role_ids = jsonutils.loads(authed_role_ids)
for authed_role_id in authed_role_ids:
if authed_role_id == role_id:
role = self._format_rol |
ArcherSys/ArcherSys | skulpt/test/run/t152.py | Python | mit | 124 | 0.080645 | print str(ran | ge(5,0,-3))[:5]
# Exercise len() and indexing (positive and negative) on a descending
# range with a negative step; range(5,0,-3) is [5, 2].
print len(range(5,0,-3))
print range(5,0,-3)[0]
print range(5,0,-3)[1]
print range(5,0,-3)[-1]
|
jbonofre/beam | sdks/python/apache_beam/runners/direct/clock.py | Python | apache-2.0 | 1,544 | 0.004534 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ow | nership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the s | pecific language governing permissions and
# limitations under the License.
#
"""Clock implementations for real time processing and testing.
For internal use only. No backwards compatibility guarantees.
"""
from __future__ import absolute_import
import time
class Clock(object):
    """Abstract time source; see RealClock and TestClock below."""
    def time(self):
        """Returns the number of seconds since epoch."""
        raise NotImplementedError()
    def advance_time(self, advance_by):
        """Advances the clock by a number of seconds."""
        raise NotImplementedError()
class RealClock(object):
    """Wall-clock time source backed by time.time()."""
    def time(self):
        # Seconds since epoch, as a float.
        return time.time()
class TestClock(object):
    """Manually advanced clock for deterministic tests."""

    def __init__(self, current_time=0):
        # Fake "now" in seconds; only advance_time() ever moves it.
        self._current_time = current_time

    def time(self):
        """Return the current fake time in seconds."""
        return self._current_time

    def advance_time(self, advance_by):
        """Move the fake clock forward by *advance_by* seconds."""
        self._current_time = self._current_time + advance_by
|
mvaled/sentry | src/sentry/south_migrations/0458_global_searches_data_migration.py | Python | bsd-3-clause | 120,420 | 0.007889 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from sentry.utils.query import RangeQuerySetWrapperWithProgressBar
# Name/query pairs inserted as is_global=True SavedSearch rows by
# Migration._forwards below.
DEFAULT_SAVED_SEARCHES = [
    {
        'name': 'Unresolved Issues',
        'query': 'is:unresolved',
    },
    {
        'name': 'Needs Triage',
        'query': 'is:unresolved is:unassigned'
    },
    {
        'name': 'Assigned To Me',
        'query': 'is:unresolved assigned:me'
    },
    {
        'name': 'My Bookmarks',
        'query': 'is:unresolved bookmarks:me'
    },
    {
        'name': 'New Today',
        'query': 'is:unresolved age:-24h'
    },
]
class Migration(DataMigration):
# Flag to indicate if this migration is too risky
# to run online and needs to be coordinated for offline
is_dangerous = True
    def forwards(self, orm):
        """Run the data migration outside the wrapping transaction.

        South opens a transaction around migrations; this commits it
        first so the (potentially long) row updates run unwrapped, and
        re-opens one afterwards (or before re-raising on failure) so
        South's bookkeeping still has a transaction to close.
        """
        db.commit_transaction()
        try:
            self._forwards(orm)
        except Exception:
            db.start_transaction()
            raise
        db.start_transaction()
    def _forwards(self, orm):
        "Backfill is_global=False on existing rows, then insert the defaults."
        # Note: Don't use "from appname.models import ModelName".
        # Use orm.ModelName to refer to models in this application,
        # and orm['appname.ModelName'] for models in other applications.
        SavedSearch = orm['sentry.SavedSearch']
        # Existing rows predate the is_global column; mark them non-global.
        for search in RangeQuerySetWrapperWithProgressBar(
            SavedSearch.objects.filter(is_global__isnull=True)
        ):
            search.is_global = False
            search.save()
        # Insert the global default searches in a single bulk write.
        default_searches = []
        for search in DEFAULT_SAVED_SEARCHES:
            default_searches.append(
                SavedSearch(
                    name=search['name'],
                    query=search['query'],
                    is_global=True,
                )
            )
        SavedSearch.objects.bulk_create(default_searches)
    def backwards(self, orm):
        """Remove the global saved searches added by the forward migration."""
        # These will be the only rows with a null `project_id`, so we can safely
        # make the column `not null` after deleting them.
        SavedSearch = orm['sentry.SavedSearch']
        SavedSearch.objects.filter(is_global=True).delete()
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Gro | up']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.mo | dels.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apiapplication': {
'Meta': {'object_name': 'ApiApplication'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'client_id': ('django.db.models.fields.CharField', [], {'default': "'82029f091b094a2ca18ef45d3958513c683b4643c65f4fbfacfbd1cdee187a51'", 'unique': 'True', 'max_length': '64'}),
'client_secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'74093ba9478e4d41ae25dfcb036bd062ea58b43d394140a4989d6ec19f179b6a'"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Proper Crawdad'", 'max_length': '64', 'blank': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'privacy_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'redirect_uris': ('django.db.models.fields.TextField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'terms_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.apiauthorization': {
'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApiAuthorization'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apigrant': {
'Meta': {'object_name': 'ApiGrant'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']"}),
'code': ('django.db.models.fields.CharField', [], {'default': "'12d42834d62142d4beaecf34588354dd'", 'max_length': '64', 'db_index': 'True'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2019, 1, 18, 0, 0)', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': (u'django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fiel |
nwillemse/nctrader | nctrader/trading_session/trade_sim.py | Python | mit | 2,643 | 0.000757 | from __future__ import print_function
from ..compat import queue
from ..event import EventType
from datetime import datetime
class TradeSim(object):
    """
    Encapsulates the settings and components for
    carrying out an event-driven trade simulation.
    """
    def __init__(
        self, price_handler, strategy, portfolio_handler, execution_handler,
        position_sizer, risk_manager, statistics, equity, end_date=None
    ):
        """
        Store the collaborating components of the simulation.

        end_date -- optional cut-off datetime; BAR events stamped after
        it are ignored. When None, every bar is processed.
        """
        self.price_handler = price_handler
        self.strategy = strategy
        self.portfolio_handler = portfolio_handler
        self.execution_handler = execution_handler
        self.position_sizer = position_sizer
        self.risk_manager = risk_manager
        self.statistics = statistics
        self.equity = equity
        self.end_date = end_date
        self.events_queue = price_handler.events_queue
        self.cur_time = None

    def _run_backtest(self):
        """
        Poll the events queue and dispatch each event to either the
        strategy component or the execution handler until the price
        handler signals the end of the backtest. When the queue is
        empty, stream the next price event instead.
        """
        print("Running Backtest...")
        while self.price_handler.continue_backtest:
            try:
                event = self.events_queue.get(False)
            except queue.Empty:
                self.price_handler.stream_next()
                continue
            if event.type == EventType.BAR:
                # Skip bars beyond the configured end date (if any);
                # the None guard avoids a TypeError on the comparison
                # when no end date was supplied.
                if self.end_date is not None and event.time > self.end_date:
                    continue
                self.cur_time = event.time
                self.strategy.on_bar(event)
                self.portfolio_handler.update_portfolio_value()
                self.statistics.update(event)
            elif event.type == EventType.TRADE:
                self.portfolio_handler.on_trade(event)
            elif event.type == EventType.ORDER:
                self.execution_handler.execute_order(event)
            elif event.type == EventType.FILL:
                self.portfolio_handler.on_fill(event)
            else:
                # NotImplementedError, not the NotImplemented singleton:
                # the singleton is not callable/raisable, so the original
                # `raise NotImplemented(...)` itself raised a TypeError.
                raise NotImplementedError(
                    "Unsupported event.type '%s'" % event.type)

    def simulate_trading(self, testing=False):
        """
        Run the backtest and persist the collected statistics.
        ``testing`` is unused but kept for interface compatibility.
        """
        self._run_backtest()
        print("---------------------------------")
        print("Backtest complete.")
        self.statistics.save()
|
ohickman/etch-a-sketch | point.py | Python | gpl-3.0 | 4,079 | 0.001961 | """
Extensible image processing and Etch-a-sketch drawing program
Copyright (C) 2014 Oliver Hickman
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
class Point(object):
    """An (x, y) pair in a Cartesian coordinate system.

    Coordinates are coerced to ``int`` by the property setters; a value
    that cannot be converted is replaced with 0 after printing a message.
    """

    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y

    @staticmethod
    def _coerce(value):
        """Coerce ``value`` to int, falling back to 0 with a message.

        Only ValueError is handled (matching the historical behaviour);
        e.g. ``int(None)`` still raises TypeError.
        """
        try:
            return int(value)
        except ValueError:
            print ("Could not convert \"%s\" into an integer." % value)
            return 0

    @property
    def x(self):
        """x coordinate; assigned values are coerced to int.

        >>> a = Point(9,0)
        >>> a.x = 8
        >>> a.y = "elephant"
        Could not convert "elephant" into an integer."""
        return self._x

    @x.setter
    def x(self, x):
        self._x = Point._coerce(x)

    @property
    def y(self):
        """y coordinate; assigned values are coerced to int.

        >>> a = Point(0,8)
        >>> a.y = 8"""
        return self._y

    @y.setter
    def y(self, y):
        self._y = Point._coerce(y)

    def __mul__(self, other):
        """Piecewise multiplies each element in Point by a scalar.

        Added so that ``point * scalar`` works as well as
        ``scalar * point``.

        >>> a = Point(1,2)
        >>> a*2
        (2, 4)"""
        return Point(self.x * other, self.y * other)

    def __rmul__(self, other):
        """Piecewise multiplies a scalar by each element in Point.

        >>> a = Point(1,2)
        >>> 2*a
        (2, 4)"""
        return self.__mul__(other)

    def __add__(self, other):
        """Piecewise adds two Points together.

        >>> a = Point(1,2)
        >>> b = Point(1,2)
        >>> a + b
        (2, 4)
        """
        return Point(self.x + other.x, self.y + other.y)

    def __neg__(self):
        """Piecewise inverts each element in the Point.

        >>> a = Point(1,2)
        >>> -a
        (-1, -2)
        >>> b = Point(4,-7)
        >>> -b
        (-4, 7)
        >>> c = Point()
        >>> -c
        (0, 0)
        """
        return Point(-self.x, -self.y)

    def __sub__(self, other):
        """Piecewise subtracts the elements of other from those of self.

        >>> a = Point(4,7)
        >>> b = Point(3,9)
        >>> a - b
        (1, -2)
        >>> c = Point()
        >>> a - c
        (4, 7)
        """
        return self + (-other)

    def __eq__(self, other):
        """Tests whether two Point objects are piecewise equal.

        >>> a = Point(1,2)
        >>> b = Point(1,2)
        >>> a == b
        True
        >>> c = Point()  # default point = (0,0)
        >>> a == c
        False
        """
        if not isinstance(other, Point):
            return NotImplemented
        return self.x == other.x and self.y == other.y

    def __ne__(self, other):
        """Inverse of __eq__ (required explicitly on Python 2)."""
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        """Hash consistent with __eq__, so equal Points hash equally.

        NOTE: Points are mutable; do not mutate one that is used as a
        dict key or set member.
        """
        return hash((self._x, self._y))

    def __repr__(self):
        """Machine readable representation of a Point as (x, y).

        This is the format you'd want when building a list of points.

        >>> a = Point()
        >>> a
        (0, 0)"""
        return "({0._x!r}, {0._y!r})".format(self)

    def __str__(self):
        """Human readable representation of a Point.

        >>> a = Point()
        >>> print(a)
        Point(0, 0)"""
        return "Point({0._x!r}, {0._y!r})".format(self)
if __name__ == "__main__":
    # Run the embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
|
zouyapeng/horizon-newtouch | openstack_dashboard/dashboards/admin/info/tabs.py | Python | apache-2.0 | 3,879 | 0 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon imp | ort exceptions
from horizo | n import tabs
from openstack_dashboard.api import base
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
from openstack_dashboard.api import neutron
from openstack_dashboard.api import nova
from openstack_dashboard.dashboards.admin.info import constants
from openstack_dashboard.dashboards.admin.info import tables
class ServicesTab(tabs.TableTab):
table_classes = (tables.ServicesTable,)
name = _("Services")
slug = "services"
template_name = constants.INFO_DETAIL_TEMPLATE_NAME
def get_services_data(self):
request = self.tab_group.request
services = []
for i, service in enumerate(request.user.service_catalog):
service['id'] = i
services.append(
keystone.Service(service, request.user.services_region))
return services
class NovaServicesTab(tabs.TableTab):
table_classes = (tables.NovaServicesTable,)
name = _("Compute Services")
slug = "nova_services"
template_name = constants.INFO_DETAIL_TEMPLATE_NAME
permissions = ('openstack.services.compute',)
def get_nova_services_data(self):
try:
services = nova.service_list(self.tab_group.request)
except Exception:
msg = _('Unable to get nova services list.')
exceptions.check_message(["Connection", "refused"], msg)
exceptions.handle(self.request, msg)
services = []
return services
class CinderServicesTab(tabs.TableTab):
table_classes = (tables.CinderServicesTable,)
name = _("Block Storage Services")
slug = "cinder_services"
template_name = constants.INFO_DETAIL_TEMPLATE_NAME
permissions = ('openstack.services.volume',)
def get_cinder_services_data(self):
try:
services = cinder.service_list(self.tab_group.request)
except Exception:
msg = _('Unable to get cinder services list.')
exceptions.check_message(["Connection", "refused"], msg)
exceptions.handle(self.request, msg)
services = []
return services
class NetworkAgentsTab(tabs.TableTab):
table_classes = (tables.NetworkAgentsTable,)
name = _("Network Agents")
slug = "network_agents"
template_name = constants.INFO_DETAIL_TEMPLATE_NAME
def allowed(self, request):
try:
return (base.is_service_enabled(request, 'network') and
neutron.is_extension_supported(request, 'agent'))
except Exception:
exceptions.handle(request, _('Unable to get network agents info.'))
return False
def get_network_agents_data(self):
try:
agents = neutron.agent_list(self.tab_group.request)
except Exception:
msg = _('Unable to get network agents list.')
exceptions.check_message(["Connection", "refused"], msg)
exceptions.handle(self.request, msg)
agents = []
return agents
class SystemInfoTabs(tabs.TabGroup):
slug = "system_info"
tabs = (ServicesTab, NovaServicesTab, CinderServicesTab,
NetworkAgentsTab)
sticky = True
|
bigswitch/horizon | openstack_dashboard/test/error_pages_urls.py | Python | apache-2.0 | 184 | 0 | from django.conf.url | s import url
from django. | views import defaults
from openstack_dashboard.urls import urlpatterns # noqa
urlpatterns.append(url(r'^500/$', defaults.server_error))
|
GMadorell/larv | larv/Entity.py | Python | mit | 493 | 0 | # encoding: | UTF-8
class Entity:
"""
An entity is just an ID, which will be assigned some components.
Components hold the data and systems are the responsables of the logic
behind it.
"""
def __init__(self, id):
"""
Con | structor.
We should pass a unique id everytime we create a new entity.
"""
self.__id = id
@property
def id(self):
"""Read only property for the id private variable."""
return self.__id
|
mdelorme/MNn | mnn/examples/simple_model.py | Python | mit | 1,497 | 0.002004 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mnn.model import MNnModel
# Single disc
model = MNnModel()
model.add_disc('z', 1.0, 10.0, 100.0)
# Evaluating density and potential :
print(model.evaluate_density(1.0, 2.0, -0.5))
print(model.evaluate_potential(1.0, 2.0, -0.5))
print(model.evaluate_force(1.0, 2.0, -0.5))
# Using vectors to evaluate density along an axis :
x = np.linspace(0.0, 30.0, 100.0)
densi | ty = model.evaluate_density(x, 0.0, 0.0)
fig = plt.plot(x, density)
plt.show()
# Plotting density meshgrid
x, y, | z, v = model.generate_dataset_meshgrid((0.0, 0.0, -10.0), (30.0, 0.0, 10.0), (300, 1, 200))
fig = plt.imshow(v[:, 0].T)
plt.show()
# Contour plot
x = np.linspace(0.0, 30.0, 300)
z = np.linspace(-10.0, 10.0, 200)
plt.contour(x, z, v[:, 0].T)
plt.show()
# Plotting force meshgrid
plt.close('all')
x, y, z, f = model.generate_dataset_meshgrid((-30.0, -30.0, 0.0), (30.0, 30.0, 0.0), (30, 30, 1), 'force')
x = x[:, :, 0].reshape(-1)
y = y[:, :, 0].reshape(-1)
fx = f[0, :, :, 0].reshape(-1)
fy = f[1, :, :, 0].reshape(-1)
extent = [x.min(), x.max(), y.min(), y.max()]
plt.figure(figsize=(10, 10))
gs = gridspec.GridSpec(2, 2)
ax1 = plt.subplot(gs[1, 0])
pl1 = ax1.imshow(f[1, :, :, 0].T, extent=extent, aspect='auto')
ax2 = plt.subplot(gs[0, 1])
pl2 = ax2.imshow(f[0, :, :, 0].T, extent=extent, aspect='auto')
ax3 = plt.subplot(gs[1, 1])
pl3 = ax3.quiver(x.T, y.T, fx.T, fy.T, units='width', scale=0.045)
plt.show()
|
DavidMikeSimon/satyrnos | resman.py | Python | gpl-3.0 | 1,503 | 0.031271 | import pygame
import os
from OpenGL.GL import *
from OpenGL.GLU import *
import app
from geometry import *
class Texture(object):
    """An OpenGL 2D texture.

    Data attributes:
    filename -- The filename that the texture was loaded from, or an empty string
    glname -- The OpenGL texture name.
    size -- The dimensions of the texture as a Size.
    surf -- The PyGame surface.
    """

    cache = {}  # Key: filename, value: Texture instance

    def __new__(cls, filename):
        """Creates a Texture from an image file, using the pre-cached version if it exists."""
        # `in` instead of dict.has_key(): has_key() is Python-2-only and
        # was removed in Python 3.
        if filename in Texture.cache:
            return Texture.cache[filename]
        obj = object.__new__(cls)
        obj.filename = filename
        Texture.cache[filename] = obj
        # Allocate a GL texture name and upload the image data.
        obj.glname = glGenTextures(1)
        fullpath = os.path.join('imgs', filename)
        surf = pygame.image.load(fullpath)
        obj.surf = surf
        obj.size = Size(surf.get_width(), surf.get_height())
        texData = pygame.image.tostring(surf, "RGBA", 1)
        glBindTexture(GL_TEXTURE_2D, obj.glname)
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, surf.get_width(), surf.get_height(), 0, GL_RGBA, GL_UNSIGNED_BYTE, texData)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
        return obj
def unload_all():
    """Unloads all resources.

    Invalidates all instances of any of the classes in this module."""
    if Texture.cache:
        names = [tex.glname for tex in Texture.cache.values()]
        glDeleteTextures(names)
        Texture.cache = {}
|
ThomasYeoLab/CBIG | setup/tests/hooks_tests/pre_commit_tests/G_check_flake8_format/notfollow_flake8.py | Python | mit | 380 | 0.068421 | # Written by Nanbo Sun and CBIG under MIT license: https:// | github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
x = { 'a':37,'b':42,
'c':927}
y = 'hello ''world'
z = 'hello '+'world'
a = 'hello {}'.format('world')
class foo ( object ):
def f | (self ):
return 37*-+2
def g(self, x,y=42):
return y
def f ( a ) :
return 37+-+a[42-x : y**3]
|
weechat/weechat.org | weechat/common/path.py | Python | gpl-3.0 | 1,672 | 0 | #
# Copyright (C) 2003-2022 Sébastien Helleu <flashc | ode@flashtux.org>
#
# This fil | e is part of WeeChat.org.
#
# WeeChat.org is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# WeeChat.org is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WeeChat.org. If not, see <https://www.gnu.org/licenses/>.
#
"""Some useful path functions."""
import os
from django.conf import settings
def __path_join(base, *args):
    """
    Join multiple paths after 'base' and ensure the result is still
    under 'base'.

    Return the joined, normalized path, or an empty string if the
    result would escape 'base' (e.g. via '..' components).
    """
    base = os.path.normpath(base)
    directory = os.path.normpath(os.path.join(base, *args))
    # A plain startswith(base) check would wrongly accept sibling paths
    # such as '/a/bc' for base '/a/b'; require the base itself or a
    # path-separator boundary after it.
    if directory == base or directory.startswith(base.rstrip(os.sep) + os.sep):
        return directory
    return ''
def project_path_join(*args):
    """Join multiple paths after settings.BASE_DIR.

    Returns '' if the resulting path would escape BASE_DIR.
    """
    return __path_join(settings.BASE_DIR, *args)


def files_path_join(*args):
    """Join multiple paths after settings.FILES_ROOT.

    Returns '' if the resulting path would escape FILES_ROOT.
    """
    return __path_join(settings.FILES_ROOT, *args)


def media_path_join(*args):
    """Join multiple paths after settings.MEDIA_ROOT.

    Returns '' if the resulting path would escape MEDIA_ROOT.
    """
    return __path_join(settings.MEDIA_ROOT, *args)


def repo_path_join(*args):
    """Join multiple paths after settings.REPO_DIR.

    Returns '' if the resulting path would escape REPO_DIR.
    """
    return __path_join(settings.REPO_DIR, *args)
|
PiotrGrzybowski/NeuralNetworks | networks/neurons/activations.py | Python | apache-2.0 | 291 | 0.003436 | UNIPOLAR = 'unipolar'
BIPOLAR | = 'bipolar'
def unipolar(x):
    """Unipolar (binary) step activation: 1 when x >= 0, otherwise 0."""
    if x >= 0:
        return 1
    return 0
def bipolar(x):
    """Bipolar step activation: 1 when x >= 0, otherwise -1."""
    if x >= 0:
        return 1
    return -1
def get_activation(activation):
    """Return the activation function registered under ``activation``.

    Raises KeyError for unknown names.
    """
    return ACTIVATION_FUNCTIONS[activation]
# Registry mapping activation-name constants to their implementations.
ACTIVATION_FUNCTIONS = {
    UNIPOLAR: unipolar,
    BIPOLAR: bipolar
}
|
die-uhus/blueman | blueman/bluez/obex/AgentManager.py | Python | gpl-3.0 | 976 | 0.003074 | from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
from blueman.Functions import dprint
from blueman.bluez.obex.Base import Base
class AgentManager(Base):
    """Async wrapper for the 'org.bluez.obex.AgentManager1' D-Bus interface."""

    def __init__(self):
        super(AgentManager, self).__init__('org.bluez.obex.AgentManager1', '/org/bluez/obex')

    def register_agent(self, agent_path):
        """Register the OBEX agent at agent_path; outcomes are logged via dprint."""
        def reply():
            dprint(agent_path)

        def on_error(error):
            dprint(agent_path, error)

        self._interface.RegisterAgent(agent_path, reply_handler=reply, error_handler=on_error)

    def unregister_agent(self, agent_path):
        """Unregister the OBEX agent at agent_path; outcomes are logged via dprint."""
        def reply():
            dprint(agent_path)

        def on_error(error):
            dprint(agent_path, error)

        self._interface.UnregisterAgent(agent_path, reply_handler=reply, error_handler=on_error)
|
SmartInfrastructures/fuel-web-dev | nailgun/nailgun/api/v1/handlers/plugin.py | Python | apache-2.0 | 2,321 | 0 | # -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from nailgun.api.v1.handlers import base
from nailgun.api.v1.handlers.base import content
from nailgun.api.v1.validators import plugin
from nailgun.errors import errors
from nailgun import objects
from nailgun.plugins.manager import PluginManager
class PluginHandler(base.SingleHandler):
    """REST handler for a single plugin object (behaviour from base.SingleHandler)."""
    validator = plugin.PluginValidator
    single = objects.Plugin
class PluginCollectionHandler(base.CollectionHandler):
    """REST handler for the plugin collection with duplicate-aware creation."""

    validator = plugin.PluginValidator
    collection = objects.PluginCollection

    @content
    def POST(self):
        """:returns: JSONized REST object.

        :http: * 201 (object successfully created)
               * 400 (invalid object data specified)
               * 409 (object with such parameters already exists)
        """
        data = self.checked_data(self.validator.validate)
        # Reject duplicates (same name + version) with 409 and the
        # existing object's representation.
        existing = self.collection.single.get_by_name_version(
            data['name'], data['version'])
        if existing:
            raise self.http(409, self.collection.single.to_json(existing))
        return super(PluginCollectionHandler, self).POST()
class PluginSyncHandler(base.BaseHandler):
    """REST handler that re-reads plugin metadata from disk into the DB."""

    validator = plugin.PluginSyncValidator

    @content
    def POST(self):
        """:returns: JSONized REST object.

        :http: * 200 (plugins successfully synced)
               * 404 (plugin not found in db)
               * 400 (problem with parsing metadata file)
        """
        data = self.checked_data()
        plugin_ids = data.get('ids', None)
        try:
            PluginManager.sync_plugins_metadata(plugin_ids=plugin_ids)
        except errors.ParseError as exc:
            raise self.http(400, msg=six.text_type(exc))
        raise self.http(200, {})
|
markgw/jazzparser | lib/nltk/corpus/__init__.py | Python | gpl-3.0 | 10,492 | 0.001906 | # Natural Language Toolkit: Corpus Readers
#
# Copyright (C) 2001-2010 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
# [xx] this docstring isnt' up-to-date!
"""
NLTK corpus readers. The modules in this package provide functions
that can be used to read corpus files in a variety of formats. These
functions can be used to read both the corpus files that are
distributed in the NLTK corpus package, and corpus files that are part
of external corpora.
Available Corpora
=================
Please see http://nltk.googlecode.com/svn/trunk/nltk_data/index.xml
for a complete list. Install corpora using nltk.download().
Corpus Reader Functions
=======================
Each corpus module defines one or more X{corpus reader functions},
which can be used to read documents from that corpus. These functions
take an argument, C{item}, which is used to indicate which document
should be read from the corpus:
- If C{item} is one of the unique identifiers listed in the corpus
module's C{items} variable, then the corresponding document will
be loaded from the NLTK corpus package.
- If C{item} is a filename, then that file will be read.
Additionally, corpus reader functions can be given lists of item
names; in which case, they will return a concatenation of the
corresponding documents.
Corpus reader functions are named based on the type of information
they return. Some common examples, and their return types, are:
- I{corpus}.words(): list of str
- I{corpus}.sents(): list of (list of str)
- I{corpus}.paras(): list of (list of (list of str))
- I{corpus}.tagged_words(): list of (str,str) tuple
- I{corpus}.tagged_sents(): list of (list of (str,str))
- I{corpus}.tagged_paras(): list of (list of (list of (str,str)))
- I{corpus}.chunked_sents(): list of (Tree w/ (str,str) leaves)
- I{corpus}.parsed_sents(): list of (Tree with str leaves)
- I{corpus}.parsed_paras(): list of (list of (Tree with str leaves))
- I{corpus}.xml(): A single xml ElementTree
- I{corpus}.raw(): unprocessed corpus contents
For example, to read a list of the words in the Brown Corpus, use
C{nltk.corpus.brown.words()}:
>>> from nltk.corpus import brown
>>> print brown.words()
['The', 'Fulton', 'County', 'Grand', 'Jury', 'said', ...]
Corpus Metadata
===============
Metadata about the NLTK corpora, and their individual documents, is
stored using U{Open Language Archives Community (OLAC)
<http://www.language-archives.org/>} metadata records. These records
can be accessed using C{nltk.corpus.I{corpus}.olac()}.
"""
import re
from nltk.tokenize import RegexpTokenizer
from nltk.tag import simplify_brown_tag, simplify_wsj_tag,\
simplify_alpino_tag, simplify_indian_tag,\
simplify_tag
from util import LazyCorpusLoader
from reader import *
abc = LazyCorpusLoader(
'abc', PlaintextCorpusReader, r'(?!\.).*\.txt')
alpino = LazyCorpusLoader(
'alpino', AlpinoCorpusReader, tag_mapping_function=simplify_alpino_tag)
brown = LazyCorpusLoader(
'brown', CategorizedTaggedCorpusReader, r'c[a-z]\d\d',
cat_file='cats.txt', tag_mapping_function=simplify_brown_tag)
cess_cat = LazyCorpusLoader(
'cess_cat', BracketParseCorpusReader, r'(?!\.).*\.tbf',
tag_mapping_function=simplify_tag)
cess_esp = LazyCorpusLoader(
'cess_esp', BracketParseCorpusReader, r'(?!\.).*\.tbf',
tag_mapping_function=simplify_tag)
cmudict = LazyCorpusLoader(
'cmudict', CMUDictCorpusReader, ['cmudict'])
comtrans = LazyCorpusLoader(
'comtrans', AlignedCorpusReader, r'(?!README|\.).*')
conll2000 = LazyCorpusLoader(
'conll2000', ConllChunkCorpusReader,
['train.txt', 'test.txt'], ('NP','VP','PP'))
conll2002 = LazyCorpusLoader(
'conll2002', ConllChunkCorpusReader, '.*\.(test|train).*',
('LOC', 'PER', 'ORG', 'MISC'), encoding='utf-8')
conll2007 = LazyCorpusLoader(
'conll2007', DependencyCorpusReader, '.*\.(test|train).*',
encoding='utf-8')
dependency_treebank = LazyCorpusLoader(
'dependency_treebank', DependencyCorpusReader, '.*\.dp')
floresta = LazyCorpusLoader(
'floresta', BracketParseCorpusReader, r'(?!\.).*\.ptb', '#',
tag_mapping_function=simplify_tag)
gazetteers = LazyCorpusLoader(
'gazetteers', WordListCorpusReader, r'(?!LICENSE|\.).*\.txt')
genesis = LazyCorpusLoader(
'genesis', PlaintextCorpusReader, r'(?!\.).*\.txt', encoding=[
('finnish|french|german', 'latin_1'),
('swedish', 'cp865'),
('.*', 'utf_8')])
gutenberg = LazyCorpusLoader(
'gutenberg', PlaintextCorpusReader, r'(?!\.).*\.txt')
# corpus not available with NLTK; these lines caused help(nltk.corpus) to break
#hebrew_treebank = LazyCorpusLoader(
# 'hebrew_treebank', BracketParseCorpusReader, r'.*\.txt')
ieer = LazyCorpusLoader(
'ieer', IEERCorpusReader, r'(?!README|\.).*')
inaugural = LazyCorpusLoader(
'inaugural', PlaintextCorpusReader, r'(?!\.).*\.txt')
# [XX] This should probably just use TaggedCorpusReader:
indian = LazyCorpusLoader(
'indian', IndianCorpusReader, r'(?!\.).*\.pos',
tag_mapping_function=simplify_indian_tag)
ipipan = LazyCorpusLoader(
'ipipan', IPIPANCorpusReader, r'(?!\.).*morph\.xml')
jeita = LazyCorpusLoader(
'jeita', ChasenCorpusReader, r'.*\.chasen', encoding | ='utf-8')
knbc = LazyCorpusLoader(
'knbc/corpus1', KNBCorpusReader, r'.*/KN.*', encoding='euc-jp')
mac_morpho = LazyCorpusLoader(
'mac_morpho', MacMorphoCorpusReader, r'(?!\.).*\.txt',
tag_mapping_function=simplify_tag, encoding='latin-1')
machado = LazyCorpusLoader(
'machado', PortugueseCategorizedPlaintextCorpusReader,
r'(?!\.).*\.txt', cat_pattern=r'([a- | z]*)/.*', encoding='latin-1')
movie_reviews = LazyCorpusLoader(
'movie_reviews', CategorizedPlaintextCorpusReader,
r'(?!\.).*\.txt', cat_pattern=r'(neg|pos)/.*')
names = LazyCorpusLoader(
'names', WordListCorpusReader, r'(?!\.).*\.txt')
nps_chat = LazyCorpusLoader(
'nps_chat', NPSChatCorpusReader, r'(?!README|\.).*\.xml',
tag_mapping_function=simplify_wsj_tag)
pl196x = LazyCorpusLoader(
'pl196x', Pl196xCorpusReader, r'[a-z]-.*\.xml',
cat_file='cats.txt', textid_file='textids.txt')
ppattach = LazyCorpusLoader(
'ppattach', PPAttachmentCorpusReader, ['training', 'test', 'devset'])
# ptb = LazyCorpusLoader( # Penn Treebank v3: WSJ and Brown portions
# 'ptb3', CategorizedBracketParseCorpusReader, r'(WSJ/\d\d/WSJ_\d\d|BROWN/C[A-Z]/C[A-Z])\d\d.MRG',
# cat_file='allcats.txt', tag_mapping_function=simplify_wsj_tag)
qc = LazyCorpusLoader(
'qc', StringCategoryCorpusReader, ['train.txt', 'test.txt'])
reuters = LazyCorpusLoader(
'reuters', CategorizedPlaintextCorpusReader, '(training|test).*',
cat_file='cats.txt')
rte = LazyCorpusLoader(
'rte', RTECorpusReader, r'(?!\.).*\.xml')
semcor = LazyCorpusLoader(
'semcor', XMLCorpusReader, r'brown./tagfiles/br-.*\.xml')
senseval = LazyCorpusLoader(
'senseval', SensevalCorpusReader, r'(?!\.).*\.pos')
shakespeare = LazyCorpusLoader(
'shakespeare', XMLCorpusReader, r'(?!\.).*\.xml')
sinica_treebank = LazyCorpusLoader(
'sinica_treebank', SinicaTreebankCorpusReader, ['parsed'],
tag_mapping_function=simplify_tag)
state_union = LazyCorpusLoader(
'state_union', PlaintextCorpusReader, r'(?!\.).*\.txt')
stopwords = LazyCorpusLoader(
'stopwords', WordListCorpusReader, r'(?!README|\.).*')
swadesh = LazyCorpusLoader(
'swadesh', SwadeshCorpusReader, r'(?!README|\.).*')
switchboard = LazyCorpusLoader(
'switchboard', SwitchboardCorpusReader)
timit = LazyCorpusLoader(
'timit', TimitCorpusReader)
toolbox = LazyCorpusLoader(
'toolbox', ToolboxCorpusReader, r'(?!.*(README|\.)).*\.(dic|txt)')
treebank = LazyCorpusLoader(
'treebank/combined', BracketParseCorpusReader, r'wsj_.*\.mrg',
tag_mapping_function=simplify_wsj_tag)
treebank_chunk = LazyCorpusLoader(
'treebank/tagged', ChunkedCorpusReader, r'wsj_.*\.pos',
sent_tokenizer=RegexpTokenizer(r'(?<=/\.)\s*(?![^\[]*\])', gaps=True),
para_block_reader=tagged_treebank_para_block_reader)
treebank_raw = LazyCorpusLoader(
'tr |
matthewzhenggong/fiwt | XbeeZBS2Test/recparse.py | Python | lgpl-3.0 | 4,619 | 0.023815 |
import argparse
import numpy as np
import scipy.io as syio
import struct
import time
def Get14bit(val):
    """Sign-extend a 14-bit two's-complement field to a Python int.

    Bits above bit 13 are ignored; the result lies in [-8192, 8191].
    """
    masked = val & 0x3FFF
    if masked & 0x2000:
        # Negative: subtract 2**14 to undo the two's-complement wrap.
        return masked - 0x4000
    return masked
class fileParser(object):
    """Parse binary telemetry logs into numpy arrays.

    A log is a stream of records: a 3-byte big-endian header (tag byte +
    unsigned 16-bit payload length) followed by the payload.  The payload's
    first byte selects one of four layouts (0x22, 0x33, 0xA6, 0x44).
    """

    def __init__(self):
        # Record header: tag byte + unsigned 16-bit payload length.
        # (The original initialized this twice; once is enough.)
        self.packHdr = struct.Struct(">BH")
        # Payload layouts keyed by tag byte.
        self.pack22 = struct.Struct(">B6H3H6HI6h")
        self.pack33 = struct.Struct(">B4H4HI4h")
        self.packA6 = struct.Struct(">BB4H16BI")
        self.pack44 = struct.Struct(">B4HI")
        # Column labels matching the rows collected in parse_file().
        # dtype=object replaces the np.object alias removed in modern numpy.
        self.head22 = np.array(["T", "Enc1", "Enc2", "Enc3",
                                "GX", "GY", "GZ", "AX", "AY", "AZ",
                                "Svo1", "Svo2", "Svo3", "Svo4", "Svo5", "Svo6",
                                "Mot1", "Mot2", "Mot3", "Mot4", "Mot5", "Mot6"],
                               dtype=object)
        self.head33 = np.array(["T", "Enc1", "Enc2", "Enc3", "Enc4",
                                "Mot1", "Mot2", "Mot3", "Mot4"], dtype=object)
        self.headA6 = np.array(["T", "Svo1", "Svo2", "Svo3", "Svo4", "Type"],
                               dtype=object)
        self.head44 = np.array(["T", "ADC1", "ADC2", "ADC3", "ADC4"],
                               dtype=object)

    def parse_data(self, data):
        """Decode one record payload and append its row to the matching list.

        Unknown tag bytes are silently ignored (historical behaviour).
        """
        tag = data[0:1]  # slice, not index: stays bytes on Py3 / str on Py2
        if tag == b'\x22':
            rslt = self.pack22.unpack(data)
            svos = list(rslt[1:7])
            encs = list(rslt[7:10])
            # Gyro/accel fields are 14-bit signed values with fixed scales.
            gyr_acc = [Get14bit(rslt[10]) * 0.05,
                       Get14bit(rslt[11]) * -0.05,
                       Get14bit(rslt[12]) * -0.05,
                       Get14bit(rslt[13]) * -0.003333,
                       Get14bit(rslt[14]) * 0.003333,
                       Get14bit(rslt[15]) * 0.003333]
            T = rslt[16] * 1e-6  # microseconds -> seconds
            mots = list(rslt[17:23])
            self.data22.append([T] + encs + gyr_acc + svos + mots)
        elif tag == b'\x33':
            rslt = self.pack33.unpack(data)
            # rslt[1:5] are servo values; the 0x33 row does not record them.
            encs = list(rslt[5:9])
            T = rslt[9] * 1e-6
            mots = list(rslt[10:14])
            self.data33.append([T] + encs + mots)
        elif tag == b'\xa6':
            rslt = self.packA6.unpack(data)
            itype = rslt[1]
            svos = list(rslt[2:6])
            # rslt[6:22] is a 16-byte blob that is not kept.
            T = rslt[22] * 1e-6
            self.dataA6.append([T] + svos + [itype])
        elif tag == b'\x44':
            rslt = self.pack44.unpack(data)
            adcs = list(rslt[1:5])
            T = rslt[5] * 1e-6
            self.data44.append([T] + adcs)

    def parse_file(self, filename):
        """Parse *filename* and return a dict of data arrays and column labels."""
        self.data22 = []
        self.data33 = []
        self.data44 = []
        self.dataA6 = []
        # 'with' closes the handle deterministically (previously leaked).
        with open(filename, 'rb') as f:
            head = f.read(3)
            while len(head) == 3:
                _tag, length = self.packHdr.unpack(head)
                data = f.read(length)
                if len(data) != length:
                    break  # truncated final record
                self.parse_data(data)
                head = f.read(3)
        self.data22 = np.array(self.data22)
        self.data33 = np.array(self.data33)
        self.dataA6 = np.array(self.dataA6)
        self.data44 = np.array(self.data44)
        return {'data22': self.data22, 'data33': self.data33,
                'head22': self.head22, 'head33': self.head33,
                'headA6': self.headA6, 'dataA6': self.dataA6,
                'head44': self.head44, 'data44': self.data44,
                }
if __name__ == '__main__':
    # CLI: recparse file [file ...] -> writes a <file>.mat next to each input.
    arg_parser = argparse.ArgumentParser(
        prog='recparse',
        description='parse rec data file')
    arg_parser.add_argument('filenames', metavar='file',
                            nargs='+', help='data filename')
    opts = arg_parser.parse_args()
    rec_parser = fileParser()
    for rec_name in opts.filenames:
        syio.savemat(rec_name + '.mat', rec_parser.parse_file(rec_name))
|
ratnania/pyccel | tests/codegen/scripts/recursive_functions.py | Python | mit | 155 | 0.058065 | #$ heade | r fact(int) results(int)
def fact(n):
    """Return the factorial of ``n`` via direct recursion."""
    if n == 0:
        result = 1
    else:
        result = n * fact(n - 1)
    return result


print(fact(5))
| |
DarkenNav/UnionFreeArts | WebApp/WebSiteUnionFreeArts/WebSiteUnionFreeArts/wsgi.py | Python | mit | 440 | 0 | "" | "
"""
WSGI config for WebSiteUnionFreeArts project.

It exposes the WSGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Make sure Django knows which settings module to load before the
# application object is constructed.
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
                      "WebSiteUnionFreeArts.settings")

application = get_wsgi_application()
|
dario61081/koalixcrm | koalixcrm/crm/factories/factory_resource_price.py | Python | bsd-3-clause | 1,499 | 0.000667 | # -*- coding: utf-8 -*-
import factory
import datetime
from koalixcrm.crm.models import ResourcePrice
from koalixcrm.crm.factories.factory_resource import StandardResourceFactory
from koalixcrm.crm.factories.factory_unit import StandardUnitFactory
from koalixcrm.crm.factories.factory_currency import StandardCurrencyFactory
from koalixcrm.crm.factories.factory_customer_group import StandardCustomerGroupFactory
from koalixcrm.test_support_functions import make_date_utc
class StandardResourcePriceFactory(factory.django.DjangoModelFactory):
    """Build a ResourcePrice of 100.50, valid mid-2018 through mid-2024."""

    class Meta:
        model = ResourcePrice

    # Related objects are created through their own standard factories.
    resource = factory.SubFactory(StandardResourceFactory)
    unit = factory.SubFactory(StandardUnitFactory)
    currency = factory.SubFactory(StandardCurrencyFactory)
    customer_group = factory.SubFactory(StandardCustomerGroupFactory)
    price = "100.50"
    valid_from = make_date_utc(datetime.datetime(2018, 6, 15, 0))
    valid_until = make_date_utc(datetime.datetime(2024, 6, 15, 0))
class HighResourcePriceFactory(factory.django.DjangoModelFactory):
    """Build a ResourcePrice of 250.50, valid mid-2018 through mid-2024."""

    class Meta:
        model = ResourcePrice

    # Same related objects as the standard factory; only the price differs.
    resource = factory.SubFactory(StandardResourceFactory)
    unit = factory.SubFactory(StandardUnitFactory)
    currency = factory.SubFactory(StandardCurrencyFactory)
    customer_group = factory.SubFactory(StandardCustomerGroupFactory)
    price = "250.50"
    valid_from = make_date_utc(datetime.datetime(2018, 6, 15, 0))
    valid_until = make_date_utc(datetime.datetime(2024, 6, 15, 0))
|
Manuel7AP/dobc_web_app | app/views/add_guest.py | Python | apache-2.0 | 676 | 0 | from flask import flash, redirect, render_template, url_for, request
from sqlalchemy.exc import IntegrityError
from app import app, db
from app.forms import AddGuestForm
from app.models import Guest
from app.util import flash_form_errors
@app.route('/guests/create', methods=['GET', 'POST'])
def add_guest():
    """Render the guest form on GET; persist a new guest and redirect on POST."""
    form = AddGuestForm()
    if request.method == 'POST':
        # NOTE(review): the form data is saved without validation here;
        # confirm whether validate_on_submit()/flash_form_errors was intended.
        guest = Guest(form.name.data, form.message.data)
        db.session.add(guest)
        db.session.commit()
        return redirect(url_for('view_guests'))
    # GET: show the empty form (other methods are rejected by the route).
    return render_template('add_guest.html', add_guest_form=form)
|
haphaeu/yoshimi | EulerProject/233_clever.py | Python | lgpl-3.0 | 2,521 | 0.02142 | """
Project Euler - Problem 233
Circle passing through (0,0),(N,0),(0,N),(N,N)
this is a square - so the diameter of the circle
is N*Sqrt(2), and its centre is (N/2,N/2)
Circle equation is
(X-Xc)^2+(Y-Yc)^2=N^2/2 or
(x-N/2)^2 + (y-N/2)^2 = N^2/2
for a given N, the x domain Dx is
lower bound N/2 - N*sqrt(2)/2 = N/2*(1-sqrt(2))
upper bound N/2 + N*sqrt(2)/2 = N/2*(1+sqrt(2))
same domain applied for y, Dy
for a given x, the y is
(y-N/2)^2 = N^2/2 - (x-N/2)^2
y-N/2 = sqrt( N^2/2 - (x-N/2)^2 )
y = N/2 +/- sqrt( N^2/2 - (x-N/2)^2 )
hence
y1 = N/2 + sqrt( N^2/2 - (x-N/2)^2 )
y2 = N/2 - sqrt( N^2/2 - (x-N/2)^2 )
So, for a given integer N
1- iterate integer x in domain Dx
2- given x calculate y1 and y2 (floats)
3- check if integers y1 and y2 satisfy circle equation
(x-N/2)^2 + (y1-N/2)^2 - N^2/2 < eps
(x-N/2)^2 + (y2-N/2)^2 - N^2/2 < eps
"""
from math import sqrt
import sys
from time import time
st=time()
eps=1e-10
try:
N=int(sys.argv[1])
except:
N=int(1e7)
print "using N= | %d (%.3e)" % (N, N)
lowerBound = N/2*(1-sqrt(2))
upperBound = N/2*(1+sqrt(2))
domainRange = upperBound - lowerBound
domainRangePercentage=domainRange/100.
#prepare some variables to avoid re-calculate
N_over_2=N/2.0
N_sqrd_over_2=0.5*N**2
rootFinderDelta=sqrt(N)
rootFinderTol=1.0e-4
#iterate trhough domain x
ct=0
x=int(lowerBound)
while x<=N_over_2:
#given integer x, | calculate y1 as float
y1 = N_over_2 + sqrt( N_sqrd_over_2 - (x-N_over_2)**2 )
if abs(y1-int(y1))<eps:
#due to symmetry, always 2 points count
# +2 positive and negavtive y
# +2 either sides (left and right) of the circle
ct+=4
#progress (either one of the below)
print ct, x,
#sys.stdout.write("progress %.2f%%\r" % (float(x-lowerBound)/(domainRangePercentage)))
# === estimate next x ===
# line equation, tangent to (x,y1)
a = -(x-N_over_2)/(y1-N_over_2)
b = y1 - a * x
#root finder by delta
h0=-1.0
iters=0
delta=rootFinderDelta
while abs(h0)>rootFinderTol:
x+=delta
h1=a*x+b-1.0-N_over_2-sqrt(N_sqrd_over_2-(x-N_over_2)**2)
if h0*h1<0: delta*=-0.5
h0=h1
iters+=1
x=int(x)
print "next x is %d, with tol %f after %d iters" % (x, h0, iters)
if ct>420:
print "\ncanceled"
break
x+=1
print "\nf(%d)=%d (took %.3fs)" % (N, ct, (time()-st))
#output:
# f(10000000)=60 (took 10.702s)
|
dwt/simplesuper | setup.py | Python | isc | 1,671 | 0.003593 | #!/usr/bin/env python
# encoding: utf-8
# from __future__ import unicode_literals
from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
def readme():
    """Return README.md converted to reST via pandoc, or the raw markdown.

    The conversion only matters for the rendering of the long description on
    PyPI; on any failure (missing pandoc, unavailable subprocess helper,
    conversion error) the original markdown text is returned instead.
    """
    from subprocess import CalledProcessError
    try:
        from subprocess import check_output
        return check_output(['pandoc', '--from', 'markdown', '--to', 'rst', 'README.md']).decode('utf-8')
    except (ImportError, OSError, CalledProcessError):
        print('pandoc is required to get the description as rst (as required to get nice rendering in pypi) - using the original markdown instead.',
              'See http://johnmacfarlane.net/pandoc/')
        # Close the file handle deterministically instead of leaking it.
        with open(path.join(here, 'README.md')) as readme_file:
            return readme_file.read().decode('utf-8')
setup(
    # Package identity.
    name='simplesuper',
    version='1.0.9',
    description='Simpler way to call super methods without all the repetition',
    long_description=readme(),
    # Authorship and licensing.
    author='Martin Häcker, Robert Buchholz, Felix Schwarz',
    author_email='mhaecker@mac.com, rbu@rbu.sh, felix@schwarz-online.org',
    license="ISC",
    url='https://github.com/dwt/simplesuper',
    keywords='python 2, super, convenience, api',
    classifiers=[
        "Programming Language :: Python :: 2",
        "Topic :: Software Development",
        "Topic :: Utilities",
        "Intended Audience :: Developers",
        "Development Status :: 5 - Production/Stable",
        "License :: OSI Approved :: ISC License (ISCL)",
    ],
    # Contents: a single module, with its tests discovered from it.
    py_modules=['simplesuper'],
    test_suite="simplesuper",
)
|
REBradley/WineArb | winearb/reviews/urls.py | Python | bsd-3-clause | 1,074 | 0.001862 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from . import views
urlpatterns = [
    # Review-creation form (wine entry) and the actual submission endpoint.
    url(regex=r'^$', view=views.new_wine, name='home'),
    url(regex=r'^new_review/$', view=views.new_review, name='new_review'),
    # Per-user review listing.
    url(regex=r'^review/user/(?P<username>\w+)/$',
        view=views.user_review_list, name='user_review_list'),
    # Edit/delete flows, keyed by the review primary key.
    url(regex=r'^review/edit/(?P<review_id>[0-9]+)/$',
        view=views.edit_review_form, name='edit_review_form'),
    url(regex=r'^review/edit_review/(?P<review_id>[0-9]+)/$',
        view=views.edit_review, name='edit_review'),
    url(regex=r'^review/delete_review/(?P<review_id>[0-9]+)/$',
        view=views.delete_review, name='delete_review'),
    # Read-only wine detail page.
    url(regex=r'^detail/(?P<review_id>[0-9]+)/$',
        view=views.wine_detail, name='wine_detail'),
]
|
Delosari/dazer | bin/user_conf/ManageFlow.py | Python | mit | 2,633 | 0.01861 | def DataToTreat(Catalogue = 'WHT_observations'):
def DataToTreat(Catalogue = 'WHT_observations'):
    """Return the folder/datatype configuration dict for a named catalogue.

    The historical if-chain is replaced by a single lookup table; unknown
    catalogue names still yield an empty dict, as before.
    """
    _DROPBOX = '/home/vital/Dropbox/Astrophysics/Data/'
    configurations = {
        'WHT_observations': {
            'Folder': _DROPBOX + 'WHT_observations/',
            'Datatype': 'WHT',
            'Obj_Folder': _DROPBOX + 'WHT_observations/objects/',
            'Data_Folder': _DROPBOX + 'WHT_observations/data/',
            'dataframe': _DROPBOX + 'WHT_observations/catalogue_df',
        },
        'WHT_HII_Galaxies': {
            'Folder': _DROPBOX + 'WHT_Catalogue_SulfurRegression/',
            'Datatype': 'WHT',
            'Obj_Folder': _DROPBOX + 'WHT_Catalogue_SulfurRegression/Objects/SHOC579/',
            'Data_Folder': _DROPBOX + 'WHT_Catalogue_SulfurRegression/Data/',
        },
        # NOTE: some entries use relative paths (no /home/vital prefix),
        # exactly as in the original code.
        'WHT_CandiatesObjects': {
            'Folder': 'Dropbox/Astrophysics/Data/WHT_CandiatesObjects/',
            'Datatype': 'dr10',
            'Obj_Folder': 'Dropbox/Astrophysics/Data/WHT_CandiatesObjects/',
        },
        'WHT_CandiatesObjectsFabian': {
            'Folder': _DROPBOX + 'Fabian_Catalogue/',
            'Datatype': 'dr10',
            'Obj_Folder': _DROPBOX + 'Fabian_Catalogue/',
        },
        'Marta_Catalogue': {
            'Folder': _DROPBOX + 'WHT_MartaCandidates_2016/',
            'Datatype': 'dr10',
            'Obj_Folder': _DROPBOX + 'WHT_MartaCandidates_2016/Objects/',
        },
        'SDSS_Catalogue': {
            'Folder': 'Dropbox/Astrophysics/Data/Fabian_Catalogue/',
            'Datatype': 'dr10',
            'Obj_Folder': 'Dropbox/Astrophysics/Data/Fabian_Catalogue/',
        },
        'Testing_Pypeline': {
            'Folder': 'Dropbox/Astrophysics/Data/ToCompare/',
            'Datatype': 'dr10',
            'Obj_Folder': 'Dropbox/Astrophysics/Data/ToCompare/',
        },
    }
    return configurations.get(Catalogue, {})
|
astrofrog/ginga | setup.py | Python | bsd-3-clause | 1,354 | 0.024372 | #! /usr/bin/env python
#
from distutils.core import setup
from g | inga.version import version
import os
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's dir.

    Uses a context manager so the file handle is closed instead of leaked.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
setup(
    # Package identity.
    name="ginga",
    version=version,
    author="Eric Jeschke",
    author_email="eric@naoj.org",
    description=("An astronomical (FITS) image viewer and toolkit."),
    long_description=read('README.txt'),
    license="BSD",
    keywords="FITS image viewer astronomy",
    url="http://ejeschke.github.com/ginga",
    # Core packages plus the GTK and Qt front-ends and their tests.
    packages=['ginga', 'ginga.gtkw', 'ginga.gtkw.plugins', 'ginga.gtkw.tests',
              'ginga.qtw', 'ginga.qtw.plugins', 'ginga.qtw.tests',
              'ginga.misc', 'ginga.misc.plugins',
              'ginga.icons', 'ginga.util',
              'ginga.doc'],
    package_data={'ginga.icons': ['*.ppm', '*.png'],
                  'ginga.doc': ['manual/*.html'],
                  'ginga.gtkw': ['gtk_rc'],
                  },
    data_files=[('', ['LICENSE.txt', 'README.txt'])],
    scripts=['scripts/ginga'],
    classifiers=[
        "License :: OSI Approved :: BSD License",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX",
        "Topic :: Scientific/Engineering :: Astronomy",
    ],
)
|
thaskell1/volatility | volatility/plugins/linux/lsmod.py | Python | gpl-2.0 | 25,003 | 0.014358 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License Version 2 as
# published by the Free Software Foundation. You may not use, modify or
# distribute this program under any other version of the GNU General
# Public License.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import re, os, struct
import volatility.obj as obj
import volatility.debug as debug
import volatility.plugins.linux.common as linux_common
class linux_lsmod(linux_common.AbstractLinuxCommand):
    """Gather loaded kernel modules"""

    def __init__(self, config, *args, **kwargs):
        linux_common.AbstractLinuxCommand.__init__(self, config, *args, **kwargs)
        # Command-line switches understood by this plugin.
        self._config.add_option('SECTIONS', short_option = 'T', default = None, help = 'show section addresses', action = 'store_true')
        self._config.add_option('PARAMS', short_option = 'P', default = None, help = 'show module parameters', action = 'store_true')
        self._config.add_option('BASE', short_option = 'b', default = None, help = 'Dump driver with BASE address (in hex)', action = 'store', type = 'int')
        self._config.add_option('IDC', short_option = 'c', default = None, help = 'Path to IDC file to be created for module', action = 'store', type = 'str')

    def _get_modules(self):
        """Yield module objects: just the one at BASE, or the whole list."""
        if self._config.BASE:
            module_address = int(self._config.BASE)
            yield obj.Object("module", offset = module_address, vm = self.addr_space)
        else:
            modules_addr = self.addr_space.profile.get_symbol("modules")
            modules = obj.Object("list_head", vm = self.addr_space, offset = modules_addr)
            # walk the kernel's modules list
            for module in modules.list_of_type("module", "list"):
                yield module

    def calculate(self):
        """Yield (module, sections, params) for each selected kernel module."""
        linux_common.set_plugin_members(self)
        for module in self._get_modules():
            if self._config.PARAMS:
                if not hasattr(module, "kp"):
                    debug.error("Gathering module parameters is not supported in this profile.")
                params = module.get_params()
            else:
                params = ""
            if self._config.SECTIONS:
                sections = module.get_sections()
            else:
                sections = []
            yield (module, sections, params)

    def render_text(self, outfd, data):
        """Write one line per module; optionally emit an IDA IDC symbol script."""
        for (module, sections, params) in data:
            if self._config.IDC:
                # NOTE: re-opened with "w" for every module, so the IDC file
                # ends up with only the last module's symbols (historical
                # behaviour, kept as-is).
                fd = open(self._config.IDC, "w")
                try:
                    fd.write("#include <idc.idc>\nstatic main(void) {\n")
                    for (sname, saddr) in module.get_symbols():
                        fd.write(" MakeDword(0x{0:08X});\n".format(saddr))
                        fd.write(" MakeName(0x{0:08X}, \"{1}\");\n".format(saddr, sname))
                    fd.write("}")
                finally:
                    # Previously this handle was leaked.
                    fd.close()
            outfd.write("{2:x} {0:s} {1:d}\n".format(module.name, module.init_size + module.core_size, module.obj_offset))
            # will be empty list if not set on command line
            for sect in sections:
                outfd.write("\t{0:30s} {1:#x}\n".format(sect.sect_name, sect.address))
            # will be "" if not set, otherwise will be space seperated
            if params != "":
                for param in params.split():
                    outfd.write("\t{0:100s}\n".format(param))

    def get_module(self, name):
        """Return the module object whose name equals *name*, else None."""
        ret = None
        for (module, _, _) in self.calculate():
            if str(module.name) == name:
                ret = module
                break
        return ret

    # returns a list of tuples of (name, .text start, .text end) for each module
    # include_list can contain a list of only the modules wanted by a plugin
    def get_modules(self, include_list = None):
        if not include_list:
            include_list = []
        ret = []
        for (module, _sections, _params) in self.calculate():
            if len(include_list) == 0 or str(module.name) in include_list:
                start = module.module_core
                end = start + module.core_size
                ret.append(("%s" % module.name, start, end))
        return ret
class linux_moddump(linux_common.AbstractLinuxCommand):
"""Extract loaded kernel modules"""
def __init__(self, config, *args, **kwargs):
self.name_idx = 1
self.idc_started = False
linux_common.AbstractLinuxCommand.__init__(self, config, *args, **kwargs)
config.add_option('D | UMP-DIR', short_option = 'D', default = None,
help = 'Directory in which to dump the files',
action = 'store', type = 'string')
config.add_option('REGEX', short_option = 'r',
help = 'Dump modules matching REGEX',
action = 'store', type = 'string')
config.add_option('IGNORE-CASE', short_option = 'i',
help = 'Ignore case in pattern match',
action = 'store_ | true', default = False)
config.add_option('BASE', short_option = 'b', default = None,
help = 'Dump driver with BASE address (in hex)',
action = 'store', type = 'int')
def calculate(self):
linux_common.set_plugin_members(self)
if self._config.REGEX:
try:
if self._config.IGNORE_CASE:
mod_re = re.compile(self._config.REGEX, re.I)
else:
mod_re = re.compile(self._config.REGEX)
except re.error, e:
debug.error('Error parsing regular expression: {0}'.format(e))
if self._config.BASE:
module_address = int(self._config.BASE)
yield obj.Object("module", offset = module_address, vm = self.addr_space)
else:
# walk the modules list
modules_addr = self.addr_space.profile.get_symbol("modules")
modules = obj.Object("list_head", vm = self.addr_space, offset = modules_addr)
for module in modules.list_of_type("module", "list"):
if self._config.REGEX:
if not mod_re.search(str(module.name)):
continue
yield module
def _get_header_64(self, load_addr, sect_hdr_offset, num_sects):
e_ident = "\x7f\x45\x4c\x46\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00"
e_type = "\x01\x00" # relocateble
e_machine = "\x03\x00"
e_version = "\x01\x00\x00\x00"
e_entry = "\x00" * 8
e_phoff = "\x00" * 8
e_shoff = struct.pack("<Q", sect_hdr_offset)
e_flags = "\x00\x00\x00\x00"
e_ehsize = "\x40\x00"
e_phentsize = "\x00\x00"
e_phnum = "\x00\x00"
e_shentsize = "\x40\x00"
e_shnum = struct.pack("<H", num_sects + 1) # this works as we stick the seciton we create at the end
e_shstrndx = struct.pack("<H", num_sects)
header = e_ident + e_type + e_machine + e_version + e_entry + e_phoff + e_shoff + e_flags
header = header + e_ehsize + e_phentsize + e_phnum + e_shentsize + e_shnum + e_shstrndx
if len(header) != 64:
debug.error("BUG: ELF header not bytes. %d" % len(header))
return header
def _get_header_32(self, load_addr, sect_hdr_offset, num_sects):
e_ident = "\x7f\x45\x4c\x46\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00"
e_type = |
K-3D/k3d | share/k3d/scripts/RenderManScript/tribble.py | Python | gpl-2.0 | 1,686 | 0.020166 | #python
# Load this script into a RenderManScript node to create
# what is either a Tribble or a really bad-hair-day ...
import k3d
k3d.check_node_environment(context, "RenderManScript")
impor | t sys
import ri
from ri import *
from random import *
from cgtypes import vec3
from noise import vsnoise
from sl import mix
message = """You're probably trying to run this script manually, which won't work - this script is meant to be loaded into a RenderManScript node, where it will be run at render-time.
Use the Create > RenderMan > RenderManScript menu item to create the node, then load this file into its Script property."""
if not context.has_ke | y("archive"):
k3d.ui.error_message(message)
raise
# Redirect output to our RIB archive
ri._ribout = open(str(context.archive), "w")
body_size = 5
lumpyness = 1
hair_length = 2
hair_count = 10000
hair_wavyness = 1
control_point_counts = []
control_points = []
widths = []
seed(12345)
for i in range(hair_count):
control_point_counts.append(4)
v = vec3(random() - 0.5, random() - 0.5, random() - 0.5).normalize()
p1 = v * body_size
p1 += vsnoise(p1) * lumpyness
p4 = p1 + v * hair_length
p4 += vsnoise(p4) * hair_wavyness
p2 = mix(p1, p4, 0.2)
p2 += vsnoise(p2)
p3 = mix(p1, p4, 0.8)
p3 += vsnoise(p3)
control_points.append(p1)
control_points.append(p2)
control_points.append(p3)
control_points.append(p4)
widths.append(0.08)
widths.append(0.01)
RiSurface("k3d_hair")
RiCurves(RI_CUBIC, control_point_counts, RI_NONPERIODIC, "P", control_points, "width", widths)
ri._ribout.flush()
context.render_state.use_shader(k3d.share_path() / k3d.filesystem.generic_path("shaders/surface/k3d_hair.sl"))
|
skbly7/serc | website/lab/migrations/0016_auto_20160311_1224.py | Python | mit | 582 | 0.001718 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-11 12:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Repoint Publication.author at lab.ShortNames as a nullable FK."""

    dependencies = [
        ('lab', '0015_auto_20160311_1221'),
    ]

    operations = [
        migrations.AlterField(
            model_name='publication',
            name='author',
            field=models.ForeignKey(null=True,
                                    on_delete=django.db.models.deletion.CASCADE,
                                    related_name='publication_author',
                                    to='lab.ShortNames'),
        ),
    ]
|
mF2C/COMPSs | tests/sources/python/0_multireturn/src/multireturn.py | Python | apache-2.0 | 919 | 0.006529 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
PyCOMPSs Testbench Tasks
========================
"""
# Imports
import unittest
from modules.testMultiRe | turnFunctions import testMulti | ReturnFunctions
from modules.testMultiReturnInstanceMethods import testMultiReturnInstanceMethods
from modules.testMultiReturnIntFunctions import testMultiReturnIntFunctions
from modules.testMultiReturnIntInstanceMethods import testMultiReturnIntInstanceMethods
def main():
    """Build one suite from every multi-return test case and run it verbosely."""
    loader = unittest.TestLoader()
    suite = loader.loadTestsFromTestCase(testMultiReturnFunctions)
    for case in (testMultiReturnInstanceMethods,
                 testMultiReturnIntFunctions,
                 testMultiReturnIntInstanceMethods):
        suite.addTest(loader.loadTestsFromTestCase(case))
    unittest.TextTestRunner(verbosity=2).run(suite)


if __name__ == "__main__":
    main()
|
edenzik/elastiCity | api/yelp/freebase_key_needed_final.py | Python | gpl-3.0 | 2,478 | 0.006457 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
from json import loads
from pprint import pprint
from sys import argv
import urllib
class FREEBASE_KEY():
    '''
    please input the api_key here
    '''
    # NOTE(review): a real-looking API key is committed in source; it should
    # be rotated and loaded from configuration instead of version control.
    api_key = 'AIzaSyBWsQpGo34Lk0Qa3wD0kjW5H1Nfb2m5eaM'
def get_city_id(city_name):
    """Query the Freebase search API and return the topic id of *city_name*.

    Raises IndexError/KeyError if the search returns no results.
    (The description previously sat in a stray module-level string.)
    """
    service_url = 'https://www.googleapis.com/freebase/v1/search'
    params = {
        'query': city_name,
        'key': FREEBASE_KEY.api_key,
    }
    url = service_url + '?' + urllib.urlencode(params)
    response = loads(urllib.urlopen(url).read())
    return response['result'][0]['id']
def get_city_attractions(city_name):
    """Return the /location/location topic data for *city_name*.

    The attractions live under
    ``topic['property']['/location/location/contains']`` and the geolocation
    under ``['/location/location/geolocation']`` of the returned dict.
    (These notes previously sat in a stray module-level string.)
    """
    topic_id = get_city_id(city_name)
    service_url = 'https://www.googleapis.com/freebase/v1/topic'
    params = {
        'filter': '/location/location',
        'key': FREEBASE_KEY.api_key,
    }
    url = service_url + topic_id + '?' + urllib.urlencode(params)
    return loads(urllib.urlopen(url).read())
def get_freebase_info(city_name):
    """Return the list of locations contained in *city_name* (attractions).

    The unused ``freebase_dic`` accumulator and the commented-out
    latitude/longitude extraction were removed as dead code.
    """
    city_data = get_city_attractions(city_name)
    return city_data['property']['/location/location/contains']
def main(location=None):
    """Return the city's attractions as a UTF-8 '<br /> '-joined byte string.

    The city is *location* when given, otherwise the first CLI argument.
    """
    city_name = location if location else argv[1]
    texts = [entry['text'] for entry in get_freebase_info(city_name)['values']]
    return '<br /> '.join(texts).encode('utf-8')
if __name__ == '__ | main__':
'''
just call the function get_freebase_info(city_name) and input the city_name, here is the sample
'''
| print main()
|
miltmobley/PatchTools | patchtools/lib/matcher.py | Python | apache-2.0 | 4,212 | 0.009022 | # -*- coding: utf-8 -*-
'''
Created on Apr 13, 2014
@copyright 2014, Milton C Mobley
Select strings based on caller components: prefixes, suffixes and substrings.
Regular expression matching is also supported.
Note that some patch and kernel files have utf-8 chars with code > 127. Some of
these codes are not legal utf-8 start byte codes. See functions.py for the file
read, write handling.
'''
import re
from inspect import isfunction
from patchtools.lib.ptobject import PTObject
from patchtools.lib.exceptions import PatchToolsError, PT_ParameterError
#++
class Matcher(PTObject):
    """ Implement filter selection of strings
    """
    #--

    #++
    def __init__(self, params):
        """ Constructor

        Args:
            params (dict): parameters
                match  (list, optional): string match pattern(s)
                prefix (list, optional): string start pattern(s)
                suffix (list, optional): string end pattern(s)
                substr (list, optional): substring pattern(s)
                regexp (list, optional): regular expression pattern(s)
                funcs  (list, optional): callback function(s)

        Raises:
            PT_ParameterError on invalid parameters

        Notes:
            At least one option must be specified for the filter to have an effect.
            Regular expression pattern strings should be coded using the r"..." string form.
        """
        #--
        self.name = 'Matcher'
        if (not isinstance(params, dict)):
            raise PT_ParameterError(self.name, 'params')
        self.prefix_patterns = self._check_optional_param(params, 'prefix', list, None)
        self.suffix_patterns = self._check_optional_param(params, 'suffix', list, None)
        self.substr_patterns = self._check_optional_param(params, 'substr', list, None)
        self.match_patterns = self._check_optional_param(params, 'match', list, None)
        regexp = self._check_optional_param(params, 'regexp', list, None)
        if (isinstance(regexp, list)):
            try:
                self.regex_patterns = [re.compile(s) for s in regexp]
            except Exception as e:
                # Surface a bad pattern as a parameter error carrying re's message.
                raise PT_ParameterError(self.name, str(e))
        else:
            self.regex_patterns = None
        if ('funcs' in params):
            cbs = params['funcs']
            for cb in cbs:
                if (not isfunction(cb)):
                    raise PatchToolsError(self.name, 'callback must be a function')
            self.callbacks = cbs
        else:
            self.callbacks = None

    #++
    def __call__(self, string):
        """ Try to match string to stored filter

        Args:
            string (string): string to match

        Returns:
            text of the matching pattern, or None
        """
        #--
        # A leftover hard-coded debugging trap on
        # 'compatible = "ti,am3359-tscadc"' was removed here.
        if (self.match_patterns is not None):
            for pattern in self.match_patterns:
                if (string == pattern):
                    return pattern
        if (self.prefix_patterns is not None):
            for pattern in self.prefix_patterns:
                if (string.startswith(pattern)):
                    return pattern
        if (self.suffix_patterns is not None):
            for pattern in self.suffix_patterns:
                if (string.endswith(pattern)):
                    return pattern
        if (self.substr_patterns is not None):
            for pattern in self.substr_patterns:
                if (pattern in string):
                    return pattern
        if (self.regex_patterns is not None):
            for pattern in self.regex_patterns:
                ret = pattern.match(string)
                if (ret is not None):
                    return str(pattern)
        if (self.callbacks is not None):
            for callback in self.callbacks:
                if callback(string):
                    return str(callback)
        return None
ayseyo/oclapi | django-nonrel/ocl/manage.py | Python | mpl-2.0 | 318 | 0 | #!/ | usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point django-configurations at the local settings before importing
    # its management entry point.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'oclapi.settings.local')
    os.environ.setdefault('DJANGO_CONFIGURATION', 'Local')

    from configurations.management import execute_from_command_line

    execute_from_command_line(sys.argv)
|
moul/splitfs | fusepy/fuse.py | Python | mit | 23,924 | 0.00163 | # Copyright (c) 2008 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import division
from ctypes import *
from ctypes.util import find_library
from errno import *
from functools import partial
from os import strerror
from platform import machine, system
from signal import signal, SIGINT, SIG_DFL
from stat import S_IFDIR
from traceback import print_exc
# Public fusepy version string.
__version__ = '1.1'

_system = system()
_machine = machine()

# Locate the platform's FUSE shared library; on OS X the fuse4x/osxfuse
# forks are tried before plain 'fuse'.
if _system == 'Darwin':
    _libfuse_path = find_library('fuse4x') or find_library('osxfuse') or \
        find_library('fuse')
else:
    _libfuse_path = find_library('fuse')
if not _libfuse_path:
    raise EnvironmentError('Unable to find libfuse')

if _system == 'Darwin':
    _libiconv = CDLL(find_library('iconv'), RTLD_GLOBAL) # libfuse dependency
_libfuse = CDLL(_libfuse_path)
# Distinguish the legacy MacFuse ABI (exports macfuse_version) from newer forks.
if _system == 'Darwin' and hasattr(_libfuse, 'macfuse_version'):
    _system = 'Darwin-MacFuse'
class c_timespec(Structure):
    # Mirrors POSIX `struct timespec`: seconds + nanoseconds.
    _fields_ = [('tv_sec', c_long), ('tv_nsec', c_long)]
class c_utimbuf(Structure):
_fields_ = [('actime', c_timespec), ('modtime', c_timespec)]
class c_stat(Structure):
pass # Platform dependent
if _system in ('Darwin', 'Darwin-MacFuse', 'FreeBSD'):
ENOTSUP = 45
c_dev_t = c_int32
c_fsblkcnt_t = c_ulong
c_fsfilcnt_t = c_ulong
c_gid_t = c_uint32
c_mode_t = c_uint16
c_off_t = c_int64
c_pid_t = c_int32
c_uid_t = c_uint32
setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t, c_int, c_uint32)
getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t, c_uint32)
if _system == 'Darwin':
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_mode', c_mode_t),
('st_nlink', c_uint16),
('st_ino', c_uint64),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec),
('st_birthtimespec', c_timespec),
('st_size', c_off_t),
('st_blocks', c_int64),
('st_blksize', c_int32),
('st_flags', c_int32),
('st_gen', c_int32),
('st_lspare', c_int32),
('st_qspare', c_int64)]
else:
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_uint32),
('st_mode', c_mode_t),
('st_nlink', c_uint16),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec),
('st_size', c_off_t),
('st_blocks', c_int64),
('st_blksize', c_int32)]
elif _system == 'Linux':
ENOTSUP = 95
c_dev_t = c_ulonglong
c_fsblkcnt_t = c_ulonglong
c_fsfilcnt_t = c_ulonglong
c_gid_t = c_uint
c_mode_t = c_uint
c_off_t = c_longlong
c_pid_t = c_int
c_uid_t = c_uint
setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int)
getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t)
if _machine == 'x86_64':
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_ulong),
('st_nlink', c_ulong),
('st_mode', c_mode_t),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('__pad0', c_int),
('st_rdev', c_dev_t),
('st_size', c_off_t),
('st_blksize', c_long),
('st_blocks', c_long),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec)]
elif _machine == 'ppc':
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_ulonglong),
('st_mode', c_mode_t),
('st_nlink', c_uint),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('__pad2', c_ushort),
('st_size', c_off_t),
('st_blksize', c_long),
('st_blocks', c_longlong),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec)]
else:
# i686, use as fallback for everything else
c_stat._fields_ = [
('st_dev', c_dev_t),
('__pad1', c_ushort),
('__st_ino', c_ulong),
('st_mode', c_mode_t),
('st_nlink', c_uint),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('__pad2', c_ushort),
('st_size', c_off_t),
('st_blksize', c_long),
('st_blocks', c_longlong),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec),
('st_ino', c_ulonglong)]
else:
raise NotImplementedError('%s is not supported.' % _system)
class c_statvfs(Structure):
_fields_ = [
('f_bsize', c_ulong),
('f_frsize', c_ulong),
('f_blocks', c_fsblkcnt_t),
('f_bfree', c_fsblkcnt_t),
('f_bavail', c_fsblkcnt_t),
('f_files', c_fsfilcnt_t),
('f_ffree', c_fsfilcnt_t),
('f_favail', c_fsfilcnt_t)]
if _system == 'FreeBSD':
c_fsblkcnt_t = c_uint64
c_fsfilcnt_t = c_uint64
setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int)
getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t)
class c_statvfs(Structure):
_fields_ = [
('f_bavail', c_fsblkcnt_t),
('f_bfree', c_fsblkcnt_t),
('f_blocks', c_fsblkcnt_t),
('f_favail', c_fsfilcnt_t),
('f_ffree', c_fsfilcnt_t),
('f_files', c_fsfilcnt_t),
('f_bsize', c_ulong),
('f_flag', c_ulong),
('f_frsize', c_ulong)]
class fuse_file_info(Structure):
_fields_ = [
('flags', c_int),
('fh_old', c_ulong),
('writepage', c_int),
('direct_io', c_uint, | 1),
('keep_cache', c_uint, 1),
('flush', c_uint, 1),
('padding', c_uint, 29),
('fh', c_uint64),
('lock_owner', c_uint64 | )]
class fuse_context(Structure):
_fields_ = [
('fuse', c_voidp),
('uid', c_uid_t),
('gid', c_gid_t),
('pid', c_pid_t),
('private_data', c_voidp)]
_libfuse.fuse_get_context.restype = POINTER(fuse_context)
class fuse_operations(Structure):
_fields_ = [
('getattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat))),
('readlink', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)),
('getdir', c_voidp), # Deprecated, use readdir
('mknod', CFUNCTYPE(c_int, c_char_p, c_mode_t, c_dev_t)),
('mkdir', CFUNCTYPE(c_int, c_char_p, c_mode_t)),
('unlink', CFUNCTYPE(c_int, c_char_p)),
('rmdir', CFUNCTYPE(c_int, c_char_p)),
('symlink', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('rename', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('link', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('chmod', CFUNCTYPE(c_int, c_char_p, c_mode_t)),
('chown', CFUNCTYPE(c_int, c_char_p, c_uid_t, c_gid_t)),
('truncate', CFUNCTYPE(c_int, c_c |
rcdailey/videosort | lib/guessit/transfo/guess_episodes_rexps.py | Python | gpl-3.0 | 13,659 | 0.004759 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from guessit.plugins.transformers import Transformer
from guessit.matcher import GuessFinder
from guessit.patterns import sep, build_or_pattern
from guessit.containers import PropertiesContainer, WeakValidator, NoValidator, ChainedValidator, DefaultValidator, \
FormatterValidator
from guessit.patterns.numeral import numeral, digital_numeral, parse_numeral
import re
class GuessEpisodesRexps(Transformer):
def __init__(self):
Transformer.__init__(self, 20)
range_separators = ['-', 'to', 'a']
discrete_separators = ['&', 'and', 'et']
of_separators = ['of', 'sur', '/', '\\']
season_words = ['seasons?', 'saisons?', 'series?']
episode_words = ['episodes?']
season_markers = ['s']
episode_markers = ['e', 'ep']
discrete_sep = sep
for range_separator in range_separators:
discrete_sep = discrete_sep.replace(range_separator, '')
discrete_separators.append(discrete_sep)
all_separators = list(range_separators)
all_separators.extend(discrete_separators)
self.container = PropertiesContainer(enhance=False, canonical_from_pattern=False)
range_separators_re = re.compile(build_or_pattern(range_separators), re.IGNORECASE)
discrete_separators_re = re.compile(build_or_pattern(discrete_separators), re.IGNORECASE)
all_separators_re = re.compile(build_or_pattern(all_separators), re.IGNORECASE)
of_separators_re = re.compile(build_or_pattern(of_separators, escape=True), re.IGNORECASE)
season_words_re = re.compile(build_or_pattern(season_words), re.IGNORECASE)
episode_words_re = re.compile(build_or_pattern(episode_words), re.IGNORECASE)
season_markers_re = re.compile(build_or_pattern(season_markers), re.IGNORECASE)
episode_markers_re = re.compile(build_or_pattern(episode_markers), re.IGNORECASE)
def list_parser(value, propertyListName, discrete_separators_re=discrete_separators_re, range_separators_re=range_separators_re, allow_discrete=False, fill_gaps=False):
discrete_elements = filter(lambda x: x != '', discrete_separators_re.split(value))
discrete_elements = [x.strip() for x in discrete_elements]
proper_discrete_elements = []
i = 0
while i < len(discrete_elements):
if i < len(discrete_elements) - 2 and range_separators_re.match(discrete_elements[i+1]):
proper_discrete_elements.append(discrete_elements[i] + discrete_elements[i+1] + discrete_elements[i+2])
i += 3
else:
match = range_separators_re.search(discrete_elements[i])
if match and match.start() == 0:
proper_discrete_elements[i-1] = proper_discrete_elements[i-1] + discrete_elements[i]
elif match and match.end() == len(discrete_elements[i]):
proper_discrete_elements.append(discrete_elements[i] + discrete_elements[i + 1])
else:
proper_discrete_elements.append(discrete_elements[i])
i += 1
discrete_elements = proper_discrete_elements
ret = []
for discrete_element in discrete_elements:
range_values = filter(lambda x: x != '', range_separators_re.split(discrete_element))
range_values = [x. | strip() for x in range_values]
if len(range_values) > 1:
for x in range(0, len(range_values) - 1):
start_range_ep = parse_numeral(range_values[x])
end_range_ep = parse_numeral(range_values[x+1])
for range_ep in range(start_range_ep, end_range_ep + 1):
if range_ep not in ret:
ret.append(range_ep)
| else:
discrete_value = parse_numeral(discrete_element)
if discrete_value not in ret:
ret.append(discrete_value)
if len(ret) > 1:
if not allow_discrete:
valid_ret = []
# replace discrete elements by ranges
valid_ret.append(ret[0])
for i in range(0, len(ret) - 1):
previous = valid_ret[len(valid_ret) - 1]
if ret[i+1] < previous:
pass
else:
valid_ret.append(ret[i+1])
ret = valid_ret
if fill_gaps:
ret = list(range(min(ret), max(ret) + 1))
if len(ret) > 1:
return {None: ret[0], propertyListName: ret}
if len(ret) > 0:
return ret[0]
return None
def episode_parser_x(value):
return list_parser(value, 'episodeList', discrete_separators_re=re.compile('x', re.IGNORECASE))
def episode_parser_e(value):
return list_parser(value, 'episodeList', discrete_separators_re=re.compile('e',re.IGNORECASE), fill_gaps=True)
def episode_parser(value):
return list_parser(value, 'episodeList')
def season_parser(value):
return list_parser(value, 'seasonList')
class ResolutionCollisionValidator(object):
def validate(self, prop, string, node, match, entry_start, entry_end):
return len(match.group(2)) < 3 #limit
self.container.register_property(None, r'(' + season_words_re.pattern + sep + '?(?P<season>' + numeral + ')' + sep + '?' + season_words_re.pattern + '?)', confidence=1.0, formatter=parse_numeral)
self.container.register_property(None, r'(' + season_words_re.pattern + sep + '?(?P<season>' + digital_numeral + '(?:' + sep + '?' + all_separators_re.pattern + sep + '?' + digital_numeral + ')*)' + sep + '?' + season_words_re.pattern + '?)' + sep, confidence=1.0, formatter={None: parse_numeral, 'season': season_parser}, validator=ChainedValidator(DefaultValidator(), FormatterValidator('season', lambda x: len(x) > 1 if hasattr(x, '__len__') else False)))
self.container.register_property(None, r'(' + season_markers_re.pattern + '(?P<season>' + digital_numeral + ')[^0-9]?' + sep + '?(?P<episodeNumber>(?:e' + digital_numeral + '(?:' + sep + '?[e-]' + digital_numeral + ')*)))', confidence=1.0, formatter={None: parse_numeral, 'episodeNumber': episode_parser_e, 'season': season_parser}, validator=NoValidator())
#self.container.register_property(None, r'[^0-9]((?P<season>' + digital_numeral + ')[^0-9 .-]?-?(?P<episodeNumber>(?:x' + digital_numeral + '(?:' + sep + '?[x-]' + digital_numeral + ')*)))', confidence=1.0, formatter={None: parse_numeral, 'episodeNumber': episode_parser_x, 'season': season_parser}, validator=ChainedValidator(DefaultValidator(), ResolutionCollisionValidator()))
self.container.register_property(None, sep + r'((?P<season>' + digital_numeral + ')' + sep + '' + '(?P<episodeNumber>(?:x' + sep + digital_numeral + '(?:' + sep + '[x-]' + digital_numeral + ')*)))', confidence=1.0, formatter={None: parse_numeral, 'episodeNumber': e |
smmribeiro/intellij-community | python/testData/quickFixes/PyMakeFunctionReturnTypeQuickFixTest/lambda.py | Python | apache-2.0 | 127 | 0.070866 | def func() -> int:
return <warning descr="Expected type 'int', got '(x: Any) -> int' instead">l | amb | da x: 42<caret></warning> |
open-power-ref-design-toolkit/os-services | osa/dbaas_ui/dbaas_ui/shortcuts/workflows/workflows.py | Python | apache-2.0 | 16,518 | 0.000061 | # Copyright 2017, IBM US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import binascii
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api as dash_api
from openstack_dashboard.dashboards.project.instances \
import utils as instance_utils
from openstack_dashboard.dashboards.project.instances.workflows \
import create_instance as dash_create_instance
from openstack_dashboard.api.glance import glanceclient as glance_client
from trove_dashboard import api as trove_api
LOG = logging.getLogger(__name__)
def parse_datastore_and_version_text(datastore_and_version):
if datastore_and_version:
datastore, datastore_version = datastore_and_version.split('-', 1)
return datastore.strip(), datastore_version.strip()
return None, None
class SetInstanceDetailsAction(workflows.Action):
# Hide availability zone (but keep it so we have a value to retrieve)
availability_zone = forms.ChoiceField(
label=_("Availability Zone"),
widget=forms.HiddenInput(), # Hide availability zone for now
required=False)
name = forms.CharField(max_length=80, label=_("Instance Name"))
volume = forms.IntegerField(label=_("Volume Size"),
min_value=0,
initial=1,
help_text=_("Size of the volume in GB."))
datastore = forms.ChoiceField(
label=_("Datastore"),
help_text=_("Type and version of datastore."),
widget=forms.Select(attrs={
'class': 'switchable',
'data-slug': 'datastore'
}))
class Meta(object):
name = _("Details")
help_text_template = "project/databases/_launch_details_help.html"
def clean(self):
datastore_and_version = self.data.get("datastore", None)
if not datastore_and_version:
msg = _("You must select a datastore type and version.")
self._errors["datastore"] = self.error_class([msg])
else:
datastore, datastore_version = parse_datastore_and_version_text(
binascii.unhexlify(datastore_and_version))
field_name = self._build_flavor_field_name(datastore,
datastore_version)
flavor = self.data.get(field_name, None)
if not flavor:
msg = _("You must select a flavor.")
self._errors[field_name] = self.error_class([msg])
return self.cleaned_data
def handle(self, request, context):
datastore_and_version = context["datastore"]
if datastore_and_version:
datastore, datastore_version = parse_datastore_and_version_text(
binascii.unhexlify(context["datastore"]))
field_name = self._build_flavor_field_name(datastore,
datastore_version)
flavor = self.data[field_name]
if flavor:
context["flavor"] = flavor
return context
return None
@memoized.memoized_method
def availability_zones(self, request):
try:
return dash_api.nova.availability_zone_list(request)
except Exception:
LOG.exception("Exception while obtaining availablity zones")
self._availability_zones = []
def populate_availability_zone_choices(self, request, context):
try:
zones = self.availability_zones(request)
except Exception:
zones = []
redirect = reverse('horizon:dbaas_ui:index')
exceptions.handle(request,
_('Unable to retrieve availability zones.'),
redirect=redirect)
zone_list = [(zone.zoneName, zone.zoneName)
for zone in zones if zone.zoneState['available']]
zone_list.sort()
if not zone_list:
zone_list.insert(0, ("", _("No availability zones found")))
elif len(zone_list) > 1:
zone_list.insert(0, ("", _("Any Availability Zone")))
return zone_list
@memoized.memoized_method
def datastore_flavors(self, request, datastore_name, datastore_version):
try:
return trove_api.trove.datastore_flavors(
request, datastore_name, datastore_version)
except Exception:
LOG.exception("Exception while obtaining flavors list")
redirect = reverse("horizon:dbaas_ui:index")
exceptions.handle(request,
_('Unable to obtain flavors.'),
redirect=redirect)
@memoized.memoized_method
def datastores(self, request):
try:
return trove_api.trove.datastore_list(request)
except Exception:
LOG.exception("Exception while obtaining datastores list")
self._datastores = []
@memoized.memoized_method
def datastore_versions(self, request, datastore):
try:
return trove_api.trove.datastore_version_list(request, datastore)
except Exception:
LOG.exception("Exception while obtaining datastore version list")
self._datastore_versions = []
def glance_image_ids(self, request):
try:
image_ids = []
image_iter = glance_client(request).images.list()
for image in image_iter:
image_ids.append(image.id)
return image_ids
except Exception:
LOG.exception("Exception while obtaining glance image list")
def populate_datastore_choices(self, request, context):
choices = ()
datastores = self.datastores(request)
image_ids = self.glance_image_ids(request)
for ds in datastores or []:
versions = self.datastore_versions(request, ds.name)
if versions:
# only add to choices if datastore has at least one version
version_choices = ()
for v in versions:
if hasattr(v, 'active') and not v.active:
| LOG.error("Invalid datastore (%s) and version (%s)"
". Version is not active.", ds.name, v. | name)
continue
if hasattr(v, 'image') and v.image not in image_ids:
LOG.error("Invalid datastore (%s) and version (%s)"
". Version does not have an image "
"associated with it", ds.name, v.name)
continue
selection_text = self._build_datastore_display_text(
ds.name, v.name)
widget_text = self._build_widget_field_name(ds.name,
v.name)
version_choices = (version_choices +
((widget_text, selection_text),))
self._add_datastore_flavor_field(request, ds.name, v.name)
choices = choices + version_choices
return choices
def _add_datastore_flavor_field(self,
request,
datastore,
datastore_version):
name = self._build_widget_f |
Yangqing/caffe2 | scripts/get_python_cmake_flags.py | Python | apache-2.0 | 1,130 | 0.000885 | ## @package get_python_cmake_flags
# Module scripts.get_python_cmake_flags
##############################################################################
# Use this script to find your preferred python installation.
##############################################################################
#
# You can use the following to build with your preferred version of python
# if your installation is not being properly detected by CMake.
#
# mkdir -p build && cd build
# cmake $(p | ython ../scripts/get_python_libs.py) ..
# make
#
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils import sysconfig
import os
import sys
import platform
# Flags to print to stdout
flags = ''
inc = sysconfig.get_python_inc( | )
lib = sysconfig.get_config_var("LIBDIR")
# macOS specific
if sys.platform == "darwin":
lib = os.path.dirname(lib) + '/Python'
if os.path.isfile(lib):
flags += '-DPYTHON_LIBRARY={lib} '.format(lib=lib)
if os.path.isfile(inc + '/Python.h'):
flags += '-DPYTHON_INCLUDE_DIR={inc} '.format(inc=inc)
print(flags, end='')
|
Sikilabs/sikilabs | sikilabs/wsgi.py | Python | mit | 493 | 0 | " | ""
WSGI config for sikilabs project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from mezzanine.utils.conf import real_project_name
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"%s.settings" % real_project_name("sikilabs"))
application = get_wsgi_ | application()
|
charany1/googlecl | src/googlecl/picasa/service.py | Python | mit | 14,137 | 0.006437 | # Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service details and instances for the Picasa service."""
from __future__ import with_statement
__author__ = 'tom.h.miller@gmail.com (Tom Miller)'
import logging
import os
import urllib
import time
import gdata.photos
from gdata.photos.service import PhotosService, GooglePhotosException
import googlecl
import googlecl.base
import googlecl.service
import googlecl.picasa
import googlecl.calendar.date
# Shortening the names of these guys.
safe_encode = googlecl.safe_encode
safe_decode = googlecl.safe_decode
LOG = logging.getLogger(googlecl.picasa.LOGGER_NAME)
SUPPORTED_VIDEO_TYPES = {'wmv': 'video/x-ms-wmv',
'avi': 'video/avi',
'3gp': 'video/3gpp',
'mov': 'video/quicktime',
'qt': 'video/quicktime',
'mp4': 'video/mp4',
'mpa': 'video/mpeg',
'mpe': 'video/mpeg',
'mpeg': 'video/mpeg',
'mpg': 'video/mpeg',
'mpv2': 'video/mpeg',
'mpeg4': 'video/mpeg4',}
# XXX gdata.photos.service contains a very strange check against (outdated)
# allowed MIME types. This is a hack to allow videos to be uploaded.
# We're creating a list of the allowed video types stripped of the initial
# 'video/', eliminating duplicates via set(), then converting to tuple()
# since that's what gdata.photos.service uses.
gdata.photos.service.SUPPORTED_UPLOAD_TYPES += \
tuple(set([vtype.split('/')[1] for vtype in SUPPORTED_VIDEO_TYPES.values()]))
DOWNLOAD_VIDEO_TYPES = {'swf': 'application/x-shockwave-flash',
'mp4': 'video/mpeg4',}
class PhotosServiceCL(PhotosService, googlecl.service.BaseServiceCL):
"""Extends gdata.photos.service.PhotosService for the command line.
This class adds some features focused on using Picasa via an installed app
with a command line interface.
"""
def __init__(self, config):
"""Constructor."""
PhotosService.__init__(self)
googlecl.service.BaseServiceCL.__init__(self,
googlecl.picasa.SECTION_HEADER,
config)
def build_entry_list(self, user='default', titles=None, query=None,
force_photos=False, photo_title=None):
"""Build a list of entries of either photos or albums.
If no title is specified, entries will be of photos matching the query.
If no query is specified, entries will be of albums matching the title.
If both title and query are specified, entries will be of photos matching
the query that are also in albums matching the title.
Keyword arguments:
user: Username of the owner of the albums / photos (Default 'default').
titles: list Titles of the albums (Default None).
query: Query for photos, url-encoded (Default None).
force_photos: If true, returns photo entries, even if album entries would
typically be returned. The entries will be for all photos
in each album.
photo_title: Title of the photo(s) to return. Default None for all photos.
Returns:
A list of entries, as specified above.
"""
album_entry = []
if titles[0] or not(titles[0] or query):
album_entry = self.GetAlbum(user=user, titles=titles)
if photo_title or query or force_photos:
uri = '/data/feed/api/user/' + user
if query and not album_entry:
entries = self.GetEntries(uri + '?kind=photo&q=' + query, photo_title)
else:
entries = []
uri += '/albumid/%s?kind=photo'
if query:
uri += '&q=' + query
for album in album_entry:
photo_entries = self.GetEntries(uri % album.gphoto_id.text,
photo_title)
entries.extend(photo_entries)
else:
entries = album_entry
return entries
def create_album(self, title, summary, access, date):
"""Create photo album
Args:
title: Title of the album.
summary: Summary or description of the album.
access: Access level string. See the picasa package __init__ file for
valid values.
date: Date on the album, as a string. If eveluates to False, uses today.
Returns:
AlbumEntry of newly created album.
"""
if date:
parser = googlecl.calendar.date.DateParser()
date = parser.determine_day(date, shift_dates=False)
if date:
timestamp = time.mktime(date.timetuple())
timestamp_ms = '%i' % int((timestamp * 1000))
else:
LOG.error('Could not parse date %s. (Picasa only takes day info)' %
date)
timestamp_ms = ''
else:
timestamp_ms = ''
access = googlecl.picasa._map_access_string(access)
return self.InsertAlbum(title=title, summary=summary,
access=access,
timestamp=timestamp_ms)
CreateAlbum = create_album
def download_album(self, base_path, user, video_format='mp4', titles=None,
photo_title=None):
"""Download an album to the local host.
Keyword arguments:
base_path: Path on the filesystem to copy albums to. Each album will
be stored in base_path/<album title>. If base_path does not
exist, it and each non-existent parent directory will be
created.
user: User whose albums are being retrieved. (Default 'default')
titles: list or string Title(s) that the album(s) should have.
Default None, for all albums.
"""
def _get_download_info(photo_or_video, video_format):
"""Get download link and extension for photo or video.
video_format must be in DOWNLOAD_VIDEO_TYPES.
Returns:
(url, extension)
"""
wanted_content = None
for content in photo_or_video.media.content:
if content.medium == 'image' and not wanted_content:
wanted_content = content
elif content.type == DOWNLOAD_VID | EO_TYPES[ | video_format]:
wanted_content = content
if not wanted_content:
LOG.error('Did not find desired medium!')
LOG.debug('photo_or_video.media:\n' + photo_or_video.media)
return None
elif wanted_content.medium == 'image':
url = googlecl.picasa.make_download_url(photo_or_video.content.src)
mimetype = photo_or_video.content.type
extension = mimetype.split('/')[1]
else:
url = wanted_content.url
extension = video_format
return (url, extension)
# End _get_download_info
if not user:
user = 'default'
entries = self.GetAlbum(user=user, titles=titles)
if video_format not in DOWNLOAD_VIDEO_TYPES.keys():
LOG.error('Unsupported video format: ' + video_format)
LOG.info('Try one of the following video formats: ' +
str(DOWNLOAD_VIDEO_TYPES.keys())[1:-1])
video_format = 'mp4'
LOG.info('Downloading videos as ' + video_format)
for album in entries:
album_path = os.path.join(base_path, safe_decode(album.title.text))
album_concat = 1
if os.path.exists(album_path):
base_album_path = album_path
while os.path.exists(album_path):
album_path = base_album_path + '-%i' % album_concat
album_concat += 1
os.makedirs(album_path)
uri = ('/data/feed/api/user/%s/albumid/%s?kind=photo' %
(user, album.gphoto_id.text))
|
andrewtron3000/jampy | generator_matrix.py | Python | bsd-3-clause | 4,610 | 0.015835 | import random
import time
import sys
import Csound
import subprocess
import base64
import hashlib
import matrixmusic
csd = None
oscillator = None
buzzer = None
voice = None
truevoice = None
song_publisher = None
def add_motif(instrument, req):
global csd
time = req.motif_start_time
for note in req.score:
if note != "P":
csd.score(instrument.note(time,
req.note_duration,
note,
req.motif_amplitude))
time += req.internote_delay
def handle_create_song(req):
global csd, oscillator, buzzer, voice
global song_publisher
s = 'temp'
csd = Csound.CSD('%s.csd' % s)
csd.orchestra(oscillator, buzzer, voice)
for motif in req.motifs:
if motif.instrument == 'oscil':
add_motif(oscillator, motif)
elif motif.instrument == 'buzzer':
add_motif(buzzer, motif)
elif motif.instrument == 'voice':
add_motif(voice, motif)
csd.output()
args = ['csound', '-d', '%s.csd' % s]
subprocess.call(args)
f = open('%s.csd' % s)
csd_string = f.read()
f.close()
song_name = '%s.ogg' % req.song_name
args = ['oggenc', '-o', song_name, '%s.wav' % s]
subprocess.call(args)
args = ['vorbiscomment', '-a', song_name,
'-t', "ARTIST=%s" % req.artist,
'-t', "TITLE=%s" % req.song_name,
'-t', "ALBUM=%s" % req.album,
'-t', "GENRE=%s" % 'Electronica',
'-t', "CSOUND=%s" % csd_string]
subprocess.call(args)
args = ['ogg123', song_name]
subprocess.call(args)
class Motif(object):
def __init__(self, motif_start_time, motif_repeat, motif_amplitude, score, note_duration, internote_delay, instrument):
self.motif_start_time = motif_start_time
self.motif_repeat = motif_repeat
self.motif_amplitude = motif_amplitude
self.score = score
self.note_duration = note_duration
self.internote_delay = internote_delay
self.instrument = instrument
class Request(object):
def __init__(self, song_name, artist, album, motifs):
self.song_name = song_name
self.artist = artist
self.album = album
self.motifs = motifs
def heads():
return (random.random() < 0.5)
def biasedFlip(p):
return (random.random() < p)
def selectInstrument():
if heads():
return 'oscil'
else:
return 'buzzer'
def selectInterval():
return 0.15, 0.05
def triggerCreate(song_name, artist, album, motifs):
handle_create_song(Request(song_name, artist, album, motifs))
def random_note():
bases = ["A", "B", "C", "D", "E", "F", "G"]
unsharpable = ["E", "B"]
unflatable = ["C", "F"]
octaves = map(str, range(2,6))
mods = ["", "#"]
base = random.choice(bases)
mods = [""]
if not base in unsharpable:
mods.append("#")
mod = random.choice(mods)
octave = random.choice(octaves)
return base + mod + octave
def random_motif(start_time):
#notes = " ".join([random_note() for i in range(10)])
#notes = "A3 B3 D4 E4 F#4 A4 B4 D5 E5 F#5 A5 B5 D6 E6 F#6 P".split(" ")
notes = "C3 C#3 E3 F3 G3 G#3 B4 C4 C#4 E4 F4 G4 G#4".split(" ")
score = matrixmusic.create_pair_score(notes, 15) * 5
print("Random score: " + str(score))
opts = [("voice", 1.0, 1.5),
#("oscil", 1.0, 1.5),
("voice", 3.0, 1.5)]
#("oscil", 3.0, 1.5)]
opt = random.choice(opts)
return Motif(start_time, 12, 0.05, score, opt[1], opt[2], opt[0])
if __name__ == "__main__":
if len(sys.argv) < 3:
print "Usage: %s <artist> <album name>" % sys.argv[0]
exit()
else:
artist = sys.argv[1]
album = sys.argv[2]
global song_publisher, oscillator, buzzer, voice
oscillator = Csound.oscil()
buzzer = Csound.buzz()
voice = Csound.fmvoice()
#voice = Csound.voice()
for i in xrange(1, 16384):
song_title = "song_%d" % i
#motifs = [ Motif(0.0, 12, 0.32, "A3 B3 D4 E4 F#4 A4 B4 D5 E5 F#5 A5 B5 D6 E6 F#6", 0.15, 0.05, selectInstrument()) ]
motifs = [random_motif( | i*0.8) for i in range(3)]
# if biasedFlip(0.8):
# motifs.append(Motif(3.0, 10, 0.32, "A3 B3 D4 E4 F#4 A4 B4 D5 E5 F#5 A5 B5 D6 E6 F#6", a, b, selectInstrument()))
# if biasedFlip(0.9):
# motifs.append(Motif(6.0, 4, 0.10, "A2 B2 D3 D3 F#3 A3 B3 D4 E4 F#4 A4 B4 D5 E5 F#5", 0.3, 0.1, selectInstrument()))
triggerCreate(song_title, artist, album, motifs)
print "Created song %s" % song_title
time.sleep(10 | )
|
shootstar/novatest | nova/api/openstack/wsgi.py | Python | apache-2.0 | 44,600 | 0.000112 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# | a copy of the License at
#
# http://www.apache.org/licenses/LICENSE | -2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import math
import time
from xml.dom import minidom
from lxml import etree
import webob
from nova.api.openstack import xmlutil
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import wsgi
XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
XMLNS_ATOM = 'http://www.w3.org/2005/Atom'
LOG = logging.getLogger(__name__)
# The vendor content types should serialize identically to the non-vendor
# content types. So to avoid littering the code with both options, we
# map the vendor to the other when looking up the type
_CONTENT_TYPE_MAP = {
'application/vnd.openstack.compute+json': 'application/json',
'application/vnd.openstack.compute+xml': 'application/xml',
}
SUPPORTED_CONTENT_TYPES = (
'application/json',
'application/vnd.openstack.compute+json',
'application/xml',
'application/vnd.openstack.compute+xml',
)
_MEDIA_TYPE_MAP = {
'application/vnd.openstack.compute+json': 'json',
'application/json': 'json',
'application/vnd.openstack.compute+xml': 'xml',
'application/xml': 'xml',
'application/atom+xml': 'atom',
}
# These are typically automatically created by routes as either defaults
# collection or member methods.
_ROUTES_METHODS = [
'create',
'delete',
'show',
'update',
]
class Request(webob.Request):
"""Add some OpenStack API-specific logic to the base webob.Request."""
def __init__(self, *args, **kwargs):
super(Request, self).__init__(*args, **kwargs)
self._extension_data = {'db_items': {}}
def cache_db_items(self, key, items, item_key='id'):
"""
Allow API methods to store objects from a DB query to be
used by API extensions within the same API request.
An instance of this class only lives for the lifetime of a
single API request, so there's no need to implement full
cache management.
"""
db_items = self._extension_data['db_items'].setdefault(key, {})
for item in items:
db_items[item[item_key]] = item
def get_db_items(self, key):
"""
Allow an API extension to get previously stored objects within
the same API request.
Note that the object data will be slightly stale.
"""
return self._extension_data['db_items'][key]
def get_db_item(self, key, item_key):
"""
Allow an API extension to get a previously stored object
within the same API request.
Note that the object data will be slightly stale.
"""
return self.get_db_items(key).get(item_key)
def cache_db_instances(self, instances):
self.cache_db_items('instances', instances, 'uuid')
def cache_db_instance(self, instance):
self.cache_db_items('instances', [instance], 'uuid')
def get_db_instances(self):
return self.get_db_items('instances')
def get_db_instance(self, instance_uuid):
return self.get_db_item('instances', instance_uuid)
def cache_db_flavors(self, flavors):
self.cache_db_items('flavors', flavors, 'flavorid')
def cache_db_flavor(self, flavor):
self.cache_db_items('flavors', [flavor], 'flavorid')
def get_db_flavors(self):
return self.get_db_items('flavors')
def get_db_flavor(self, flavorid):
return self.get_db_item('flavors', flavorid)
def best_match_content_type(self):
"""Determine the requested response content-type."""
if 'nova.best_content_type' not in self.environ:
# Calculate the best MIME type
content_type = None
# Check URL path suffix
parts = self.path.rsplit('.', 1)
if len(parts) > 1:
possible_type = 'application/' + parts[1]
if possible_type in SUPPORTED_CONTENT_TYPES:
content_type = possible_type
if not content_type:
content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES)
self.environ['nova.best_content_type'] = (content_type or
'application/json')
return self.environ['nova.best_content_type']
def get_content_type(self):
"""Determine content type of the request body.
Does not do any body introspection, only checks header
"""
if "Content-Type" not in self.headers:
return None
content_type = self.content_type
# NOTE(markmc): text/plain is the default for eventlet and
# other webservers which use mimetools.Message.gettype()
# whereas twisted defaults to ''.
if not content_type or content_type == 'text/plain':
return None
if content_type not in SUPPORTED_CONTENT_TYPES:
raise exception.InvalidContentType(content_type=content_type)
return content_type
class ActionDispatcher(object):
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
"""Find and call local method."""
action = kwargs.pop('action', 'default')
action_method = getattr(self, str(action), self.default)
return action_method(*args, **kwargs)
def default(self, data):
raise NotImplementedError()
class TextDeserializer(ActionDispatcher):
"""Default request body deserialization."""
def deserialize(self, datastring, action='default'):
return self.dispatch(datastring, action=action)
def default(self, datastring):
return {}
class JSONDeserializer(TextDeserializer):
def _from_json(self, datastring):
try:
return jsonutils.loads(datastring)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
def default(self, datastring):
return {'body': self._from_json(datastring)}
class XMLDeserializer(TextDeserializer):
def __init__(self, metadata=None):
"""
:param metadata: information needed to deserialize xml into
a dictionary.
"""
super(XMLDeserializer, self).__init__()
self.metadata = metadata or {}
def _from_xml(self, datastring):
plurals = set(self.metadata.get('plurals', {}))
node = xmlutil.safe_minidom_parse_string(datastring).childNodes[0]
return {node.nodeName: self._from_xml_node(node, plurals)}
def _from_xml_node(self, node, listnames):
"""Convert a minidom node to a simple Python type.
:param listnames: list of XML node names whose subnodes should
be considered list items.
"""
if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
return node.childNodes[0].nodeValue
elif node.nodeName in listnames:
return [self._from_xml_node(n, listnames) for n in node.childNodes]
else:
result = dict()
for attr in node.attributes.keys():
if not attr.startswith("xmlns"):
result[attr] = node.attributes[attr].nodeValue
for child in node.childNodes:
if child.nodeType != node.TEXT_NODE:
result[child.nodeName] = self._from_xml_node(child,
|
dragorosson/heat | heat/tests/engine/service/test_stack_create.py | Python | apache-2.0 | 15,923 | 0 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_messaging.rpc import dispatcher
from oslo_service import threadgroup
import six
from heat.common import exception
from heat.engine.clients.os import glance
from heat.engine.clients.os import nova
from heat.engine import environment
from heat.engine import properties
from heat.engine.resources.aws.ec2 import instance as instances
from heat.engine import service
from heat.engine import stack
from heat.engine import template as templatem
from heat.objects import stack as stack_object
from heat.tests import common
from heat.tests.engine import tools
from heat.tests.openstack.nova import fakes as fakes_nova
from heat.tests import utils
class StackCreateTest(common.HeatTestCase):
def setUp(self):
super(StackCreateTest, self).setUp()
self.ctx = utils.dummy_context()
self.man = service.EngineService('a-host', 'a-topic')
self.man.create_periodic_tasks()
@mock.patch.object(threadgroup, 'ThreadGroup')
@mock.patch.object(stack.Stack, 'validate')
def _test_stack_create(self, stack_name, mock_validate, mock_tg):
mock_tg.return_value = tools.DummyThreadGroup()
params = {'foo': 'bar'}
template = '{ "Template": "data" }'
stk = tools.get_stack(stack_name, self.ctx)
mock_tmpl = self.patchobject(templatem, 'Template', return_value=stk.t)
mock_env = self.patchobject(environment, 'Environment',
return_value=stk.env)
mock_stack = self.patchobject(stack, 'Stack', return_value=stk)
result = self.man.create_stack(self.ctx, stack_name,
template, params, None, {})
self.assertEqual(stk.identifier(), result)
self.assertIsInstance(result, dict)
self.assertTrue(result['stack_id'])
mock_tmpl.assert_called_once_with(template, files=None, env=stk.env)
mock_env.assert_called_once_with(params)
mock_stack.assert_called_once_with(self.ctx, stack_name, stk.t,
owner_id=None, nested_depth=0,
user_creds_id=None,
stack_user_project_id=None,
convergence=False,
parent_resource=None)
mock_validate.assert_called_once_with()
def test_stack_create(self):
stack_name = 'service_create_test_stack'
self._test_stack_create(stack_name)
def test_stack_create_equals_max_per_tenant(self):
cfg.CONF.set_override('max_stacks_per_tenant', 1)
stack_name = 'service_create_test_stack_equals_max'
self._test_stack_create(stack_name)
def test_stack_create_exceeds_max_per_tenant(self):
cfg.CONF.set_override('max_stacks_per_tenant', 0)
stack_name = 'service_create_test_stack_exceeds_max'
ex = self.assertRaises(dispatcher.ExpectedException,
self._test_stack_create, stack_name)
self.assertEqual(exception.RequestLimitExceeded, ex.exc_info[0])
self.assertIn("You have reached the maximum stacks per tenant",
six.text_type(ex.exc_info[1]))
@mock.patch.object(stack.Stack, 'validate')
def test_stack_create_verify_err(self, mock_validate):
mock_validate.side_effect = exception.StackValidationFailed(message='')
stack_name = 'service_create_verify_err_test_stack'
params = {'foo': 'bar'}
template = '{ "Template": "data" }'
stk = tools.get_stack(stack_name, self.ctx)
mock_tmpl = self.patchobject(templatem, 'Template', return_value=stk.t)
mock_env = self.patchobject(environment, 'Environment',
return_value=stk.env)
mock_stack = self.patchobject(stack, 'Stack', return_value=stk)
ex = self.assertRaises(dispatcher.ExpectedException,
self.man.create_stack,
self.ctx, stack_name, template, params,
None, {})
self.assertEqual(exception.StackValidationFailed, ex.exc_info[0])
mock_tmpl.assert_called_once_with(template, files=None, env=stk.env)
mock_env.assert_called_once_with(params)
mock_stack.assert_called_once_with(self.ctx, stack_name, stk.t,
owner_id=None, nested_depth=0,
user_creds_id=None,
stack_user_project_id=None,
convergence=False,
parent_resource=None)
def test_stack_create_invalid_stack_name(self):
stack_name = 'service_create_test_stack_invalid_name'
stack = tools.get_stack('test_stack', self.ctx)
self.assertRaises(dispatcher.ExpectedException,
self.man.create_stack,
self.ctx, stack_name, stack.t.t, {}, None, {})
def test_stack_create_invalid_resource_name(self):
stack_name = 'stack_create_invalid_resource_name'
stk = tools.get_stack(stack_name, self.ctx)
tmpl = dict(stk.t)
tmpl['resources']['Web/Server'] = tmpl['resources']['WebServer']
del tmpl['resources']['WebServer']
ex = self.assertRaises(dispatcher.ExpectedException,
self.man.create_stack,
self.ctx, stack_name,
stk.t.t, {}, None, {})
self.assertEqual(exception.StackValidationFailed, ex.exc_info[0])
@mock.patch.object(stack.Stack, 'create_stack_user_project_id')
def test_stack_create_authorization_failure(self, mock_create):
stack_name = 'stack_create_authorization_failure'
stk = tools.get_stack(stack_name, self.ctx)
mock_create.side_effect = exception.AuthorizationFailure
ex = self.assertRaises(dispatcher.ExpectedException,
self.man.create_stack,
self.ctx, stack_name,
stk.t.t, {}, None, {})
self.assertEqual(exception.StackValidationFailed, ex.exc_info[0])
def test_stack_create_no_credentials(self):
cfg.CONF.set_default('deferred_auth_method', 'password')
stack_name = 'test_stack_create_no_credentials'
params = {'foo': 'bar'}
template = '{ "Template": "data" }'
stk = tools.get_stack(stack_name, self.ctx)
# force check for credentials on create
stk['WebServer'].requires_deferred_auth = True
mock_tmpl = self.patchobject(templatem, 'Template', return_value=stk.t)
mock_env = self.patchobject(environment, 'Environment',
return_value=stk.env)
mock_stack = self.patchobject(stack, 'Stack' | , return_value=stk)
# test stack create using context without password
ctx_no_pwd = utils.dummy_context(password=None)
ex = self.assertRaises(dispatcher.ExpectedException,
self.man.create_stack,
ctx_no_pwd, stack_name,
template, params, None, {}, None)
self.assertEqual(exception.MissingCredentialError, ex.exc_info[0])
self.assertEqual('Missing required credential: X-Auth-Key' | ,
six.text_type(ex.exc_info[1]))
mock_tmpl.assert_called_once_with(template, |
AdaCore/spark2014 | testsuite/gnatprove/tests/Q217-039__assume/test.py | Python | gpl-3.0 | 96 | 0 | from test_support import check_out | put_file, prove_all
prov | e_all()
check_output_file(sort=True)
|
pyrocko/pyrocko | examples/trace_restitution_pz.py | Python | gpl-3.0 | 1,069 | 0 | from pyrocko import pz, io, trace
from pyrocko.example import get_example_data
# Download example data
get_example_data('STS2-Generic.polezero.txt')
get_example_data('test.mseed')
# read poles and zeros from SAC format pole-zero file
zeros, poles, constant = pz.read_sac_zpk('STS2-Generic.polezero.txt')
# one more zero to convert from velocity->counts to displacem | ent->counts
zeros.append(0.0j)
rest_sts2 = trace.PoleZeroResponse(
zeros=zeros,
poles=poles,
constant=constant)
traces = io.load('test.mseed')
out_traces = list(traces)
for tr in traces:
displacement = tr.transfer(
1000., # rise and fall of time window taper in [s]
(0.001, 0.002, 5., 10.), # frequency domain taper in [Hz]
transfer_function=rest_sts2,
invert=True) # to | change to (counts->displacement)
# change channel id, so we can distinguish the traces in a trace viewer.
displacement.set_codes(channel='D'+tr.channel[-1])
out_traces.append(displacement)
io.save(out_traces, 'displacement.mseed')
|
fxia22/ASM_xf | PythonD/site_python/MMTK/Random.py | Python | gpl-2.0 | 5,293 | 0.004723 | # Functions for finding random points and orientations.
#
# Written by: Konrad Hinsen
# Last revision: 2000-8-9
#
"""This module defines various random quantities that are useful in
molecular simulations. For obtaining random numbers, it tries to use
the RNG module, which is part of the LLNL package distribution, which
also contains Numerical Python. If RNG is not available, it
uses the random number generators in modules RandomArray (part of
Numerical Python) and whrandom (in the Python standard library).
"""
import Numeric
from Scientific.Geometry import Vector
from Numeric import dot
from Scientific.Geometry.Transformation import Rotation
import ParticleProperties, Units
try:
import RNG
except ImportError:
RNG = None
if RNG is None:
random = __import__('random')
import whrandom
from RandomArray import uniform, seed
seed(1, 1)
whrandom.seed(1, 1, 1)
def initializeRandomNumbersFromTime():
whrandom.seed(0, 0, 0)
seed(0, 0)
def gaussian(mean, std, shape=None):
if shape is None:
x = random.normalvariate(0., 1.)
else:
x = Numeric.zeros(shape, Numeric.Float)
xflat = Numeric.ravel(x)
for i in range(len(xflat)):
xflat[i] = random.normalvariate(0., 1.)
return mean + std*x
else:
_uniform_generator = \
RNG.CreateGenerator(-1, RNG.UniformDistribution(0., 1.))
_gaussian_generator = \
RNG.CreateGenerator(-1, RNG.NormalDistribution(0., 1.))
def initializeRandomNumbersFromTime():
global _uniform_generator, _gaussian_generator
_uniform_generator = \
RNG.CreateGenerator(0, RNG.UniformDistribution(0., 1.))
_gaussian_generator = \
RNG.CreateGenerator(0, RNG.NormalDistribution(0., 1.))
def uniform(x1, x2, shape=None):
if shape is None:
x = _uniform_generator.ranf()
else:
n = Numeric.multiply.reduce(shape)
x = _uniform_generator.sample(n)
x.shape = shape
return x1+(x2-x1)*x
def gaussian(mean, std, shape=None):
if shape is None:
x = _gaussian_generator.ranf()
else:
n = Numeric.multiply.reduce(shape)
x = _gaussian_generator.sample(n)
x.shape = shape
return mean+std*x
#
# Random point in a rectangular box centered around the origin
#
def randomPointInBox(a, b = None, c = None):
"""Returns a vector drawn from a uniform distribution within a
rectangular box with edge lengths |a|, |b|, |c|. If |b| and/or |c|
are omitted, they are taken to be equal to |a|."""
if b is None: b = a
if c is None: c = a
x = uniform(-0.5*a, 0.5*a)
y = uniform(-0.5*b, 0.5*b)
z = uniform(-0.5*c, 0.5*c)
return Vector(x, y, z)
#
# Random point in a sphere around the origin.
#
def randomPointInSphere(r):
"""Returns a vector drawn from a uniform distribution within
a sphere of radius |r|."""
rsq = r*r
while 1:
x = uniform(-r, r, (3,))
if dot(x, x) < rsq: break
return Vector(x)
#
# Random direction (unit vector). |
#
def randomDirection():
"""Returns a vector drawn from a uniform distribution on
the surface of a unit sphere."""
r = randomPointInSphere(1.)
return r.normal()
def randomDirect | ions(n):
"""Returns a list of |n| vectors drawn from a uniform distribution on
the surface of a unit sphere. If |n| is negative, return a deterministic
list of not more than -|n| vectors of unit length (useful for
testing purposes)."""
if n < 0:
list = [Vector(1., 0., 0.), Vector(0., -1., 0.), Vector(0., 0., 1.),
Vector(-1., 1., 0.).normal(), Vector(-1., 0., 1.).normal(),
Vector(0., 1., -1.).normal(), Vector(1., -1., 1.).normal()]
list = list[:-n]
else:
list = []
for i in range(n):
list.append(randomDirection())
return list
#
# Random rotation.
#
def randomRotation(max_angle = Numeric.pi):
"""Returns a Rotation object describing a random rotation
with a uniform axis distribution and angles drawn from
a uniform distribution between -|max_angle| and |max_angle|."""
return Rotation(randomDirection(), uniform(-max_angle, max_angle))
#
# Random velocity (gaussian)
#
def randomVelocity(temperature, mass):
"""Returns a random velocity vector for a particle of a given
|mass|, drawn from a Boltzmann distribution for the given
|temperature|."""
sigma = Numeric.sqrt((temperature*Units.k_B)/(mass*Units.amu))
return Vector(gaussian(0., sigma, (3,)))
#
# Random ParticleVector (gaussian)
#
def randomParticleVector(universe, width):
"""Returns a ParticleVector object in which each vector is
drawn from a Gaussian distribution with a given |width| centered
around zero."""
data = gaussian(0., 0.577350269189*width, (universe.numberOfPoints(), 3))
return ParticleProperties.ParticleVector(universe, data)
#
# Test code
#
if __name__ == '__main__':
mean = 1.
std = 5.
n = 10000
values = gaussian(mean, std, (n,))
m = Numeric.sum(values)/n
print mean, m
print std, Numeric.sqrt(Numeric.sum((values-m)**2)/n)
|
dreibh/planetlab-lxc-plcapi | PLC/Methods/AddConfFile.py | Python | bsd-3-clause | 1,107 | 0.004517 | from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.ConfFiles import Co | nfFile, ConfFiles
from PLC.Auth import Auth
can_update = lambda field_value: field_value[0] not in \
['conf_file_id', 'node_ids', 'nodegroup_ids']
class AddConfFile(Method):
"""
Adds a new node configuration file. Any fields specified in
conf_file_fields are used, otherwise defaults are used.
Returns the new conf_file_id (> 0) if s | uccessful, faults otherwise.
"""
roles = ['admin']
conf_file_fields = dict(list(filter(can_update, list(ConfFile.fields.items()))))
accepts = [
Auth(),
conf_file_fields
]
returns = Parameter(int, 'New conf_file_id (> 0) if successful')
def call(self, auth, conf_file_fields):
conf_file_fields = dict(list(filter(can_update, list(conf_file_fields.items()))))
conf_file = ConfFile(self.api, conf_file_fields)
conf_file.sync()
self.event_objects = {'ConfFile': [conf_file['conf_file_id']]}
return conf_file['conf_file_id']
|
stevefaeembra/cartogram-plugin | form.py | Python | gpl-2.0 | 6,749 | 0 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'form.ui'
#
# Created: Mon Mar 24 20:22:18 2014
# by: PyQt4 UI code generator 4.10.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.setWindowModality(QtCore.Qt.NonModal)
Dialog.resize(374, 317)
icon = QtGui.QIcon()
icon.addFile(_fromUtf8("centroids.png"))
Dialog.setWindowIcon(icon)
Dialog.setSizeGripEnabled(True)
self.gridlayout = QtGui.QGridLayout(Dialog)
self.gridlayout.setObjectName(_fromUtf8("gridlayout"))
self.label_3 = QtGui.QLabel(Dialog)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridlayout.addWidget(self.label_3, 0, 0, 1, 2)
self.inShape = QtGui.QComboBox(Dialog)
self.inShape.setObjectName(_fromUtf8("inShape"))
self.gridlayout.addWidget(self.inShape, 1, 0, 1, 2)
self.label_4 = QtGui.QLabel(Dialog)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridlayout.addWidget(self.label_4, 2, 0, 1, 2)
self.inFields = QtGui.QComboBox(Dialog)
self.inFields.setObjectName(_fromUtf8("inFields"))
self.gridlayout.addWidget(self.inFields, 3, 0, 1, 2)
self.label_2 = QtGui.QLabel(Dialog)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridlayout.addWidget(self.label_2, 4, 0, 1, 2)
self.hboxlayout = QtGui.QHBoxLayout()
self.hboxlayout.setObjectName(_fromUtf8("hboxlayout"))
self.outShape = QtGui.QLineEdit(Dialog)
self.outShape.setReadOnly(True)
self.outShape.setObjectName(_fromUtf8("outShape"))
self.hboxlayout.addWidget(self.outShape)
self.toolOut = QtGui.QToolButton(Dialog)
self.toolOut.setObjectName(_fromUtf8("toolOut"))
self.hboxlayout.addWidget(self.toolOut)
self.gridlayout.addLayout(self.hboxlayout, 5, 0, 1, 2)
self.hboxlayout1 = QtGui.QHBoxLayout()
self.hboxlayout1.setObjectName(_fromUtf8("hboxlayout1"))
self.label = QtGui.QLabel(Dialog)
self.label.setObjectName(_fromUtf8("label"))
self.hboxlayout1.addWidget(self.label)
spacerItem = QtGui.QSpacerItem(40,
20,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Minimum
)
self.hboxlayout1.addItem(spacerItem)
self.spnIterations = QtGui.QSpinBox(Dialog)
self.spnIterations.setProperty("value", 5)
self.spnIterations.setObjectName(_fromUtf8("spnIterations"))
self.hboxlayout1.addWidget(self.spnIterations)
self.gridlayout.addLayout(self.hboxlayout1, 6, 0, 1, 2)
self.chkKeep = QtGui.QCheckBox(Dialog)
self.chkKeep.setChecked(True)
self.chkKeep.setObjectName(_fromUtf8("chkKeep"))
self.gridlayout.addWidget(self.chkKeep, 7, 0, 1, 2)
spacerItem1 = QtGui.QSpacerItem(356,
31,
QtGui.QSizePolicy.Minimum,
QtGui.QSizePolicy.Expanding
| )
self.gridlayout.addItem(spacerItem1, 8, 0, 1, 2)
self.txtProgress = QtGui.QLabel(Dialog)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed,
QtGui.QSizePolicy.Fixed
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self
.txtPro | gress
.sizePolicy()
.hasHeightForWidth())
self.txtProgress.setSizePolicy(sizePolicy)
self.txtProgress.setMinimumSize(QtCore.QSize(113, 20))
self.txtProgress.setText(_fromUtf8(""))
self.txtProgress.setObjectName(_fromUtf8("txtProgress"))
self.gridlayout.addWidget(self.txtProgress, 9, 0, 1, 1)
self.buttonBox_2 = QtGui.QDialogButtonBox(Dialog)
self.buttonBox_2.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox_2.setStandardButtons(QtGui.QDialogButtonBox.Close |
QtGui.QDialogButtonBox.NoButton |
QtGui.QDialogButtonBox.Ok)
self.buttonBox_2.setObjectName(_fromUtf8("buttonBox_2"))
self.gridlayout.addWidget(self.buttonBox_2, 9, 1, 1, 1)
self.progressBar = QtGui.QProgressBar(Dialog)
self.progressBar.setProperty("value", 24)
self.progressBar.setAlignment(QtCore.Qt.AlignHCenter)
self.progressBar.setTextVisible(False)
self.progressBar.setFormat(_fromUtf8(""))
self.progressBar.setObjectName(_fromUtf8("progressBar"))
self.gridlayout.addWidget(self.progressBar, 10, 0, 1, 2)
self.retranslateUi(Dialog)
QtCore.QObject.connect(self.buttonBox_2,
QtCore.SIGNAL(_fromUtf8("accepted()")),
Dialog.accept
)
QtCore.QObject.connect(self.buttonBox_2,
QtCore.SIGNAL(_fromUtf8("rejected()")),
Dialog.close
)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "Generate Centroids", None))
self.label_3.setText(_translate("Dialog", "Input Shapefile:", None))
self.label_4.setText(_translate("Dialog", "Area Field:", None))
self.label_2.setText(_translate("Dialog", "Output Shapefile:", None))
self.toolOut.setText(_translate("Dialog", "Browse", None))
self.label.setText(_translate("Dialog",
"Number of iterations to perform:",
None))
self.chkKeep.setText(_translate("Dialog",
"Keep intermediate shapefiles",
None))
|
by46/flask-kits | setup.py | Python | mit | 2,009 | 0 | from __future__ import print_function
import io
import os.path
import re
from distutils.text_file import TextFile
from setuptools import find_packages, setup
home = os.path.abspath(os.path.dirname(__file__))
missing = object()
def read_description(*files, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = [io.open(name, encoding=encoding).read() for name in files]
return sep.join(buf)
def read_dependencies(requirements=missing):
if requirements is None:
return []
if requirements is missing:
requirements = 'requirements.txt'
if not os.path.isfile(requirements):
return []
text = TextFile(requirements, lstrip_ws=True)
try:
return text.readlines()
finally: |
text.close()
def read_version(module_name):
with open(os.path.join(module_name, '__i | nit__.py'), 'r') as fd:
result = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE)
return result.group(1) if result else '0.0.1'
setup(
name='flask_kits',
version=read_version('flask_kits'),
license='The MIT License',
description='demo',
author='recipe',
author_email='ycs_ctbu_2010@126.com',
install_requires=read_dependencies(),
include_package_data=True,
packages=find_packages(),
classifiers=[
'Programming Language :: Python',
'Development Status :: 3 - Alpha',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Software Development',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Software Distribution',
'Topic :: System :: Systems Administration',
]
)
|
stevenmizuno/QGIS | python/plugins/db_manager/db_plugins/postgis/connector.py | Python | gpl-2.0 | 44,359 | 0.002976 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : brush.tyler@gmail.com
The content of this file is based on
- PG_Manager by Martin Dobias <wonder.sk@gmail.com> (GPLv2 license)
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistr | ibute it and/or modi | fy *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import str
from builtins import range
from functools import cmp_to_key
from qgis.PyQt.QtCore import QRegExp
from qgis.core import Qgis, QgsCredentials, QgsDataSourceUri
from ..connector import DBConnector
from ..plugin import ConnectionError, DbError, Table
import os
import psycopg2
import psycopg2.extensions
# use unicode!
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
def classFactory():
return PostGisDBConnector
class PostGisDBConnector(DBConnector):
def __init__(self, uri):
DBConnector.__init__(self, uri)
self.host = uri.host() or os.environ.get('PGHOST')
self.port = uri.port() or os.environ.get('PGPORT')
username = uri.username() or os.environ.get('PGUSER')
password = uri.password() or os.environ.get('PGPASSWORD')
# Do not get db and user names from the env if service is used
if not uri.service():
if username is None:
username = os.environ.get('USER')
self.dbname = uri.database() or os.environ.get('PGDATABASE') or username
uri.setDatabase(self.dbname)
expandedConnInfo = self._connectionInfo()
try:
self.connection = psycopg2.connect(expandedConnInfo)
except self.connection_error_types() as e:
err = str(e)
uri = self.uri()
conninfo = uri.connectionInfo(False)
for i in range(3):
(ok, username, password) = QgsCredentials.instance().get(conninfo, username, password, err)
if not ok:
raise ConnectionError(e)
if username:
uri.setUsername(username)
if password:
uri.setPassword(password)
newExpandedConnInfo = uri.connectionInfo(True)
try:
self.connection = psycopg2.connect(newExpandedConnInfo)
QgsCredentials.instance().put(conninfo, username, password)
except self.connection_error_types() as e:
if i == 2:
raise ConnectionError(e)
err = str(e)
finally:
# remove certs (if any) of the expanded connectionInfo
expandedUri = QgsDataSourceUri(newExpandedConnInfo)
sslCertFile = expandedUri.param("sslcert")
if sslCertFile:
sslCertFile = sslCertFile.replace("'", "")
os.remove(sslCertFile)
sslKeyFile = expandedUri.param("sslkey")
if sslKeyFile:
sslKeyFile = sslKeyFile.replace("'", "")
os.remove(sslKeyFile)
sslCAFile = expandedUri.param("sslrootcert")
if sslCAFile:
sslCAFile = sslCAFile.replace("'", "")
os.remove(sslCAFile)
finally:
# remove certs (if any) of the expanded connectionInfo
expandedUri = QgsDataSourceUri(expandedConnInfo)
sslCertFile = expandedUri.param("sslcert")
if sslCertFile:
sslCertFile = sslCertFile.replace("'", "")
os.remove(sslCertFile)
sslKeyFile = expandedUri.param("sslkey")
if sslKeyFile:
sslKeyFile = sslKeyFile.replace("'", "")
os.remove(sslKeyFile)
sslCAFile = expandedUri.param("sslrootcert")
if sslCAFile:
sslCAFile = sslCAFile.replace("'", "")
os.remove(sslCAFile)
self.connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
c = self._execute(None, u"SELECT current_user,current_database()")
self.user, self.dbname = self._fetchone(c)
self._close_cursor(c)
self._checkSpatial()
self._checkRaster()
self._checkGeometryColumnsTable()
self._checkRasterColumnsTable()
def _connectionInfo(self):
return str(self.uri().connectionInfo(True))
def _checkSpatial(self):
""" check whether postgis_version is present in catalog """
c = self._execute(None, u"SELECT COUNT(*) FROM pg_proc WHERE proname = 'postgis_version'")
self.has_spatial = self._fetchone(c)[0] > 0
self._close_cursor(c)
return self.has_spatial
def _checkRaster(self):
""" check whether postgis_version is present in catalog """
c = self._execute(None, u"SELECT COUNT(*) FROM pg_proc WHERE proname = 'postgis_raster_lib_version'")
self.has_raster = self._fetchone(c)[0] > 0
self._close_cursor(c)
return self.has_raster
def _checkGeometryColumnsTable(self):
c = self._execute(None,
u"SELECT relkind = 'v' OR relkind = 'm' FROM pg_class WHERE relname = 'geometry_columns' AND relkind IN ('v', 'r', 'm', 'p')")
res = self._fetchone(c)
self._close_cursor(c)
self.has_geometry_columns = (res is not None and len(res) != 0)
if not self.has_geometry_columns:
self.has_geometry_columns_access = self.is_geometry_columns_view = False
else:
self.is_geometry_columns_view = res[0]
# find out whether has privileges to access geometry_columns table
priv = self.getTablePrivileges('geometry_columns')
self.has_geometry_columns_access = priv[0]
return self.has_geometry_columns
def _checkRasterColumnsTable(self):
c = self._execute(None,
u"SELECT relkind = 'v' OR relkind = 'm' FROM pg_class WHERE relname = 'raster_columns' AND relkind IN ('v', 'r', 'm', 'p')")
res = self._fetchone(c)
self._close_cursor(c)
self.has_raster_columns = (res is not None and len(res) != 0)
if not self.has_raster_columns:
self.has_raster_columns_access = self.is_raster_columns_view = False
else:
self.is_raster_columns_view = res[0]
# find out whether has privileges to access geometry_columns table
self.has_raster_columns_access = self.getTablePrivileges('raster_columns')[0]
return self.has_raster_columns
def getInfo(self):
c = self._execute(None, u"SELECT version()")
res = self._fetchone(c)
self._close_cursor(c)
return res
def getSpatialInfo(self):
""" returns tuple about PostGIS support:
- lib version
- geos version
- proj version
- installed scripts version
- released scripts version
"""
if not self.has_spatial:
return
try:
c = self._execute(None,
u"SELECT postgis_lib_version(), postgis_geos_version(), postgis_proj_version(), postgis_scripts_in |
PyPlanet/PyPlanet | pyplanet/apps/contrib/funcmd/__init__.py | Python | gpl-3.0 | 4,483 | 0.022376 | from pyplanet.apps.config import AppConfig
from pyplanet.apps.contrib.funcmd.view import EmojiToolbarView
from pyplanet.contrib.command import Command
from pyplanet.apps.core.maniaplanet import callbacks as mp_signals
from pyplanet.contrib.setting import Setting
class FunCmd(AppConfig):
app_dependencies = ['core.maniaplanet']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setting_emoji_toolbar = Setting(
'emoji_toolbar', ' | Display Emoji Toolbar', Setting.CAT_DESIGN, type=bool, default=True,
description='Display the Emoji Toolbar to users | .',
change_target=self.reload_settings
)
self.emoji_toolbar = EmojiToolbarView(self)
async def on_start(self):
await self.context.setting.register(
self.setting_emoji_toolbar
)
await self.instance.command_manager.register(
Command(command='afk', target=self.command_afk, admin=False, description='Set yourself as AFK'),
Command(command='bootme', target=self.command_bootme, admin=False, description='Boot yourself from the server'),
Command(command='rq', aliases=['ragequit'], target=self.command_rq, admin=False, description='Ragequit from the server'),
Command(command='gg', target=self.command_gg, admin=False, description='Send Good Game to everyone'),
Command(command='n1', target=self.command_n1, admin=False, description='Send Nice One to everyone'),
Command(command='nt', target=self.command_nt, admin=False, description='Send Nice Try/Nice Time to everyone'),
)
if self.instance.game.game == 'sm':
await self.instance.command_manager.register(
Command(command='ns', target=self.command_ns, admin=False, description='Send Nice Shot to everyone'),
)
self.context.signals.listen(mp_signals.player.player_connect, self.player_connect)
if await self.setting_emoji_toolbar.get_value():
await self.emoji_toolbar.display()
async def reload_settings(self, *args, **kwargs):
if await self.setting_emoji_toolbar.get_value():
await self.emoji_toolbar.display()
else:
await self.emoji_toolbar.hide()
async def player_connect(self, player, *args, **kwargs):
if await self.setting_emoji_toolbar.get_value():
await self.emoji_toolbar.display(player_logins=[player.login])
async def command_afk(self, player, data, **kwargs):
if 'admin' in self.instance.apps.apps and self.instance.apps.apps['admin'].server.chat_redirection:
return
await self.instance.gbx.multicall(
self.instance.gbx('ForceSpectator', player.login, 3),
self.instance.chat('$fff {}$z$s$fff is now away from keyboard.'.format(player.nickname))
)
async def command_bootme(self, player, data, **kwargs):
if 'admin' in self.instance.apps.apps and self.instance.apps.apps['admin'].server.chat_redirection:
return
await self.instance.gbx.multicall(
self.instance.chat('$fff {}$z$s$fff chooses to boot back to real life!'.format(player.nickname)),
self.instance.gbx('Kick', player.login, 'chooses to boot to real life (/bootme)'),
)
async def command_rq(self, player, data, **kwargs):
if 'admin' in self.instance.apps.apps and self.instance.apps.apps['admin'].server.chat_redirection:
return
await self.instance.gbx.multicall(
self.instance.chat('$f00 {}$z$s$f00 rage quits.'.format(player.nickname)),
self.instance.gbx('Kick', player.login, 'rage quit (/rq)'),
)
async def command_gg(self, player, data, **kwargs):
if 'admin' in self.instance.apps.apps and self.instance.apps.apps['admin'].server.chat_redirection:
return
await self.instance.chat('$fff {}$z$s$fff Good Game everyone!'.format(player.nickname))
async def command_n1(self, player, data, **kwargs):
if 'admin' in self.instance.apps.apps and self.instance.apps.apps['admin'].server.chat_redirection:
return
await self.instance.chat('$fff {}$z$s$fff Nice one!'.format(player.nickname))
async def command_ns(self, player, data, **kwargs):
if 'admin' in self.instance.apps.apps and self.instance.apps.apps['admin'].server.chat_redirection:
return
await self.instance.chat('$fff {}$z$s$fff Nice shot!'.format(player.nickname))
async def command_nt(self, player, data, **kwargs):
if 'admin' in self.instance.apps.apps and self.instance.apps.apps['admin'].server.chat_redirection:
return
if self.instance.game.game == 'sm':
await self.instance.chat('$fff {}$z$s$fff Nice try!'.format(player.nickname))
else:
await self.instance.chat('$fff {}$z$s$fff Nice time!'.format(player.nickname))
|
metabrainz/picard | test/formats/test_apev2.py | Python | gpl-2.0 | 11,058 | 0.000724 | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2019, 2021 Philipp Wolfer
# Copyright (C) 2020-2021 Laurent Monin
# Copyright (C) 2021 Bob Swift
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
from mutagen.apev2 import (
BINARY,
APEValue,
)
from test.picardtestcase import (
PicardTestCase,
create_fake_png,
)
from picard import config
from picard.formats import (
apev2,
open_,
)
from picard.formats.mutagenext.tak import native_tak
from picard.metadata import Metadata
from .common import (
TAGS,
CommonTests,
load_metadata,
load_raw,
save_and_load_metadata,
save_metadata,
save_raw,
skipUnlessTestfile,
)
from .coverart import CommonCoverArtTests
VALID_KEYS = {
' valid Key}',
'{ $ome tag~}',
'xx',
'x' * 255,
}
INVALID_KEYS = {
'invalid\x7fkey',
'invalid\x19key',
'',
'x',
'x' * 256,
'ID3',
'TAG',
'OggS',
'MP+',
}
SUPPORTED_TAGS = set(TAGS) - apev2.UNSUPPORTED_TAGS
class CommonApeTests:
class ApeTestCase(CommonTests.TagFormatsTestCase):
def setup_tags(self):
super().setup_tags()
self.unsupported_tags['r128_album_gain'] = '-2857'
self.unsupported_tags['r128_track_gain'] = '-2857'
def test_supports_tags(self):
supports_tag = self.format.supports_tag
for key in VALID_KEYS | SUPPORTED_TAGS:
self.assertTrue(supports_tag(key), '%r should be supported' % key)
for key in INVALID_KEYS | apev2.UNSUPPORTED_TAGS:
self.assertFalse(supports_tag(key), '%r should be unsupported' % key)
@skipUnlessTestfile
def test_invalid_coverart(self):
metadata = {
'Cover Art (Front)': APEValue(b'filename.png\0NOTPNGDATA', BINARY)
}
save_raw(self.filename, metadata)
loaded_metadata = load_metadata(self.filename)
self.assertEqual(0, len(loaded_metadata.images))
@skipUnlessTestfile
def test_clear_tags_preserve_images_all(self):
imagedata = APEValue(b'filename.png\0' + create_fake_png(b'a'), BINARY)
save_raw(self.filename, {
'Cover Art (Front)': imagedata,
'Cover Art': imagedata,
'Cover Art (foo)': imagedata,
'cover art (bar)': imagedata,
})
config.setting['clear_existing_tags'] = True
config.setting['preserve_images'] = True
metadata = save_and_load_metadata(self.filename, Metadata())
self.assertEqual(4, len(metadata.images))
config.setting['preserve_images'] = False
metadata = save_and_load_metadata(self.filename, Metadata())
self.assertEqual(0, len(metadata.images))
def test_supports_extended_tags(self):
performer_tag = "performer:accordéon clavier « boutons »"
self.assertTrue(self.format.supports_tag(performer_tag))
self.assertTrue(self.format.supports_tag('lyrics:foó'))
self.assertTrue(self.format.supports_tag('comment:foó'))
def test_case_insensitive_reading(self):
self._read_case_insensitive_tag('artist', 'Artist')
self._read_case_insensitive_tag('albumartist', 'Album Artist')
self._read_case_insensitive_tag('performer:', 'Performer')
self._read_case_insensitive_tag('tracknumber', 'Track')
self._read_case_insensitive_tag('discnumber', 'Disc')
@skipUnlessTestfile
def test_ci_tags_preserve_case(self):
# Ensure values are not duplicated on repeated save and are saved
# case preserving.
for name in ('CUStom', 'ARtist'):
tags = {}
tags[name] = 'foo'
save_raw(self.filename, tags)
loaded_metadata = load_metadata(self.filename)
loaded_metadata[name.lower()] = 'bar'
save_metadata(self.filename, loaded_metadata)
raw_metadata = dict(load_raw(self.filename))
self.assertIn(name, raw_metadata)
self.assertEqual(
raw_metadata[name],
loaded_metadata[name.lower()])
self.assertEqual(1, len(raw_metadata[name]))
self.assertNotIn(name.upper(), raw_metadata)
def _read_case_insensitive_tag(self, name, ape_name):
upper_ape_name = ape_name.upper()
metadata = {
upper_ape_name: 'Some value'
}
save_raw(self.filename, metadata)
loaded_metadata = load_metadata(self.filename)
self.assertEqual(metadata[upper_ape_name], loaded_metadata[name])
save_metadata(self.filename, loaded_metadata)
raw_metadata = load_raw(self.filename)
self.assertIn(upper_ape_name, raw_metadata.keys())
self.assertEqual(metadata[upper_ape_name], raw_metadata[ape_name])
class MonkeysAudioTest(CommonApeTests.ApeTestCase):
testfile = 'test.ape'
supports_ratings = False
expected_info = {
'length': 82,
'~cha | nnels': '2',
'~sample_rate': '44100',
'~bits_per_sample': '16',
}
unexpected_info = ['~video']
class WavPackTest(CommonApeTests.ApeTestCase):
testfile = 'test.wv'
supports_ratings = False
expected_ | info = {
'length': 82,
'~channels': '2',
'~sample_rate': '44100',
}
unexpected_info = ['~video']
def setUp(self):
super().setUp()
config.setting['rename_files'] = True
config.setting['move_files'] = False
config.setting['ascii_filenames'] = False
config.setting['windows_compatibility'] = False
config.setting['windows_long_paths'] = True
config.setting['dont_write_tags'] = True
config.setting['preserve_timestamps'] = False
config.setting['delete_empty_dirs'] = False
config.setting['save_images_to_files'] = False
config.setting['file_renaming_scripts'] = {'test_id': {'script': '%title%'}}
config.setting['selected_file_naming_script_id'] = 'test_id'
def _save_with_wavpack_correction_file(self, source_file_wvc):
# Create dummy WavPack correction file
open(source_file_wvc, 'a').close()
# Open file and rename it
f = open_(self.filename)
f._copy_loaded_metadata(f._load(self.filename))
f.metadata['title'] = 'renamed_' + os.path.basename(self.filename)
self.assertTrue(os.path.isfile(self.filename))
target_file_wv = f._save_and_rename(self.filename, f.metadata)
target_file_wvc = target_file_wv + 'c'
# Register cleanups
self.addCleanup(os.unlink, target_file_wv)
self.addCleanup(os.unlink, target_file_wvc)
return (target_file_wv, target_file_wvc)
@skipUnlessTestfile
def test_save_wavpack_correction_file(self):
source_file_wvc = self.filename + 'c'
(target_file_wv, target_file_wvc) = self._save_with_wavpack_correction_file(source_file_wvc)
# Check both the WavPack file and the correction file got moved
self.assertFalse(os.path.isfile(self.filename))
self.assertFalse(os.path.isfile(source_file_wvc))
self.assertTrue(os.path.isfile(target_file_wv))
self.assertTrue(os.path.isfile(target_file_wvc))
@skipUnlessTes |
remb0/CouchPotatoServer | libs/guessit/transfo/post_process.py | Python | gpl-3.0 | 2,521 | 0.000397 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from guessit.patterns import subtitle_exts
import logging
log = logging.getLogger(__name__)
def process(mtree):
# 1- try to promote language to subtitle language where it makes sense
for node in mtree.nodes():
if 'language' not in node.guess:
continue
def promote_subtitle():
# pylint: disable=W0631
node.guess.set('subtitleLanguage', node.guess['language'],
confidence=node.guess.confidence('language'))
del node.guess['language']
# - if we matched a language in a file with a sub extension and that
# the group is the last group of the filename, it is probably the
# language of the | subtitle
# (eg: 'xxx.english.srt')
if (mtree.node_at((-1,)).value.lower() in subtitle_exts and
node == mtree.leaves()[-2]):
promote_subtitle()
# - if a language is in an explicit group just preceded by "st",
# it is a subtitle language (eg: '...st[fr-eng]...')
try:
idx = node.node_idx
previous = mtree.node_at((idx[0], idx[1] - 1)).leaves()[-1]
if previous.value.lower()[-2:] = | = 'st':
promote_subtitle()
except IndexError:
pass
# 2- ", the" at the end of a series title should be prepended to it
for node in mtree.nodes():
if 'series' not in node.guess:
continue
series = node.guess['series']
lseries = series.lower()
if lseries[-4:] == ',the':
node.guess['series'] = 'The ' + series[:-4]
if lseries[-5:] == ', the':
node.guess['series'] = 'The ' + series[:-5]
|
tvtsoft/odoo8 | addons/sale_contract/__init__.py | Python | agpl-3.0 | 141 | 0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
|
import model | s
import report
import tests
|
HIPS/autograd | examples/rnn.py | Python | mit | 4,931 | 0.00365 | """Implements the long-short term memory character model.
This version vectorizes over multiple examples, but each string
has a fixed length."""
from __future__ import absolute_import
from __future__ import print_function
from builtins import range
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import grad
from autograd.scipy.special import logsumexp
from os.path import dirname, join
from autograd.misc.optimizers import adam
### Helper functions #################
def sigmoid(x):
return 0.5*(np.tanh(x) + 1.0) # Output ranges from 0 to 1.
def concat_and_multiply(weights, *args):
cat_state = np.hstack(args + (np.ones((args[0].shape[0], 1)),))
return np.dot(cat_state, weights)
### Define recurrent neural net #######
def create_rnn_params(input_size, state_size, output_size,
param_scale=0.01, rs=npr.RandomState(0)):
return {'init hiddens': rs.randn(1, state_size) * param_scale,
'change': rs.randn(input_size + state_size + 1, state_size) * param_scale,
'predict': rs.randn(state_size + 1, output_size) * param_scale}
def rnn_predict(params, inputs):
def update_rnn(input, hiddens):
return np.tanh(concat_and_multiply(params['change'], input, hiddens))
def hiddens_to_output_probs(hiddens):
output = concat_and_multiply(params['predict'], hiddens)
return output - logsumexp(output, axis=1, keepdims=True) # Normalize log-probs.
num_sequences = inputs.shape[1]
hiddens = np.repeat(params['init hiddens'], num_sequences, axis=0)
output = [hiddens_to_output_probs(hiddens)]
for input in inputs: # Iterate over time steps.
hiddens = update_rnn(input, hiddens)
output.append(hiddens_to_output_probs(hiddens))
return output
def rnn_log_likelihood(params, inputs, targets):
logprobs = rnn_predict(params, inputs)
loglik = 0.0
num_time_steps, num_examples, _ = inputs.shape
for t in range(num_time_steps):
loglik += np.sum(logprobs[t] * targets[t])
return loglik / (num_time_steps * num_examples)
### Dataset setup ##################
def string_to_one_hot(string, maxchar):
"""Converts an ASCII string to a one-of-k encoding."""
ascii = np.array([ord(c) for c in string]).T
return np.array(ascii[:,None] == np.arange(maxchar)[None, :], dtype=int)
def one_hot_to_string(one_hot_matrix):
return "".join([chr(np.argmax(c)) for c in one_hot | _matrix])
def build_dataset(filename, sequence_length, alphabet_size, max_lines=-1):
"""Loads a text file, and turns each line into an encoded sequence."""
with | open(filename) as f:
content = f.readlines()
content = content[:max_lines]
content = [line for line in content if len(line) > 2] # Remove blank lines
seqs = np.zeros((sequence_length, len(content), alphabet_size))
for ix, line in enumerate(content):
padded_line = (line + " " * sequence_length)[:sequence_length]
seqs[:, ix, :] = string_to_one_hot(padded_line, alphabet_size)
return seqs
if __name__ == '__main__':
num_chars = 128
# Learn to predict our own source code.
text_filename = join(dirname(__file__), 'rnn.py')
train_inputs = build_dataset(text_filename, sequence_length=30,
alphabet_size=num_chars, max_lines=60)
init_params = create_rnn_params(input_size=128, output_size=128,
state_size=40, param_scale=0.01)
def print_training_prediction(weights):
print("Training text Predicted text")
logprobs = np.asarray(rnn_predict(weights, train_inputs))
for t in range(logprobs.shape[1]):
training_text = one_hot_to_string(train_inputs[:,t,:])
predicted_text = one_hot_to_string(logprobs[:,t,:])
print(training_text.replace('\n', ' ') + "|" +
predicted_text.replace('\n', ' '))
def training_loss(params, iter):
return -rnn_log_likelihood(params, train_inputs, train_inputs)
def callback(weights, iter, gradient):
if iter % 10 == 0:
print("Iteration", iter, "Train loss:", training_loss(weights, 0))
print_training_prediction(weights)
# Build gradient of loss function using autograd.
training_loss_grad = grad(training_loss)
print("Training RNN...")
trained_params = adam(training_loss_grad, init_params, step_size=0.1,
num_iters=1000, callback=callback)
print()
print("Generating text from RNN...")
num_letters = 30
for t in range(20):
text = ""
for i in range(num_letters):
seqs = string_to_one_hot(text, num_chars)[:, np.newaxis, :]
logprobs = rnn_predict(trained_params, seqs)[-1].ravel()
text += chr(npr.choice(len(logprobs), p=np.exp(logprobs)))
print(text)
|
interlegis/sapl | sapl/api/pagination.py | Python | gpl-3.0 | 3,967 | 0.000504 | from django.core.paginator import EmptyPage
from rest_framework import pagination
from rest_framework.response import Response
class StandardPagination(pagination.PageNumberPagination):
page_size = 10
page_size_query_param = 'page_size'
max_page_size = 100
def paginate_queryset(self, queryset, request, view=None):
if request.query_params.get('get_all', '').lower() == 'true':
return None
return super().paginate_queryset(queryset, request, view=view)
def get_paginated_response_schema(self, schema):
r = {
'type': 'object',
'properties': {
'pagination': {
'type': 'object',
'properties': {
'links': {
'type': 'object',
'properties': {
'next': {
'type': 'string',
'nullable': True,
'format': 'uri',
'example': 'http://api.example.org/accounts/?{page_query_param}=4'.format(
page_query_param=self.page_query_param)
},
'previous': {
'type': 'string',
'nullable': True,
'format': 'uri',
'example': 'http://api.example.org/accounts/?{page_query_param}=2'.format(
page_query_param=self.page_query_param)
},
}
},
'previous_page': {
'type': 'integer',
'example': 123,
},
'next_page': {
'type': 'integer',
'example': 123,
},
'start_index': {
'type': 'integer',
'example': 123,
},
'end_index': {
'type': 'integer',
'example': 123,
},
'total_entries': {
'type': 'integer',
'example': 123,
},
'total_pages': {
'type': 'integer',
'example': 123,
},
'page': {
'type': 'integer',
'example': 123,
},
}
},
'results': schema,
},
}
return r
def get_paginated_response(self, data):
try:
previous_page_number = self.page.previous_page_nu | mber()
| except EmptyPage:
previous_page_number = None
try:
next_page_number = self.page.next_page_number()
except EmptyPage:
next_page_number = None
return Response({
'pagination': {
'links': {
'next': self.get_next_link(),
'previous': self.get_previous_link(),
},
'previous_page': previous_page_number,
'next_page': next_page_number,
'start_index': self.page.start_index(),
'end_index': self.page.end_index(),
'total_entries': self.page.paginator.count,
'total_pages': self.page.paginator.num_pages,
'page': self.page.number,
},
'results': data,
})
|
akamensky/sssd | src/config/SSSDConfigTest.py | Python | gpl-3.0 | 80,155 | 0.00136 | #!/usr/bin/env python
'''
Created on Sep 18, 2009
@author: sgallagh
'''
import unittest
import os
import shutil
import tempfile
from stat import *
import sys
srcdir = os.getenv('srcdir')
if srcdir:
sys.path.insert(0, "./src/config")
srcdir = srcdir + "/src/config"
else:
srcdir = "."
import SSSDConfig
def create_temp_dir():
test_dir = os.environ.get('SSS_TEST_DIR') or "."
return tempfile.mkdtemp(dir=test_dir)
class SSSDConfigTestValid(unittest.TestCase):
def setUp(self):
self.tmp_dir = create_temp_dir()
def tearDown(self):
shutil.rmtree(self.tmp_dir)
def testServices(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
# Validate services
services = sssdconfig.list_services()
self.assertTrue('sssd' in services)
self.assertTrue('nss' in services)
self.assertTrue('pam' in services)
#Verify service attributes
sssd_service = sssdconfig.get_service('sssd')
service_opts = sssd_service.list_options()
self.assertTrue('services' in service_opts.keys())
service_list = sssd_service.get_option('services')
self.assertTrue('nss' in service_list)
self.assertTrue('pam' in service_list)
self.assertTrue('domains' in service_opts)
self.assertTrue('reconnection_retries' in service_opts)
del sssdconfig
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.new_config()
sssdconfig.delete_service('sssd')
new_sssd_service = sssdconfig.new_service('sssd');
new_options = new_sssd_service.list_options();
self.assertTrue('debug_level' in new_options)
self.assertEquals(new_options['debug_level'][0], int)
self.assertTrue('command' in new_options)
self.assertEquals(new_options['command'][0], str)
self.assertTrue('reconnection_retries' in new_options)
self.assertEquals(new_options['reconnection_retries'][0], int)
self.assertTrue('services' in new_options)
self.assertEquals(new_options['debug_level'][0], int)
self.assertTrue('domains' in new_options)
self.assertEquals(new_options['domains'][0], list)
self.assertEquals(new_options['domains'][1], str)
self.assertTrue('sbus_timeout' in new_options)
self.assertEquals(new_options['sbus_timeout'][0], int)
self.assertTrue('re_expression' in new_options)
self.assertEquals(new_options['re_expression'][0], str)
self.assertTrue('full_name_format' in new_options)
self.assertEquals(new_options['full_name_format'][0], str)
self.assertTrue('default_domain_suffix' in new_options)
self.assertEquals(new_options['default_domain_suffix'][0], str)
self.assertTrue('domain_resolution_order' in new_options)
self.assertEquals(new_options['domain_resolution_order'][0], list)
self.assertEquals(new_options['domain_resolution_order'][1], str)
del sssdconfig
def testDomains(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
#Validate domain list
domains = sssdconfig.list_domains()
self.assertTrue('LOCAL' in domains)
self.assertTrue('LDAP' in domains)
self.assertTrue('PROXY' in domains)
self.assertTrue('IPA' in domains)
#Verify domain attributes
ipa_domain = sssdconfig.get_domain('IPA')
domain_opts = ipa_domain.list_options()
self.assertTrue('debug_level' in domain_opts.keys())
self.assertTrue('id_provider' in domain_opts.keys())
self.assertTrue('auth_provider' in domain_opts.keys())
self.assertEqual(ipa_domain.get_option('debug_level'), 0xff0)
proxy_domain = sssdconfig.get_domain('PROXY')
self.assertEqual(proxy_domain.get_option('debug_level'), 1)
# Verify attributes in responders
pam_responder = sssdconfig.get_service('pam')
self.assertEqual(pam_responder.get_option('debug_level'), 2)
sudo_responder = sssdconfig.get_service('sudo')
self.assertEqual(sudo_responder.get_option('debug_level'), 0xfc10)
del sssdconfig
def testListProviders(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.new_config()
junk_domain = sssdconfig.new_domain('junk')
providers = junk_domain.list_providers()
self.assertTrue('ldap' in providers.keys())
def testCreateNewLocalConfig(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.new_config()
local_domain = sssdconfig.new_domain('LOCAL')
local_domain.add_provider('local', 'id')
local_domain.set_option('debug_level', 1)
local_domain.set_option('default_shell', '/bin/tcsh')
local_domain.set_active(True)
sssdconfig.save_domain(local_domain)
of = self.tmp_dir + '/testCreateNewLocalConfig.conf'
#Ensure the output file doesn't exist
try:
os.unlink(of)
except:
pass
#Write out the file
sssdconfig.write(of)
#Verify that the output file has the correct permissions
mode = os.stat(of)[ST_MODE]
#Output files should not be readable or writable by
#non-owners, and should not be executable by anyone
self.assertFalse(S_IMODE(mode) & 0o177)
# try to import saved configuration file
config = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
config.import_config(configfile=of)
#Remove the output file
os.unlink(of)
def testCreateNewLDAPConfig(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.new_config()
ldap_domain = sssdconfig.new_domain('LDAP')
ldap_domain.add_provider('ldap', 'id')
ldap_domain.set_option('debug_level', 1)
ldap_domain.set_active(True)
sssdconfig.save_domain(ldap_domain)
of = self.tmp_dir + '/testCreateNewLDAPConfig.conf'
#Ensure the output file doesn't exist
try:
os.unlink(of)
except:
pass
#Write out the file
sssdconfig.write(of)
#Verify that the output file has the correct permissions
mode = os.stat(of)[ST_MODE]
#Output files should not be readable or writable by
#non-owners, and should not be executable by anyone
self.assertFalse(S_IMODE(mode) & 0o177)
# try to import saved configuration file
config = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
config.import_config(configfile=of)
#Remove the output file
| os.unlink(of)
def testModifyExistingConfig(self):
sssdconfig = SSSDConfig.SSSDConfig(srcdir + "/etc/sssd.api.conf",
srcdir + "/etc/sssd.api.d")
sssdconfig.import_config(srcdir + "/testconfigs/sssd-valid.conf")
ldap_domain = sssdconfig.get_domain('LDAP')
ldap_domain.set_option('debug_level', 3)
ldap_domain.remove_provider('auth')
ldap_domain.add_provider('krb5', 'auth')
| ldap_domain.set_active(True)
sssdconfig.save_domain(ldap_domain)
proxy_domain = sssdconfig.get_domain('PROXY')
proxy_domain.set_option('debug_level', 0x1f10)
sssdconfig.save_domain(proxy_domain)
sudo_responder = sssdco |
soileater/noobest | rank/training.py | Python | mit | 4,006 | 0.007489 | from rank.models import Player
from utils.api import get_watcher
import time
import json
def search(my_id):
if True:
match = get_watcher().get_match_list(my_id,'na')
match_id_list = [i['matchId'] for i | in match['matches'] if i['queue'] == 'TEAM_BUILDER_DRAFT_RANKED_5x5'][:9]
match_details = []
all_players_id = dict()
for match_id in match_id_list:
match_detail = get_watcher().get_match(match_id)
match_details.append(match_detail)
for data in match_detail['participantIdentities']:
if data['player']['summonerId'] in all_players_id:
all_pla | yers_id[data['player']['summonerId']].append([match_detail, data['participantId']])
else:
all_players_id[data['player']['summonerId']] = [[match_detail, data['participantId']]]
friends_id = [key for key, value in all_players_id.items() if len(value) > 1]
for fid in friends_id:
if fid != my_id:
continue
minionsKilled = 0
goldEarned = 0
totalDamageDealtToChampions = 0
wardsPlaced = 0
kills = 0
assists = 0
deaths = 0
damage_per_gold = 0
cs_per_minute = 0
kda = 0
wardsPlaced_per_minute = 0
arrayList_for_rank = []
for data in all_players_id[fid]:
specific_match = data[0]
position = data[1]
match_time = specific_match['matchDuration']
for participants in specific_match['participants'][position - 1]['stats']:
if str(participants) == 'goldEarned':
goldEarned = specific_match['participants'][position - 1]['stats'][participants]
elif str(participants) == 'totalDamageDealtToChampions':
totalDamageDealtToChampions = specific_match['participants'][position - 1]['stats'][participants]
elif str(participants) == 'minionsKilled':
minionsKilled = specific_match['participants'][position - 1]['stats'][participants]
elif str(participants) == 'wardsPlaced':
wardsPlaced = specific_match['participants'][position - 1]['stats'][participants]
elif str(participants) == 'kills':
kills = specific_match['participants'][position - 1]['stats'][participants]
elif str(participants) == 'assists':
assists = specific_match['participants'][position - 1]['stats'][participants]
elif str(participants) == 'deaths':
deaths = specific_match['participants'][position - 1]['stats'][participants]
damage_per_gold += float(totalDamageDealtToChampions) / float(goldEarned)
cs_per_minute += float(minionsKilled) / float((match_time / 60))
kda += float((kills + assists)) / float((deaths + 1))
wardsPlaced_per_minute += float(wardsPlaced)
damage_per_gold /= len(all_players_id[fid])
cs_per_minute /= len(all_players_id[fid])
kda /= len(all_players_id[fid])
wardsPlaced_per_minute /= len(all_players_id[fid])
data = {
'damage_per_gold': damage_per_gold,
'cs_per_minute': cs_per_minute,
'kda':kda,
'wards_placed_per_minute':wardsPlaced_per_minute
}
return data
count = 0
players = Player.objects.filter(vector__isnull=False)
players = [player for player in players if json.loads(player.vector.replace("'", '"'))['wards_placed_per_minute'] < 1]
for player in players:
try:
player.vector = search(player.userid)
player.save()
time.sleep(2)
except:
pass
count += 1
print float(count) / len(players)
|
Tafkas/solarpi | migrations/versions/721be6649087_.py | Python | bsd-3-clause | 2,933 | 0 | """Initialize database
Revision ID: 721be6649087
Revises: None
Create Date: 2016-05-18 22:42:25.431499
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "721be6649087"
down_revision = None
def upgrade():
    """Create the initial solarpi tables: electricity_data, pvdata, weather_data."""
    # commands auto generated by Alembic - please adjust!
    op.create_table(
        "electricity_data",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("created_at", sa.Text(), nullable=False),
        sa.Column("meter_180", sa.Float(), nullable=True),
        sa.Column("meter_280", sa.Float(), nullable=True),
        sa.Column("active_power", sa.Float(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_table(
        "pvdata",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("created_at", sa.Text(), nullable=False),
        sa.Column("dc_1_u", sa.Integer(), nullable=True),
        sa.Column("dc_1_i", sa.Float(), nullable=True),
        sa.Column("ac_1_u", sa.Integer(), nullable=True),
        sa.Column("ac_1_p", sa.Integer(), nullable=True),
        sa.Column("dc_2_u", sa.Integer(), nullable=True),
        sa.Column("dc_2_i", sa.Float(), nullable=True),
        sa.Column("ac_2_u", sa.Integer(), nullable=True),
        sa.Column("ac_2_p", sa.Integer(), nullable=True),
        sa.Column("dc_3_u", sa.Integer(), nullable=True),
        sa.Column("dc_3_i", sa.Float(), nullable=True),
        sa.Column("ac_3_u", sa.Integer(), nullable=True),
        sa.Column("ac_3_p", sa.Integer(), nullable=True),
        sa.Column("current_power", sa.Integer(), nullable=True),
        sa.Column("daily_energy", sa.Float(), nullable=True),
        sa.Column("total_energy", sa.Integer(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_table(
        "weather_data",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("created_at", sa.Text(), nullable=False),
        sa.Column("temp", sa.Float(), nullable=True),
        sa.Column("pressure", sa.Integer(), nullable=True),
        sa.Column("temp_min", sa.Float(), nullable=True),
        sa.Column("temp_max", sa.Float(), nullable=True),
        sa.Column("humidity", sa.Integer(), nullable=True),
        sa.Column("wind_speed", sa.Float(), nullable=True),
        sa.Column("wind_gust", sa.Float(), nullable=True),
        sa.Column("wind_deg", sa.Integer(), nullable=True),
        sa.Column("clouds", sa.Integer(), nullable=True),
        sa.Column("rain", sa.Integer(), nullable=True),
        sa.Column("snow", sa.Integer(), nullable=True),
        sa.Column("weather_id", sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint("id"),
    )
    # end Alembic commands #
def downgrade():
    """Drop every table created by upgrade(), newest first."""
    # commands auto generated by Alembic - please adjust! #
    for table_name in ("weather_data", "pvdata", "electricity_data"):
        op.drop_table(table_name)
    # end Alembic commands #
|
rudivs/TaxonLinker | taxonutils.py | Python | mit | 4,328 | 0.013863 | #!/usr/bin/env python
import csv
import difflib
# Field separator for taxon dictionary files: a local settings module may
# override it; otherwise default to tab-separated.
try:
    from settings import FIELD_SEP
except ImportError:
    FIELD_SEP = '\t'
class TaxonIndex():
    """
    TaxonIndex is a class for reading a taxon dictionary file (which must be
    in the form of a tab-separated CSV text file), and matching genera and taxa
    against that dictionary using a fuzzy-matching algorithm to deal with
    spelling errors.
    """
    # Todo: handle if taxonID in fieldnames but not provided for a row
    # Todo: does this work with Unicode files?
    # Todo: sort the genus lists

    def __init__(self, csvfile, delimiter=FIELD_SEP):
        """Read *csvfile* and build the lookup indexes.

        Builds:
          - taxonindex: scientificName -> remaining valid columns
          - idindex: taxonID -> remaining valid columns (only when a taxonID
            column is present)
          - genusindex: capitalized genus -> list of scientific names

        :raises csv.Error: when headers lack 'scientificName' or a row has the
            wrong number of fields.
        """
        self.taxonindex = dict()
        self.genusindex = dict()
        self.idindex = dict()
        self._taxontest = dict()
        validheaders = set(['scientificName', 'taxonID', 'taxonomicStatus',
                            'relatedResourceID'])
        with open(csvfile, 'rb') as f:
            try:
                # Sniff the dialect from a sample; fall back to the default
                # dialect when sniffing fails.
                dialect = csv.Sniffer().sniff(f.read(2048), delimiters=delimiter)
                f.seek(0)
                self.reader = csv.DictReader(f, dialect=dialect)
            except csv.Error:
                f.seek(0)
                self.reader = csv.DictReader(f)
            self.fieldnames = self.reader.fieldnames
            if 'scientificName' in self.fieldnames:
                for r in self.reader:
                    if len(r) != len(self.fieldnames):
                        raise csv.Error("Number of fields should be "
                                        "%s: %s" % (len(self.fieldnames), str(r)))
                    self.taxonindex[r['scientificName']] = {
                        k: v for k, v in r.items()
                        if k in validheaders - set(['scientificName'])}
                    if 'taxonID' not in self.fieldnames:
                        # No explicit id column: the scientific name doubles
                        # as the taxon id.
                        self.taxonindex[r['scientificName']]['taxonID'] = \
                            r['scientificName']
                    else:
                        self.idindex[r['taxonID']] = \
                            {k: v for k, v in r.items() if k in validheaders -
                             set(['taxonID'])}
                    # Index by genus (first word of the name, capitalized).
                    try:
                        self.genusindex[r['scientificName'].split(' ')[0].
                                        strip().capitalize()] += [r['scientificName']]
                    except KeyError:
                        self.genusindex[r['scientificName'].split(' ')[0].
                                        strip().capitalize()] = [r['scientificName']]
            else:
                raise csv.Error("CSV Error: headers must include at least "
                                "'scientificName'. Current headers: %s" % str(self.fieldnames))
        # Normalized (stripped, lowercased) name -> original name, used for
        # case-insensitive fuzzy matching.
        self._taxontest = {n.strip().lower(): n for n in self.taxonindex}

    def matchgenera(self, genus, n=1, sensitivity=0.85):
        """Returns up to n genera which are similar to the genus of the name
        provided.
        """
        # Assumes first word is genus
        test = genus.strip().split(' ')[0].capitalize()
        return difflib.get_close_matches(test, self.genusindex.keys(),
                                         n, sensitivity)

    def matchtaxa(self, t, genus=None, n=1, sensitivity=0.65):
        """Returns up to n taxa which have a similar name to the one
        provided. If genus is provided, limits search to that genus.
        """
        test = t.strip().lower()
        if genus is None:
            results = difflib.get_close_matches(test, self._taxontest, n,
                                                sensitivity)
        else:
            glist = [name.lower() for name in self.genusindex[genus]]
            results = difflib.get_close_matches(test, glist, n, sensitivity)
        # Map the matched normalized names back to the original spellings.
        return [self._taxontest[r] for r in results]
def ratio(t1, t2):
    """Returns the closeness of the match between two taxon names, with 1 being
    exact.
    """
    left = t1.strip().lower()
    right = t2.strip().lower()
    matcher = difflib.SequenceMatcher(None, left, right)
    return matcher.ratio()
# Ad-hoc smoke test (Python 2 print-statement syntax): load both sample
# dictionary formats, dump their contents, then exercise a fuzzy lookup.
if __name__ == '__main__':
    dict1 = TaxonIndex('test/sn_dict')
    dict2 = TaxonIndex('test/id_sn_dict')
    print("sn_dict:")
    for k, v in dict1.taxonindex.items():
        print(k + ": " + str(v))
    print("\nid_sn_dict:")
    for k, v in dict2.taxonindex.items():
        print(k + ": " + str(v))
    print
    print dict1.matchtaxa('THALASSARCH CHLORORYNCHOS', 1, 0.9)
|
dipanshunagar/PySyft | syft/mpc/rss/__init__.py | Python | apache-2.0 | 162 | 0 | from .config import PrecisionConfig
from .r | epo import MPCRepo
from .tensor import RSSMPCTensor
# Touch each re-exported symbol so linters treat the imports as used;
# the resulting string is never read.
s = "".join([str(PrecisionConfig), str(MPCRepo), str(RSSMPCTensor)])
|
mozilla/bztools | auto_nag/scripts/workflow/p1.py | Python | bsd-3-clause | 410 | 0 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with t | his file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from auto | _nag.scripts.workflow.p1_no_activity import P1NoActivity
from auto_nag.scripts.workflow.p1_no_assignee import P1NoAssignee
# Script entry point: run both P1 triage checks, assignee first.
if __name__ == "__main__":
    for triage_check in (P1NoAssignee, P1NoActivity):
        triage_check().run()
|
bytebit-ch/uguubot | plugins/util/bucket.py | Python | gpl-3.0 | 1,212 | 0 | from time import time
class TokenBucket(object):
    """An implementation of the token bucket algorithm.

    >>> bucket = TokenBucket(80, 0.5)
    >>> bucket.consume(10)
    True
    >>> bucket.consume(90)
    False
    """

    def __init__(self, tokens, fill_rate):
        """tokens is the total tokens in the bucket. fill_rate is the
        rate in tokens/second that the bucket will be refilled."""
        self.capacity = float(tokens)
        self._tokens = float(tokens)
        self.fill_rate = float(fill_rate)
        self.timestamp = time()

    def consume(self, tokens):
        """Consume tokens from the bucket. Returns True if there were
        sufficient tokens otherwise False."""
        # Reading self.tokens (the property) also accrues tokens for the
        # time elapsed since the last look.
        if tokens <= self.tokens:
            self._tokens -= tokens
        else:
            return False
        return True

    def refill(self):
        """Instantly top the bucket back up to full capacity."""
        self._tokens = self.capacity

    def get_tokens(self):
        """Return the current token count, lazily adding tokens accrued
        since the last call (capped at capacity)."""
        now = time()
        if self._tokens < self.capacity:
            delta = self.fill_rate * (now - self.timestamp)
            self._tokens = min(self.capacity, self._tokens + delta)
        self.timestamp = now
        return self._tokens

    tokens = property(get_tokens)
|
showell/zulip | zilencer/migrations/0016_remote_counts.py | Python | apache-2.0 | 2,165 | 0.002309 | # Generated by Django 1.11.18 on 2019-02-02 06:02
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add per-remote-server analytics tables (RemoteInstallationCount and
    RemoteRealmCount) mirroring the local count tables."""

    dependencies = [
        ('zilencer', '0015_delete_billing'),
    ]

    operations = [
        migrations.CreateModel(
            name='RemoteInstallationCount',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('property', models.CharField(max_length=32)),
                ('subgroup', models.CharField(max_length=16, null=True)),
                ('end_time', models.DateTimeField()),
                ('value', models.BigIntegerField()),
                ('remote_id', models.IntegerField(db_index=True)),
                ('server', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='zilencer.RemoteZulipServer')),
            ],
        ),
        migrations.CreateModel(
            name='RemoteRealmCount',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('property', models.CharField(max_length=32)),
                ('subgroup', models.CharField(max_length=16, null=True)),
                ('end_time', models.DateTimeField()),
                ('value', models.BigIntegerField()),
                ('realm_id', models.IntegerField(db_index=True)),
                ('remote_id', models.IntegerField(db_index=True)),
                ('server', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='zilencer.RemoteZulipServer')),
            ],
        ),
        migrations.AlterUniqueTogether(
            name='remoterealmcount',
            unique_together={('server', 'realm_id', 'property', 'subgroup', 'end_time')},
        ),
        migrations.AlterIndexTogether(
            name='remoterealmcount',
            index_together={('property', 'end_time')},
        ),
        migrations.AlterUniqueTogether(
            name='remoteinstallationcount',
            unique_together={('server', 'property', 'subgroup', 'end_time')},
        ),
    ]
|
0ps/wfuzz | src/wfuzz/externals/moduleman/plugin.py | Python | gpl-2.0 | 482 | 0 | import collections |
def moduleman_plugin(*args):
    """Class decorator that marks a class as a moduleman plugin.

    Usable both bare (``@moduleman_plugin``) and parameterized with required
    method names (``@moduleman_plugin("method_a", "method_b")``); in the
    latter form the decorated class must define every listed method.

    :raises Exception: if a required method is missing from the class.
    """
    method_args = []

    def inner_decorator(cls):
        # Verify the declared plugin interface before tagging the class.
        for method in method_args:
            if method not in dir(cls):
                raise Exception("Required method %s not implemented" % method)
        cls.__PLUGIN_MODULEMAN_MARK = "Plugin mark"
        return cls

    # Bare usage: the single positional argument is the class itself.
    # callable() is portable; collections.Callable was removed in Python 3.10.
    if not callable(args[0]):
        method_args += args
        return inner_decorator
    return inner_decorator(args[0])
|
jansohn/pyload | module/plugins/hoster/FileSharkPl.py | Python | gpl-3.0 | 3,815 | 0.007602 | # -*- coding: utf-8 -*-
import re
import urlparse
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class FileSharkPl(SimpleHoster):
    __name__ = "FileSharkPl"
    __type__ = "hoster"
    __version__ = "0.15"
    __status__ = "testing"

    __pattern__ = r'http://(?:www\.)?fileshark\.pl/pobierz/\d+/\w+'
    __config__ = [("use_premium", "bool", "Use premium account if available", True)]

    __description__ = """FileShark.pl hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("prOq", None),
                   ("Walter Purcaro", "vuolter@gmail.com")]

    NAME_PATTERN = r'<h2 class="name-file">(?P<N>.+?)</h2>'
    SIZE_PATTERN = r'<p class="size-file">(.*?)<strong>(?P<S>\d+\.?\d*)\s(?P<U>\w+)</strong></p>'
    OFFLINE_PATTERN = r'(P|p)lik zosta. (usuni.ty|przeniesiony)'

    LINK_FREE_PATTERN = r'<a rel="nofollow" href="(.*?)" class="btn-upload-free">'
    LINK_PREMIUM_PATTERN = r'<a rel="nofollow" href="(.*?)" class="btn-upload-premium">'

    WAIT_PATTERN = r'var timeToDownload = (\d+);'
    ERROR_PATTERN = r'<p class="lead text-center alert alert-warning">(.*?)</p>'
    IP_ERROR_PATTERN = r'Strona jest dost.pna wy..cznie dla u.ytkownik.w znajduj.cych si. na terenie Polski'
    SLOT_ERROR_PATTERN = r'Osi.gni.to maksymaln. liczb. .ci.ganych jednocze.nie plik.w\.'
    CAPTCHA_PATTERN = r'<img src="data:image/jpeg;base64,(.*?)" title="captcha"'
    TOKEN_PATTERN = r'name="form\[_token\]" value="(.*?)" />'

    def setup(self):
        """Configure download limits depending on premium status."""
        self.resume_download = True

        if self.premium:
            self.multiDL = True
            self.limitDL = 20
        else:
            self.multiDL = False

    def check_errors(self):
        """Inspect the loaded page for hoster error messages and retry/fail
        accordingly; clears any previous 'error' info entry on success."""
        #: Check if file is now available for download (-> file name can be found in html body)
        m = re.search(self.WAIT_PATTERN, self.html)
        if m is not None:
            errmsg = self.info['error'] = _("Another download already run")
            self.retry(15, int(m.group(1)), errmsg)

        m = re.search(self.ERROR_PATTERN, self.html)
        if m is not None:
            alert = m.group(1)
            if re.match(self.IP_ERROR_PATTERN, alert):
                self.fail(_("Only connections from Polish IP are allowed"))
            elif re.match(self.SLOT_ERROR_PATTERN, alert):
                errmsg = self.info['error'] = _("No free download slots available")
                self.log_warning(errmsg)
                self.retry(10, 30 * 60, _("Still no free download slots available"))
            else:
                self.info['error'] = alert
                self.retry(10, 10 * 60, _("Try again later"))

        self.info.pop('error', None)

    def handle_free(self, pyfile):
        """Walk the free-download flow: follow the free link, honor the wait
        timer, solve the captcha form and start the download."""
        m = re.search(self.LINK_FREE_PATTERN, self.html)
        if m is None:
            self.error(_("Download url not found"))

        link = urlparse.urljoin("http://fileshark.pl/", m.group(1))
        self.html = self.load(link)

        m = re.search(self.WAIT_PATTERN, self.html)
        if m is not None:
            seconds = int(m.group(1))
            self.log_debug("Wait %s seconds" % seconds)
            self.wait(seconds)

        action, inputs = self.parse_html_form('action=""')

        m = re.search(self.TOKEN_PATTERN, self.html)
        if m is None:
            self.retry(msg=_("Captcha form not found"))

        inputs['form[_token]'] = m.group(1)

        m = re.search(self.CAPTCHA_PATTERN, self.html)
        if m is None:
            self.retry(msg=_("Captcha image not found"))

        # Captcha image is embedded base64; decode it and ask the captcha
        # solver for the text.
        inputs['form[captcha]'] = self.captcha.decrypt_image(m.group(1).decode('base64'), input_type='jpeg')
        inputs['form[start]'] = ""

        self.download(link, post=inputs, disposition=True)
getInfo = create_getInfo(FileSharkPl)
|
LBatsoft/python3-webapp | www/pymonitor.py | Python | gpl-3.0 | 1,748 | 0.006865 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "pwxc"
import os, sys, time, subprocess
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
def log(s):
    """Print *s* to stdout prefixed with the monitor tag."""
    line = '[Monitor] %s' % s
    print(line)
class MyFileSystemEventHander(FileSystemEventHandler):
    """Watchdog handler that invokes a restart callback whenever any event
    touches a Python source file."""

    def __init__(self, fn):
        super(MyFileSystemEventHander, self).__init__()
        self.restart = fn

    def on_any_event(self, event):
        # Only .py files are interesting; everything else is ignored.
        if not event.src_path.endswith('.py'):
            return
        log('Python source file changed: %s' % event.src_path)
        self.restart()
# Command line to (re)launch; the placeholder is replaced with the real
# script command in the __main__ block at the bottom of this file.
command = ['echo', 'ok']
# Popen handle of the running child process, or None when nothing runs.
process = None
def kill_process():
    """Terminate the current child process (if any) and wait for it to exit."""
    global process
    if process:
        log('Kill process [%s]...' % process.pid)
        process.kill()
        # wait() reaps the child and populates returncode.
        process.wait()
        log('Process ended with code %s.' % process.returncode)
        process = None
def start_process():
    """Spawn the monitored command, wiring its stdio to this process."""
    global process, command
    log('Start process %s...' % ' '.join(command))
    process = subprocess.Popen(command, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr)
def restart_process():
    """Stop the current child (if any) and start a fresh one."""
    kill_process()
    start_process()
def start_watch(path, callback):
    """Watch *path* recursively and restart the child on any .py change.

    Blocks until KeyboardInterrupt, then stops the observer cleanly.

    :param path: directory to observe.
    :param callback: unused; kept for interface compatibility (callers pass None).
    """
    observer = Observer()
    observer.schedule(MyFileSystemEventHander(restart_process), path, recursive=True)
    observer.start()
    log('Watching directory %s...' % path)
    start_process()
    try:
        while True:
            time.sleep(0.5)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
if __name__ == '__main__':
    argv = sys.argv[1:]
    if not argv:
        print('Usage: ./pymonitor your-script.py')
        exit(0)
    # Ensure the watched script is launched through the python interpreter.
    if argv[0] != 'python':
        argv.insert(0, 'python')
    command = argv
    # Watch the current working directory recursively.
    path = os.path.abspath('.')
    start_watch(path, None)
Microsoft/ApplicationInsights-Python | tests/applicationinsights_tests/channel_tests/contracts_tests/TestMessageData.py | Python | mit | 2,310 | 0.007359 | import unittest
import datetime
import uuid
import sys
import json
import sys, os, os.path
root_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', '..', '..')
if root_directory not in sys.path:
sys.path.append(root_directory)
from applicationinsights.channel.contracts import *
from .Utils import TestJsonEncoder
class TestMessageData(unittest.TestCase):
    """Unit tests for the MessageData telemetry contract (construction,
    property round-trips and JSON serialization)."""

    def test_construct(self):
        item = MessageData()
        self.assertNotEqual(item, None)

    def test_ver_property_works_as_expected(self):
        expected = 42
        item = MessageData()
        item.ver = expected
        actual = item.ver
        self.assertEqual(expected, actual)
        expected = 13
        item.ver = expected
        actual = item.ver
        self.assertEqual(expected, actual)

    def test_message_property_works_as_expected(self):
        expected = 'Test string'
        item = MessageData()
        item.message = expected
        actual = item.message
        self.assertEqual(expected, actual)
        expected = 'Other string'
        item.message = expected
        actual = item.message
        self.assertEqual(expected, actual)

    def test_severity_level_property_works_as_expected(self):
        expected = object()
        item = MessageData()
        item.severity_level = expected
        actual = item.severity_level
        self.assertEqual(expected, actual)
        expected = object()
        item.severity_level = expected
        actual = item.severity_level
        self.assertEqual(expected, actual)

    def test_properties_property_works_as_expected(self):
        item = MessageData()
        actual = item.properties
        self.assertNotEqual(actual, None)

    def test_serialize_works_as_expected(self):
        item = MessageData()
        item.ver = 42
        item.message = 'Test string'
        item.severity_level = object()
        for key, value in { 'key1': 'test value 1' , 'key2': 'test value 2' }.items():
            item.properties[key] = value
        actual = json.dumps(item.write(), separators=(',', ':'), cls=TestJsonEncoder)
        expected = '{"ver":42,"message":"Test string","severityLevel":{},"properties":{"key1":"test value 1","key2":"test value 2"}}'
        self.assertEqual(expected, actual)
|
giacomov/3ML | threeML/utils/data_builders/time_series_builder.py | Python | bsd-3-clause | 44,553 | 0.001369 | import copy
import re
import astropy.io.fits as fits
import numpy as np
from threeML.exceptions.custom_exceptions import custom_warnings
from threeML.io.file_utils import file_existing_and_readable
from threeML.io.progress_bar import progress_bar
from threeML.plugins.DispersionSpectrumLike import DispersionSpectrumLike
from threeML.plugins.OGIPLike import OGIPLike
from threeML.plugins.SpectrumLike import NegativeBackground, SpectrumLike
from threeML.utils.data_builders.fermi.gbm_data import GBMCdata, GBMTTEFile
from threeML.utils.data_builders.fermi.lat_data import LLEFile
from threeML.utils.histogram import Histogram
from threeML.utils.OGIP.pha import PHAWrite
from threeML.utils.OGIP.response import (InstrumentResponse,
InstrumentResponseSet, OGIPResponse)
from threeML.utils.polarization.binned_polarization import \
BinnedModulationCurve
from threeML.utils.spectrum.binned_spectrum import (
BinnedSpectrum, BinnedSpectrumWithDispersion)
from threeML.utils.statistics.stats_tools import Significance
from threeML.utils.time_interval import TimeIntervalSet
from threeML.utils.time_series.binned_spectrum_series import \
BinnedSpectrumSeries
from threeML.utils.time_series.event_list import (
EventList, EventListWithDeadTime, EventListWithDeadTimeFraction,
EventListWithLiveTime)
from threeML.utils.time_series.time_series import TimeSeries
try:
from polarpy.polar_data import POLARData
from polarpy.polarlike import PolarLike
from polarpy.polar_response import PolarResponse
has_polarpy = True
except (ImportError):
has_polarpy = False
try:
import gbm_drm_gen
has_balrog = True
except (ImportError):
has_balrog = False
class BinningMethodError(RuntimeError):
    """Raised when an unknown or unsupported binning method is requested."""
class TimeSeriesBuilder(object):
def __init__(
self,
name,
time_series,
response=None,
poly_order=-1,
unbinned=True,
verbose=True,
restore_poly_fit=None,
container_type=BinnedSpectrumWithDispersion,
**kwargs
):
"""
Class for handling generic time series data including binned and event list
series. Depending on the data, this class builds either a SpectrumLike or
DisperisonSpectrumLike plugin
For specific instruments, use the TimeSeries.from() classmethods
:param name: name for the plugin
:param time_series: a TimeSeries instance
:param response: options InstrumentResponse instance
:param poly_order: the polynomial order to use for background fitting
:param unbinned: if the background should be fit unbinned
:param verbose: the verbosity switch
:param restore_poly_fit: file from which to read a prefitted background
"""
assert isinstance(
time_series, TimeSeries), "must be a TimeSeries instance"
assert issubclass(
container_type, Hist | ogram), "must be a subclass of Histogram"
self._name = name
self._container_type = container_type
self._time_series = time_series # type: TimeSeries
# make sure we have a proper response
if response is not None:
assert (
isinstance(response, InstrumentResponse)
or isinstance(response, InstrumentR | esponseSet)
or isinstance(response, str)
), "Response must be an instance of InstrumentResponse"
# deal with RSP weighting if need be
if isinstance(response, InstrumentResponseSet):
# we have a weighted response
self._rsp_is_weighted = True
self._weighted_rsp = response
# just get a dummy response for the moment
# it will be corrected when we set the interval
self._response = InstrumentResponse.create_dummy_response(
response.ebounds, response.monte_carlo_energies
)
else:
self._rsp_is_weighted = False
self._weighted_rsp = None
self._response = response
self._verbose = verbose
self._active_interval = None
self._observed_spectrum = None
self._background_spectrum = None
self._measured_background_spectrum = None
self._time_series.poly_order = poly_order
self._default_unbinned = unbinned
# try and restore the poly fit if requested
if restore_poly_fit is not None:
if file_existing_and_readable(restore_poly_fit):
self._time_series.restore_fit(restore_poly_fit)
if verbose:
print("Successfully restored fit from %s" %
restore_poly_fit)
else:
custom_warnings.warn(
"Could not find saved background %s." % restore_poly_fit
)
if "use_balrog" in kwargs:
self._use_balrog = kwargs["use_balrog"]
else:
self._use_balrog = False
    def _output(self):
        # Placeholder: intended to extend the parent plugin's output with the
        # time-series information, per the commented sketch below.
        pass

        # super_out = super(EventListLike, self)._output()
        # return super_out.append(self._time_series._output())
    # NOTE(review): the double indirection (private accessor wrapped by a
    # triple-underscore forwarder used by the property) looks like the usual
    # trick to let subclasses override the accessors — confirm before
    # simplifying.
    def __set_poly_order(self, value):
        """Background poly order setter """
        self._time_series.poly_order = value

    def ___set_poly_order(self, value):
        """ Indirect poly order setter """
        self.__set_poly_order(value)

    def __get_poly_order(self):
        """ Get poly order """
        return self._time_series.poly_order

    def ___get_poly_order(self):
        """ Indirect poly order getter """
        return self.__get_poly_order()

    background_poly_order = property(
        ___get_poly_order,
        ___set_poly_order,
        doc="Get or set the background polynomial order",
    )
    def set_active_time_interval(self, *intervals, **kwargs):
        """
        Set the time interval to be used during the analysis.
        For now, only one interval can be selected. This may be
        updated in the future to allow for self consistent time
        resolved analysis.
        Specified as 'tmin-tmax'. Intervals are in seconds. Example:

        set_active_time_interval("0.0-10.0")

        which will set the energy range 0-10. seconds.
        :param intervals: time intervals as 'tmin-tmax' strings (seconds)
        :param kwargs: currently unused
        :return:
        """
        self._time_series.set_active_time_intervals(*intervals)

        # extract a spectrum
        if self._response is None:
            self._observed_spectrum = self._container_type.from_time_series(
                self._time_series, use_poly=False
            )
        else:
            # A weighted response set must be re-weighted by the counts in the
            # newly selected intervals before extracting the spectrum.
            if self._rsp_is_weighted:
                self._response = self._weighted_rsp.weight_by_counts(
                    *self._time_series.time_intervals.to_string().split(",")
                )
            self._observed_spectrum = self._container_type.from_time_series(
                self._time_series, self._response, use_poly=False
            )
        self._active_interval = intervals

        # re-get the background if there was a time selection
        if self._time_series.poly_fit_exists:
            # Model-based background (polynomial fit) ...
            self._background_spectrum = self._container_type.from_time_series(
                self._time_series, response=self._response, use_poly=True, extract=False
            )
            # ... and the directly measured background counts.
            self._measured_background_spectrum = self._container_type.from_time_series(
                self._time_series,
                response=self._response,
                use_poly=False,
                extract=True,
            )
        self._tstart = self._time_series.time_intervals.absolute_start_time
        self._tstop = self._time_series.time_intervals.absolute_stop_time
def set_background_interval(self, *intervals, **options):
"""
Set the time interval to fit the background.
Multiple intervals can be input as separate arguments
Specified as 'tmin-tmax'. Intervals are in seconds. Example:
setBackgroundInterval("-10.0-0.0","10.-15.")
:param *intervals:
:param **options:
:return: none
|
emesene/emesene | emesene/gui/common/TrayIcon.py | Python | gpl-3.0 | 14,327 | 0.001466 | # -*- coding: utf-8 -*-
# This file is part of emesene.
#
# emesene is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# emesene is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with emesene; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import sys
import gtk
import gobject
import time
from gui.gtkui import check_gtk3
if check_gtk3():
from gi.repository import Gio
import extension
import e3
from e3 import status
import gui
import gui.gtkui.utils as utils
import gui.gtkui.StatusMenu as StatusMenu
from gui import Plus
from NumerableTrayIcon import NumerableTrayIcon
class TrayIcon(gtk.StatusIcon, NumerableTrayIcon):
    """
    A widget that implements the tray icon of emesene for gtk
    """
    NAME = 'Tray Icon'
    DESCRIPTION = 'The gtk tray icon'
    AUTHOR = 'Mariano Guerra'
    WEBSITE = 'www.emesene.org'

    def __init__(self, handler, main_window=None):
        """
        constructor

        handler -- a gui.base.Handler.TrayIconHandler object
        """
        NumerableTrayIcon.__init__(self, handler)
        gtk.StatusIcon.__init__(self)

        self.main_window = main_window
        self.menu = None
        self.tag = None  # keeps the gobject id for the windows-hide-menu hack
        # message count icon
        self.unread_icon = None
        self.connect('activate', self._on_activate)
        self.connect('popup-menu', self._on_popup)
        self.set_login()
        gtk.StatusIcon.set_visible(self, True)
        self.set_tooltip("emesene")

    def set_login(self):
        """
        method called to set the state to the login window
        """
        self.menu = LoginMenu(self.handler, self.main_window)
        self.menu.show_all()
        # On win32 the popup menu does not auto-hide; use the enter/leave
        # notify hack below to hide it after the pointer leaves.
        if sys.platform == "win32":
            self.menu.connect('leave-notify-event', self.on_leave_notify_event)
            self.menu.connect('enter-notify-event', self.on_enter_notify_event)
        self.set_from_file(self.handler.theme.image_theme.logo_panel)

    def set_main(self, session):
        """
        method called to set the state to the main window
        """
        gui.BaseTray.set_main(self, session)
        if self.menu:
            self.menu.unsubscribe()
        self.menu = MainMenu(self.handler, self.main_window)
        self.menu.show_all()
        if sys.platform == "win32":
            self.menu.connect('leave-notify-event', self.on_leave_notify_event)
            self.menu.connect('enter-notify-event', self.on_enter_notify_event)
        self.set_tooltip("emesene - " + self.handler.session.account.account)
        self._on_status_change_succeed(self.handler.session.account.status)

    def windows_workaround(self, *args):
        # Hide the menu; scheduled with a 1 s delay when the pointer leaves it.
        if self.menu:
            self.menu.hide()

    def on_leave_notify_event(self, *args):
        """
        callback called when the mouse leaves this window
        """
        if self.tag is None:
            self.tag = gobject.timeout_add_seconds(1, self.windows_workaround)

    def on_enter_notify_event(self, *args):
        """
        callback called when the mouse enters this window
        """
        # Cancel the pending hide if the pointer came back in time.
        if self.tag:
            gobject.source_remove(self.tag)
            self.tag = None

    def _on_activate(self, trayicon):
        """
        callback called when the status icon is activated
        (includes clicking the icon)
        """
        if self.last_new_message is not None and (self.count != 0):
            # show the tab with the latest message
            cid = self.last_new_message
            conv_manager = self.handler.session.get_conversation_manager(cid)
            if conv_manager:
                conversation = conv_manager.has_similar_conversation(cid)
                conv_manager.present(conversation)
        else:
            self.handler.on_hide_show_mainwindow(self.main_window)

    def _on_status_change_succeed(self, stat):
        """
        change the icon in the tray according to user's state
        """
        if stat not in status.ALL or stat == -1:
            return
        if check_gtk3():
            self.unread_icon = self._get_numerable_icon_for_status(stat)
            self._update_numerable_icon(self.count)
        else:
            self.set_from_file(self.handler.theme.image_theme.status_icons_panel[stat])

    def count_changed(self, count):
        '''method called when unread message count changes'''
        if check_gtk3():
            self._update_numerable_icon(count)
        else:
            # Pre-gtk3 there is no numerable icon: blink while unread > 0.
            self.set_blinking(count != 0)

    def _get_numerable_icon_for_status(self, stat):
        '''create a new Numerable icon with current status as base image'''
        icon_path = self.handler.theme.image_theme.status_icons_panel[stat]
        gfile = Gio.File.new_for_path(icon_path)
        gicon = Gio.FileIcon.new(gfile)
        return gtk.NumerableIcon.new(gicon)

    def _update_numerable_icon(self, count):
        # Overlay the unread count onto the status icon and display it.
        self.unread_icon.set_count(count)
        self.set_from_gicon(self.unread_icon)

    def _on_popup(self, trayicon, button, activate_time):
        """
        callback called when the popup of the status icon is activated
        (usually through right-clicking the status icon)
        """
        position = None
        user_data = None
        # On mac/linux let gtk position the menu next to the status icon.
        if os.name == 'mac' or sys.platform == 'linux2' or sys.platform == 'linux3':
            position = gtk.status_icon_position_menu
            user_data = trayicon
        self.menu.popup(None, None, position, button, activate_time, user_data)

    def _on_contact_attr_changed(self, *args):
        """
        This is called when a contact changes something
        """
        self.menu.list_contacts._on_contact_change_something(*args)

    def hide(self):
        # Remove the icon from the tray and detach all signal handlers.
        self.unsubscribe()
        gtk.StatusIcon.set_visible(self, False)

    def unsubscribe(self):
        self.disconnect_signals()
        if self.menu:
            self.menu.unsubscribe()
class LoginMenu(gtk.Menu):
    """
    a widget that represents the menu displayed on the trayicon on the
    login window
    """

    def __init__(self, handler, main_window=None):
        """
        constructor

        handler -- a e3common.Handler.TrayIconHandler object
        """
        gtk.Menu.__init__(self)
        self.handler = handler
        self.hide_show_mainwindow = gtk.MenuItem(_('Hide/Show emesene'))
        self.hide_show_mainwindow.connect('activate',
            lambda *args: self.handler.on_hide_show_mainwindow(main_window))

        self.quit = gtk.ImageMenuItem(gtk.STOCK_QUIT)
        self.quit.connect('activate',
            lambda *args: self.handler.on_quit_selected())

        self.append(self.hide_show_mainwindow)
        self.append(self.quit)

    def unsubscribe(self):
        # Nothing to detach: this menu holds no session signal handlers.
        pass
class MainMenu(gtk.Menu):
"""
a widget that represents the menu displayed on the trayicon on the
main window
"""
def __init__(self, handler, main_window=None):
"""
constructor
handler -- a e3common.Handler.TrayIconHandler object
"""
gtk.Menu.__init__(self)
self.handler = handler
self.status = gtk.ImageMenuItem(_('Status'))
self.status.set_image(gtk.image_new_from_stock(gtk.STOCK_CONVERT,
gtk.ICON_SIZE_MENU))
self.status_menu = StatusMenu.StatusMenu(handler.on_status_selected)
self.status.set_submenu(self.status_menu)
self.list = gtk.ImageMenuItem(_('Contacts'))
self.list.set_image(utils.safe_gtk_image_load(gui.theme.image_theme.chat))
self.list_contacts = ContactsMenu(handler, main_window)
self.list.set_submenu(self.list_contacts)
self.hide_show_mainw |
openweave/openweave-core | src/device-manager/python/openweave/WeaveUtility.py | Python | apache-2.0 | 2,006 | 0.001994 | #
# Copyright (c) 2020 Google LLC.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# This file is utility for Weave
#
from __future__ import absolute_import
from __future__ import print_function
import binascii
import logging
from ctypes import *
import six
import sys
class WeaveUtility(object):
    """Static helpers for converting between Python buffers/strings and the
    ctypes representations used by the Weave native bindings."""

    @staticmethod
    def Hexlify(val):
        """Return the lowercase hex string for a bytes-like value."""
        return binascii.hexlify(val).decode()

    @staticmethod
    def VoidPtrToByteArray(ptr, len):
        """Copy *len* bytes from a ctypes void pointer into a new bytearray.

        Returns None when *ptr* is NULL.
        """
        if ptr:
            v = bytearray(len)
            memmove((c_byte * len).from_buffer(v), ptr, len)
            return v
        else:
            return None

    @staticmethod
    def ByteArrayToVoidPtr(array):
        """Return a c_void_p pointing at a private copy of *array*.

        A None input yields a NULL pointer.

        :raises TypeError: when *array* is neither bytes nor bytearray.
        """
        if array is not None:
            if not (isinstance(array, bytes) or isinstance(array, bytearray)):
                raise TypeError("Array must be an str or a bytearray")
            return cast((c_byte * len(array)).from_buffer_copy(array), c_void_p)
        else:
            return c_void_p(0)

    @staticmethod
    def IsByteArrayAllZeros(array):
        """Return True when every byte of *array* is zero (vacuously True
        for an empty array)."""
        return all(b == 0 for b in array)

    @staticmethod
    def ByteArrayToHex(array):
        """Return the hex-string representation of a bytes-like value."""
        return WeaveUtility.Hexlify(bytes(array))

    @staticmethod
    def CStringToString(s):
        """Decode a C byte string to text; None passes through."""
        return None if s is None else s.decode()

    @staticmethod
    def StringToCString(s):
        """Encode text to a C byte string; None passes through."""
        return None if s is None else s.encode()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.