repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
sandow-digital/django-mptt-sandow | tests/myapp/models.py | Python | mit | 2,252 | 0.003552 | from django.db import models
from mptt2.models import MPTTModel
from mptt2.managers import TreeManager
class CustomTreeManager(TreeManager):
pass
class Category(MPTTModel):
name = models.CharField(max_length=50)
parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
def __unicode__(self):
return self.name
def delete(self):
super(Category, self).delete()
class Genre(MPTTModel):
name = models.CharField(max_length=50, unique=True)
parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
def __unicode__(self):
return self.name
class Insert(MPTTModel):
parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
class MultiOrder(MPTTModel):
name = models.CharField(max_length=50)
size = models.PositiveIntegerField()
date = models.DateField()
parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
class MPTTMeta:
order_insertion_by = ['name', 'size', 'date']
def __unicode__(self):
return self.name
class Node(MPTTModel):
parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
class MPTTMeta:
left_attr = 'does'
right_attr = 'zis'
level_attr = 'madness'
tree_id_attr = 'work'
class OrderedInsertion(MPTTModel):
name = models.CharField(max_length=50)
parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
class MPTTMeta:
order_insertion_by = ['name']
def __unicode__(self): |
retur | n self.name
class Tree(MPTTModel):
parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
class Person(MPTTModel):
name = models.CharField(max_length=50)
parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
# just testing it's actually possible to override the tree manager
objects = models.Manager()
my_tree_manager = CustomTreeManager()
class MPTTMeta:
tree_manager_attr = 'my_tree_manager'
def __unicode__(self):
return self.name
class Student(Person):
type = models.CharField(max_length=50)
|
sim1234/Versioning | py_versioning/db/__init__.py | Python | gpl-2.0 | 103 | 0.019417 | #from .engine import CheckVersion | , CreateVersion
from .ver | sioner import DBVersioner, DBVersionCommander |
xuewei4d/scikit-learn | sklearn/datasets/_covtype.py | Python | bsd-3-clause | 6,567 | 0 | """Forest covertype dataset.
A classic dataset for classification benchmarks, featuring categorical and
real-valued features.
The dataset page is available from UCI Machine Learning Repository
https://archive.ics.uci.edu/ml/datasets/Covertype
Courtesy of Jock A. Blackard and Colorado State University.
"""
# Author: Lars Buitinck
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# License: BSD 3 clause
from gzip import GzipFile
import logging
from os.path import dirname, exists, join
from os import remove, makedirs
import numpy as np
import joblib
from . import get_data_home
from ._base import _convert_data_dataframe
from ._base import _fetch_remote
from ._base import RemoteFileMetadata
from ..utils import Bunch
from ._base import _pkl_filepath
from ..utils import check_random_state
from ..utils.validation import _deprecate_positional_args
# The original data can be found in:
# https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.data.gz
ARCHIVE = RemoteFileMetadata(
filename='covtype.data.gz',
url='https://ndownloader.figshare.com/files/5976039',
checksum=('614360d0257557dd1792834a85a1cdeb'
'fadc3c4f30b011d56afee7ffb5b15771'))
logger = logging.getLogger(__name__)
# Column names reference:
# https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.info
FEATURE_NAMES = ["Elevation",
"Aspect",
"Slope",
"Horizontal_Distance_To_Hydrology",
"Vertical_Distance_To_Hydrology",
"Horizontal_Distance_To_Roadways",
"Hillshade_9am",
"Hillshade_Noon",
"Hillshade_3pm",
"Horizontal_Distance_To_Fire_Points"]
FEATURE_NAMES += [f"Wilderness_Area_{i}" for i in range(4)]
FEATURE_NAMES += [f"So | il_Type_{i}" for i in range(40)]
TARGET_NAMES = ["Cover_Type"]
@_deprecate_positional_args
def fetch_covtype(*, data_home | =None, download_if_missing=True,
random_state=None, shuffle=False, return_X_y=False,
as_frame=False):
"""Load the covertype dataset (classification).
Download it if necessary.
================= ============
Classes 7
Samples total 581012
Dimensionality 54
Features int
================= ============
Read more in the :ref:`User Guide <covtype_dataset>`.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset shuffling. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
shuffle : bool, default=False
Whether to shuffle dataset.
return_X_y : bool, default=False
If True, returns ``(data.data, data.target)`` instead of a Bunch
object.
.. versionadded:: 0.20
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric). The target is a pandas DataFrame or
Series depending on the number of target columns. If `return_X_y` is
True, then (`data`, `target`) will be pandas DataFrames or Series as
described below.
.. versionadded:: 0.24
Returns
-------
dataset : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : ndarray of shape (581012, 54)
Each row corresponds to the 54 features in the dataset.
target : ndarray of shape (581012,)
Each value corresponds to one of
the 7 forest covertypes with values
ranging between 1 to 7.
frame : dataframe of shape (581012, 53)
Only present when `as_frame=True`. Contains `data` and `target`.
DESCR : str
Description of the forest covertype dataset.
feature_names : list
The names of the dataset columns.
target_names: list
The names of the target columns.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.20
"""
data_home = get_data_home(data_home=data_home)
covtype_dir = join(data_home, "covertype")
samples_path = _pkl_filepath(covtype_dir, "samples")
targets_path = _pkl_filepath(covtype_dir, "targets")
available = exists(samples_path)
if download_if_missing and not available:
if not exists(covtype_dir):
makedirs(covtype_dir)
logger.info("Downloading %s" % ARCHIVE.url)
archive_path = _fetch_remote(ARCHIVE, dirname=covtype_dir)
Xy = np.genfromtxt(GzipFile(filename=archive_path), delimiter=',')
# delete archive
remove(archive_path)
X = Xy[:, :-1]
y = Xy[:, -1].astype(np.int32, copy=False)
joblib.dump(X, samples_path, compress=9)
joblib.dump(y, targets_path, compress=9)
elif not available and not download_if_missing:
raise IOError("Data not found and `download_if_missing` is False")
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
module_path = dirname(__file__)
with open(join(module_path, 'descr', 'covtype.rst')) as rst_file:
fdescr = rst_file.read()
frame = None
if as_frame:
frame, X, y = _convert_data_dataframe(caller_name="fetch_covtype",
data=X,
target=y,
feature_names=FEATURE_NAMES,
target_names=TARGET_NAMES)
if return_X_y:
return X, y
return Bunch(data=X,
target=y,
frame=frame,
target_names=TARGET_NAMES,
feature_names=FEATURE_NAMES,
DESCR=fdescr)
|
ctu-yfsg/2015-a-grass-reclass | reclassify/Layout/ReclassifyPanel.py | Python | gpl-2.0 | 3,344 | 0.008373 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Subclass of wx.Panel"""
#-----------------------------------------------------------------------------------------
#Import
try:
#wxPython
import wx
import wx.grid
import wx.lib.scrolledpanel
#python std library
import sys
#our modules and packages
except ImportError as err:
print(u"ImportError: {}".format(err))
sys.exit("-1")
#-----------------------------------------------------------------------------------------
class ReclassifyPanel(wx.Panel):
"""
Subclass of wx.Panel.
Represents top-right part of the window.
Contains reclassification table, delete entry
button and add entry button.
"""
def __init__(self, parent, id):
wx.Panel.__init__(self, parent, id)
#TABLE
self.tableCol = 4
self.__buildTable()
#BUTTONS
self.__buildButtonPanel()
#LAYOUT
self.__layout()
#self.SetMinSize((400, -1))
#-----------------------------------------------------------------------------------------
def __buildTable(self):
"""
Creates table for displaying mapset classification.
Table is made using wx.grid.Grid.
:return: void
"""
self.tablePanel = wx.Panel(self, wx.NewId())
self.table = wx.grid.Grid(self.tablePanel)
self.table.SetMinSize((430, -1))
self.table.CreateGrid(0, self.tableCol)
self.table.SetColLabelValue(0, "Lower limit")
self.table.SetColLabelValue(1, "Upper limit")
self.table.SetColLabelValue(2, "Value")
self.table.SetColLabelValue(3, "Label")
self.table.SetDefaultEditor(wx.grid.GridCellFloatEditor(-1, -1))
box = wx.BoxSizer(wx.VERTICAL)
box.Add(self.table, wx.EXPAND, wx. | CENTER)
self.tablePanel.SetSizer(box)
#-----------------------------------------------------------------------------------------
def __buildButtonPanel(self):
"""
Creates delete entry button and add | entry button.
:return: void
"""
self.buttonPanel = wx.Panel(self, wx.NewId())
self.addButton = wx.Button(self.buttonPanel, wx.NewId(), "Add", size=(100, -1))
self.deleteButton = wx.Button(self.buttonPanel, wx.NewId(), "Delete", size=(100, -1))
self.previewButton = wx.Button(self.buttonPanel, wx.NewId(), "Preview", size=(100, -1))
vBox = wx.BoxSizer(wx.VERTICAL)
vBox.Add(self.addButton, 0, wx.ALIGN_CENTER)
vBox.Add(self.deleteButton, 0, wx.ALIGN_CENTER)
vBox.Add(self.previewButton, 0, wx.ALIGN_CENTER)
self.buttonPanel.SetSizer(vBox)
#-----------------------------------------------------------------------------------------
def __layout(self):
"""
Specifies final layout in Reclassify Panel
:return: void
"""
margin = 5
sBox = wx.StaticBox(self, wx.NewId(), "Reclassification Table")
hBox = wx.StaticBoxSizer(sBox, wx.HORIZONTAL)
hBox.Add(self.tablePanel, 0, wx.ALL | wx.CENTER | wx.EXPAND, margin)
hBox.Add(self.buttonPanel, 0, wx.ALL | wx.ALIGN_TOP, margin)
self.SetSizer(hBox)
#-----------------------------------------------------------------------------------------
if __name__ == "__main__":
pass |
indigo-dc/im | test/loadtest/LoadTestRESTR.py | Python | gpl-3.0 | 5,151 | 0.000777 | #! /usr/bin/env python
#
# IM - Infrastructure Manager
# Copyright (C) 2011 - GRyCAP - Universitat Politecnica de Valencia
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hop | e that it will be useful,
# but WITHOUT | ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from multiprocessing import Process
import unittest
import time
import sys
import os
import random
import datetime
import requests
import json
sys.path.append("..")
sys.path.append(".")
RADL_ADD = "network publica\nnetwork privada\nsystem wn\ndeploy wn 1"
TESTS_PATH = os.path.dirname(os.path.realpath(__file__))
RADL_FILE = TESTS_PATH + '/load-test.radl'
AUTH_FILE = TESTS_PATH + '/auth.dat'
HOSTNAME = "imservice"
TEST_PORT = 8800
MIN_SLEEP = 0
MAX_SLEEP = 5
class LoadTest(unittest.TestCase):
server = None
auth_data = None
inf_id = 0
response_times = []
@classmethod
def setUpClass(cls):
cls.auth_data = open(AUTH_FILE, 'r').read().replace("\n", "\\n")
cls.inf_id = 0
@classmethod
def tearDownClass(cls):
# Assure that the infrastructure is destroyed
try:
headers = {'AUTHORIZATION': cls.auth_data}
url = "http://%s:%d%s" % (HOSTNAME, TEST_PORT, "/infrastructures/" + cls.inf_id)
requests.request("DELETE", url, headers=headers)
except Exception:
pass
@staticmethod
def wait():
mint = MIN_SLEEP
maxt = MAX_SLEEP
delay = random.uniform(mint, maxt)
time.sleep(delay)
def create_request(self, method, path, headers=None, body=None):
before = time.time()
if headers is None:
headers = {'AUTHORIZATION': self.auth_data}
elif headers != {}:
if 'AUTHORIZATION' not in headers:
headers['AUTHORIZATION'] = self.auth_data
url = "http://%s:%d%s" % (HOSTNAME, TEST_PORT, path)
resp = requests.request(method, url, headers=headers, data=body)
resp_time = time.time() - before
self.__class__.response_times.append(resp_time)
return resp
def test_10_list(self):
resp = self.create_request("GET", "/infrastructures")
self.assertEqual(resp.status_code, 200,
msg="ERROR listing user infrastructures:" + resp.text)
if resp.text:
for inf_id in resp.text.split("\n"):
inf_id = os.path.basename(inf_id)
self.getinfo(inf_id)
self.getstate(inf_id)
self.print_response_times()
def getinfo(self, inf_id):
resp = self.create_request("GET", "/infrastructures/" + inf_id)
self.assertEqual(resp.status_code, 200,
msg="ERROR getting the infrastructure info:" + resp.text)
def getstate(self, inf_id):
resp = self.create_request("GET", "/infrastructures/" + inf_id + "/state")
self.assertEqual(
resp.status_code, 200, msg="ERROR getting the infrastructure state:" + resp.text)
res = json.loads(resp.text)
state = res['state']['state']
vm_states = res['state']['vm_states']
def print_response_times(self):
total = 0.0
for time in self.response_times:
total += time
print("Mean Time: %.4f" % (total / len(self.response_times)))
def test(num_client):
now = datetime.datetime.now()
print(now, ": Launch client num: %d" % num_client)
unittest.main()
now = datetime.datetime.now()
print(now, ": End client num: %d" % num_client)
if __name__ == '__main__':
MAX_THREADS = 1
MAX_CLIENTS = 1
DELAY = 1
if len(sys.argv) > 4:
MAX_SLEEP = float(sys.argv[4])
del sys.argv[4]
if len(sys.argv) > 3:
DELAY = float(sys.argv[3])
del sys.argv[3]
if len(sys.argv) > 2:
MAX_CLIENTS = int(sys.argv[1])
MAX_THREADS = int(sys.argv[2])
del sys.argv[1]
del sys.argv[1]
elif len(sys.argv) > 1:
MAX_CLIENTS = MAX_THREADS = int(sys.argv[1])
del sys.argv[1]
processes = []
remaining = MAX_CLIENTS
while remaining > 0:
now = datetime.datetime.now()
while len(processes) < MAX_THREADS:
p = Process(target=test, args=(MAX_CLIENTS - remaining,))
p.start()
processes.append(p)
remaining -= 1
while len(processes) >= MAX_THREADS:
new_processes = []
for p in processes:
if p.is_alive():
new_processes.append(p)
processes = new_processes
if len(processes) >= MAX_THREADS:
time.sleep(DELAY)
|
sergpolly/FluUtils | FluDB_coding_aln/DEPRECEATED/cluster_msa_processing.py | Python | mit | 2,495 | 0.00521 | import re
import os
import sys
from Bio import Seq
from Bio import SeqIO
import pandas as pd
import itertools
import numpy as np
path = "/home/venevs/fludb_pH1N1"
# seg5-GenomicFastaResults.fasta
# seg7.afa
centroids = SeqIO.par | se(os.path.join(path,"nr.fasta"),"fasta")
clust_msa = SeqIO.parse(os.path.join(path,"seg1.afa"),"fasta")
# just check how centroids are aligned in the clust_msa:
# select centroids and find them in clust_msa -> extract their alignemnt ...
centroids = SeqIO.to_dict(centroids)
clust_msa = SeqIO.to_dict(clust_msa)
# simple test to check if all ids from centroids a | re present in clust_msa ...
all_match = all([(sid in clust_msa) for sid in centroids])
if not all_match:
raise ValueError("All ids from centroids must match ids in clust_msa ...")
# now extract aligned centroid sequences ...
centroids_aligned = [clust_msa[sid] for sid in centroids]
# output the alignment now ...
SeqIO.write(centroids_aligned,"centrs.afa","fasta")
# # now let's gather the statistics about each of the segments ...
# # segments with lots of non-ATGC letters ...
# # countATGC = {}
# # length = {}
# data = {}
# for seg in segments:
# get_ATGC = lambda seq: seq.count('A')+seq.count('T')+seq.count('G')+seq.count('C')
# get_strain = lambda seqrec: seqrec.description.strip().split('|')[1]
# countATGC = []
# length = []
# strain = []
# for seqrec in segments[seg]:
# countATGC.append(get_ATGC(seqrec.seq))
# length.append(len(seqrec))
# strain.append(get_strain(seqrec))
# #
# data[seg] = pd.DataFrame({"atgc":countATGC,"len":length,"strain":strain})
# data[seg]["not_atgc"] = data[seg]["len"] - data[seg]["atgc"]
# filtered_index = {}
# length_threshold = 0.9
# #
# print "filtering length cutoff %.2f"%length_threshold
# for seg in segments:
# filtered_index[seg] = np.asarray(data[seg][(data[seg]["not_atgc"]==0)&(data[seg]["len"]>length_threshold*data[seg]["len"].median())].index)
# # dat = pd.DataFrame({})
# for seg in sorted(segments):
# msg = "%s: total_seq=%d filtered_seq=%d disregarded_seq=%d"%(seg,len(segments[seg]),filtered_index[seg].size,len(segments[seg]) - filtered_index[seg].size)
# print msg
# for seg in segments:
# out_fname = os.path.join(path,"%s.fasta"%seg)
# SeqIO.write( (segments[seg][idx] for idx in filtered_index[seg]), out_fname, "fasta" )
|
mozilla/dragnet | dragnet/dll/forms.py | Python | bsd-3-clause | 534 | 0 | from django import forms
from dragnet.dll.models import File, Comment
class FileForm(forms.ModelForm):
"""Using a model form to expedite the crea | tion of DLL records"""
class Meta:
model = File
exclude = ('date_created', 'date_modified', 'created_by',
| 'modified_by', )
class CommentForm(forms.ModelForm):
"""Comment form for DLL comments"""
class Meta:
model = Comment
exclude = ('user', 'date', 'dll')
class SearchForm(forms.Form):
term = forms.CharField()
|
kedder/ofxstatement | src/ofxstatement/ui.py | Python | gpl-3.0 | 275 | 0 | import logging
log = logging. | getLogger(__name__)
class UI:
def status(self, message: str) -> None:
log.info(message)
def warning(self, message: str) -> None:
log.warn(message)
def error(self, message: str) -> None:
| log.error(message)
|
JanBerktold/redis-simpler-cache | redis_cache/rediscache.py | Python | bsd-3-clause | 11,363 | 0.032122 | """
A simple redis-cache interface for storing python objects.
"""
from functools import wraps
import pickle
import json
import hashlib
import redis
import logging
DEFAULT_EXPIRY = 60 * 60 * 24
class RedisConnect(object):
"""
A simple object to store and pass database connection information.
This makes the Simple Cache class a little more flexible, for cases
where redis connection configuration needs customizing.
"""
def __init__(self, host=None, port=None, db=None, password=None):
self.host = host if host else 'localhost'
self.port = port if port else 6379
self.db = db if db else 0
self.password = password
def connect(self):
"""
We cannot assume that connection will succeed, as such we use a ping()
method in the redis client library to validate ability to contact redis.
RedisNoConnException is raised if we fail to ping.
:return: redis.StrictRedis Connection Object
"""
try:
connection = redis.StrictRedis(host=self.host, port=self.port, password=self.password)
connection.ping()
except redis.ConnectionError as e:
raise RedisNoConnException("Failed to create connection to redis", (self.host, self.port))
return connection
class CacheMissException(Exception):
pass
class ExpiredKeyException(Exception):
pass
class RedisNoConnException(Exception):
pass
class DoNotCache(Exception):
_result = None
def __init__(self, result):
super(DoNotCache, self).__init__()
self._result = result
@property
def result(self):
return self._result
class SimpleCache(object):
def __init__(self,
limit=10000,
expire=DEFAULT_EXPIRY,
hashkeys=False,
host=None,
port=None,
db=None,
redis=None,
password=None,
namespace="SimpleCache"):
if redis != None:
try:
redis.ping()
except:
raise RedisNoConnException("Failed to create connection with passed redis object")
self.limit = limit # No of json encoded strings to cache
self.expire = expire # Time to keys to expire in seconds
self.prefix = namespace
self.host = host
self.port = port
self.db = db
try:
self.connection = RedisConnect(host=self.host,
port=self.port,
db=self.db,
password=password).connect()
except (RedisNoConnException, e):
self.connection = None
pass
# Should we hash keys? There is a very small risk of collision invloved.
self.hashkeys = hashkeys
def make_key(self, key):
return "SimpleCache-{0}:{1}".format(self.prefix, key)
def namespace_key(self, namespace):
return self.make_key(namespace + ':*')
def get_set_name(self):
return "SimpleCache-{0}-keys".format(self.prefix)
def store(self, key, value, expire=None):
"""
Method stores a value after checking for space constraints and
freeing up space if required.
:param key: key by which to reference datum being stored in Redis
:param value: actual value being stored under this key
:param expire: time-to-live (ttl) for this datum
"""
key = to_unicode(key)
value = to_unicode(value)
set_name = self.get_set_name()
|
while self.connection.scard(set_name) >= self.limit:
del_key = self.connection.spop(set_name)
self.connection.delete(self.make_key(del_key))
pipe = self.connection.pipeline()
if expire is None:
expire = self.expire
if (isinstance(expire, int) and expire <= 0) or (expire is None):
pipe.set(self.make_key(key), value)
else:
pipe.setex(self.make_key(key), expire, value)
pipe.sadd(set_name, key)
pipe.execute()
def expire_all_in_set(self):
"""
M | ethod expires all keys in the namespace of this object.
At times there is a need to invalidate cache in bulk, because a
single change may result in all data returned by a decorated function
to be altered.
Method returns a tuple where first value is total number of keys in
the set of this object's namespace and second value is a number of
keys successfully expired.
:return: int, int
"""
all_members = self.keys()
keys = [self.make_key(k) for k in all_members]
with self.connection.pipeline() as pipe:
pipe.delete(*keys)
pipe.execute()
return len(self), len(all_members)
def expire_namespace(self, namespace):
"""
Method expires all keys in the namespace of this object.
At times there is a need to invalidate cache in bulk, because a
single change may result in all data returned by a decorated function
to be altered.
Method returns a tuple where first value is total number of keys in
the set of this object's namespace and second value is a number of
keys successfully expired.
:return: int, int
"""
namespace = self.namespace_key(namespace)
all_members = list(self.connection.keys(namespace))
with self.connection.pipeline() as pipe:
pipe.delete(*all_members)
pipe.execute()
return len(self), len(all_members)
def isexpired(self, key):
"""
Method determines whether a given key is already expired. If not expired,
we expect to get back current ttl for the given key.
:param key: key being looked-up in Redis
:return: bool (True) if expired, or int representing current time-to-live (ttl) value
"""
ttl = self.connection.pttl("SimpleCache-{0}".format(key))
if ttl == -2: # not exist
ttl = self.connection.pttl(self.make_key(key))
elif ttl == -1:
return True
if not ttl is None:
return ttl
else:
return self.connection.pttl("{0}:{1}".format(self.prefix, key))
def store_json(self, key, value, expire=None):
self.store(key, json.dumps(value), expire)
def store_pickle(self, key, value, expire=None):
self.store(key, pickle.dumps(value), expire)
def get(self, key):
key = to_unicode(key)
if key: # No need to validate membership, which is an O(1) operation, but seems we can do without.
value = self.connection.get(self.make_key(key))
if value is None: # expired key
if not key in self: # If key does not exist at all, it is a straight miss.
raise CacheMissException
self.connection.srem(self.get_set_name(), key)
raise ExpiredKeyException
else:
return value
def mget(self, keys):
"""
Method returns a dict of key/values for found keys.
:param keys: array of keys to look up in Redis
:return: dict of found key/values
"""
if keys:
cache_keys = [self.make_key(to_unicode(key)) for key in keys]
values = self.connection.mget(cache_keys)
if None in values:
pipe = self.connection.pipeline()
for cache_key, value in zip(cache_keys, values):
if value is None: # non-existant or expired key
pipe.srem(self.get_set_name(), cache_key)
pipe.execute()
return dict((k,v ) for k, v in zip(keys, values) if v is not None)
def get_json(self, key):
return json.loads(self.get(key))
def get_pickle(self, key):
return pickle.loads(self.get(key))
def mget_json(self, keys):
"""
Method returns a dict of key/values for found keys with each value
parsed from JSON format.
:param keys: array of keys to look up in Redis
:return: dict of found key/values with values parsed from JSON format
"""
d = self.mget(keys)
if d:
for key in d.keys():
d[key] = json.loads(d[key]) if d[key] else None
return d
def invalidate(self, key):
"""
Method removes (invalidates) an item from the cache.
:param key: key to remove from Redis
"""
key = to_unicode(key)
pipe = self.connection.pipeline()
pipe.srem(self.get_set_name(), key)
pipe.delete(self.make_key(key))
pipe.execute()
def __contains__(self, key):
return self.connection.sismember(self.get_set_name(), key)
def __iter__(self):
if not self.connection:
return iter([])
return iter(
["{0}:{1}".format(self.prefix, x)
for x in self.connection.smembers(self.get_set_name())
])
def __len__(self):
return self.connection.scard(self.get_set_name())
def keys(self):
return self.connection.smembers(self.get_set_name())
def flush(self):
keys = list(self.keys())
keys.append(self.get_set_name())
with self.connection.pipeline() as pipe:
pipe.delete(*keys)
pipe.execute()
def flush_namespace(self, space):
namespace = self.namespace_key(space)
setname = self.get_set_name()
keys = list(self.connection. |
AHaymanDev/CS453DjangoProj | bookrental/views.py | Python | gpl-2.0 | 8,027 | 0.003114 | from django.shortcuts import render, render_to_response
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.views.generic import ListView
from bookrental.forms import UserCreateForm
from bookrental.models import Book
from bookrental.tables import BookTable
from bookrental.models import Cart
from bookrental.tables import CartTable
from bookrental.models import Prices
from bookrental.tables import PriceTable
from django_tables2 import RequestConfig
from django.core.context_processors import csrf
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.db.models import F
from django.db.models import Q
from bookrental.models import Returns
from bookrental.tables import ReturnTable
# Create your views here.
def book(request):
c = {}
c.update(csrf(request))
# select all the books with the user's current category selected
select_books_from = request.POST.get('books')
table = BookTable(Book.objects.filter(category=request.POST.get('books'))) # request.session['category']))
RequestConfig(request).configure(table)
if request.method == "GET":
#pks = request.POST.getlist("selection")
pks = request.GET.getlist("selection")
selected_books = Book.objects.filter(pk__in=pks)
# put selected books in cart
# TODO: Doesn't work; not saving to the cart table!!!
#for p in pks:
kcart = Cart(isbn='978-123456', quantity=1, price=0)
#for p in Prices.objects.all():
# if b.isbn == p.isbn:
# kcart.price = p.price
# break
kcart.save()
#table = CartTable(Cart.objects.all())))))
#RequestConfig(request).configure(table)
# pass these books to cart page
return HttpResponseRedirect(reverse('cart'))#, c, {'table': table})
return render(request, 'bookrental/Books.html', {'table': table, 'select_books_from': select_books_from})
def checkout(request):
# displays a successful checkout page
return render_to_response('bookrental/Checkout.html')
def info(request):
return render_to_response('bookrental/InfoPage.html')
def login_page(request):
c = {}
c.update(csrf(request))
if request.method == 'POST':
# if the login button was clicked, authenticate the given user/pass combo
username1 = request.POST.get('username')
password1 = request.POST.get('password')
user = authenticate(username=username1, password=password1)
if user is not None:
login(request, user)
# update session
request.session['username'] = username1
# good login, so go to warning page
return HttpResponseRedirect('warning/')
else:
# bad login, so go to failure
return HttpResponseRedirect('login_failure/')
return render_to_response('bookrental/Login.html', c)
def return_confirm(request):
# display a return confirmation page
return render_to_response('bookrental/ReturnConfirm.html')
def returns(request):
c = {}
c.update(csrf(request))
# Create a table of all returnable objects
table = ReturnTable(Returns.objects.all())
RequestConfig(request).configure(table)
if request.method == "POST":
# get list of returning books, delete from total returns
pks = request.POST.getlist("returning")
returned_books = Returns.objects.filter(~Q(pk__in=pks))
# pass these books to return confirmation page as table
table = ReturnTable(returned_books)
RequestConfig(request).configure(table)
return render(request, 'bookrental/ReturnConfirm.html', {'table': table})
return render(request, 'bookrental/Returns.html', {'table': table})
def warning(request):
# displays the disclaimer page
return render_to_response('bookrental/Warning.html')
def cart(request):
c = {}
c.update(csrf(request))
pks = request.GET.getlist("selection")
# get new books to add, join with price table
new_cart = Cart.objects.all()
for c in new_cart:
for p in pks:
# if a cart item is not selected, delete it
if c.isbn != p:
c.delete()
table = CartTable(new_cart)
RequestConfig(request).configure(table)
if request.method == "POST":
pks = request.POST.getlist("removed")
# add all books NOT in removed
removed_books = Cart.objects.filter(~Q(pk__in=pks))
#pass these books to cart page as table
table = CartTable(removed_books)
RequestConfig(request).configure(table)
# display updated table on same page
return render(request, 'bookrental/YourCart.html', {'table': table})
return render(request, 'bookrental/YourCart.html', {'table': table})
def category(request):
c = {}
c.update(csrf(request))
# all available categories for books
categories = {"programming_languages", "software_engineering", "computer_networking", "operating_systems", "database_systems", "computer_organization"}
if request.method == 'POST':
# if the button was pressed, pass the selected category to the books page
select_books_from = request.POST.get('books')
request.session['category'] = select_books_from
return HttpResponseRedirect(reverse('book'), c, {'select_books_from': select_books_from})
return render_to_response('bookrental/category.html', c, context_instance=RequestContext(request))
def login_failure(request):
c = {}
c.update(csrf(request))
if request.method == 'POST':
# if the button was clicked, authenticate user and pass in auth_user table
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(username=username, password=password)
if user is not None:
# if the user/pass pair is good, login and redirect to warning page
login(request, user)
# update session
request.session['username'] = username
return HttpResponseRedirect(reverse('warning'))
return render_to_response('bookrental/login_failure.html', c)
def logout_page(request):
# clear out their cart
for c in Cart.objects.all():
c.delete()
# log | out the user
logout(request)
# go back to the login page
return render(request, 'bookrental/Login.html')
# Register a new user with a custom form, log them in, and redirect to the Warning page.
def new_user(request):
    """Register a new user with a custom form, log them in, and redirect
    to the Warning page."""
    if request.method == 'POST':
        # when they hit submit, check if their form is correct
        user_form = UserCreateForm(request.POST)
        if user_form.is_valid():
            # FIX: read validated values from cleaned_data instead of
            # calling the clean_username()/clean_password2() validation
            # hooks directly -- those are callbacks meant to run inside
            # is_valid(), not accessors.
            username1 = user_form.cleaned_data['username']
            password = user_form.cleaned_data['password2']
            user_form.save()
            user = authenticate(username=username1, password=password)
            login(request, user)
            # update current session
            request.session['username'] = username1
            return HttpResponseRedirect(reverse('warning'))
    # GET request (or invalid form): show a blank registration form.
    user_form = UserCreateForm()
    return render(request, 'bookrental/new_user.html', {'user_form': user_form})
def update_user(request):
    """Let the logged-in user change password/email after re-entering
    their current password."""
    if request.method == 'POST':
        # if they hit submit, get their user and pass
        username = request.session['username']  # request.user
        password = request.POST.get('password')
        # BUG FIX: the keyword was misspelled ``passoword``, so the
        # authentication backend never received the password and this
        # check could never succeed.
        if authenticate(username=username, password=password) is not None:
            # Current password is correct, so can set new password
            request.user.set_password(request.POST.get('new_password'))
            request.user.email = request.POST.get('email')
            # BUG FIX: persist the changes -- set_password/email only
            # mutate the in-memory object.
            request.user.save()
            # go to category page
            return HttpResponseRedirect(reverse('category'))
    return render_to_response('bookrental/update_user.html')
################################################
|
kbluck/pytws | tws/_Execution.py | Python | bsd-3-clause | 1,064 | 0.006579 | '''Execution data structure.'''
__copyright__ = "Copyright (c) | 2008 Kevin J Bluck"
__version__ = "$Id$"
class Execution(object):
    '''Data structure to describe a trade execution.

    Plain record type; attribute names keep the Java-style ``m_`` prefix
    used throughout this TWS binding.  Two executions compare equal when
    their execution ids match.
    '''

    def __init__(self, order_id=0, client_id=0, exec_id="", time="", acct_number="", exchange="", side="",
                 shares=0, price=0.0, perm_id=0, liquidation=0, cum_qty=0, avg_price=0.0):
        self.m_orderId = order_id
        self.m_clientId = client_id
        self.m_execId = exec_id
        self.m_time = time
        self.m_acctNumber = acct_number
        self.m_exchange = exchange
        self.m_side = side
        self.m_shares = shares
        self.m_price = price
        self.m_permId = perm_id
        self.m_liquidation = liquidation
        self.m_cumQty = cum_qty
        self.m_avgPrice = avg_price

    def __eq__(self, other):
        # Identity short-circuit (was ``id(self) == id(other)``), then
        # equality by execution id only.  A non-Execution comparand stays
        # False (not NotImplemented) to preserve the original behaviour;
        # the redundant ``True if (...) else False`` wrapper is gone.
        if self is other:
            return True
        if not isinstance(other, self.__class__):
            return False
        return self.m_execId == other.m_execId
|
kkuunnddaannkk/vispy | vispy/visuals/mesh.py | Python | bsd-3-clause | 12,882 | 0 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
""" A MeshVisual Visual that uses the new shader Function.
"""
from __future__ import division
import numpy as np
from .visual import Visual
from .shaders import Function, Varying
from ..gloo import VertexBuffer, IndexBuffer
from ..geometry import MeshData
from ..color import Color
# Shaders for lit rendering (using phong shading)
shading_vertex_template = """
varying vec3 v_normal_vec;
varying vec3 v_light_vec;
varying vec3 v_eye_vec;
varying vec4 v_ambientk;
varying vec4 v_light_color;
varying vec4 v_base_color;
void main() {
v_ambientk = $ambientk;
v_light_color = $light_color;
v_base_color = $base_color;
vec4 pos_scene = $visual2scene($to_vec4($position));
vec4 normal_scene = $visual2scene(vec4($normal, 1));
vec4 origin_scene = $visual2scene(vec4(0, 0, 0, 1));
normal_scene /= normal_scene.w;
origin_scene /= origin_scene.w;
vec3 normal = normalize(normal_scene.xyz - origin_scene.xyz);
v_normal_vec = normal; //VARYING COPY
vec4 pos_front = $scene2doc(pos_scene);
pos_front.z += 0.01;
pos_front = $doc2scene(pos_front);
pos_front /= pos_front.w;
vec4 pos_back = $scene2doc(pos_scene);
pos_back.z -= 0.01;
pos_back = $doc2scene(pos_back);
pos_back /= pos_back.w;
vec3 eye = normalize(pos_front.xyz - pos_back.xyz);
v_eye_vec = eye; //VARYING COPY
vec3 light = normalize($light_dir.xyz);
v_light_vec = light; //VARYING COPY
gl_Position = $transform($to_vec4($position));
}
"""
shading_fragment_template = """
varying vec3 v_normal_vec;
varying vec3 v_light_vec;
varying vec3 v_eye_vec;
varying vec4 v_ambientk;
varying vec4 | v_light_color;
varying vec4 v_base_color;
void main() {
//DIFFUSE
float diffusek = dot(v_light_vec, v_normal_vec);
//clamp, because 0 < theta < pi/2
diffusek = clamp(diffusek, 0, 1);
vec4 diffuse_color = v_light_color * diffusek;
//diffuse_color.a = 1.0;
//SPECULAR
//reflect light wrt normal for the reflected ray, then
//find the angle made with the eye
float speculark = dot(reflect(v | _light_vec, v_normal_vec), v_eye_vec);
speculark = clamp(speculark, 0, 1);
//raise to the material's shininess, multiply with a
//small factor for spread
speculark = 20 * pow(speculark, 200.0);
vec4 specular_color = v_light_color * speculark;
gl_FragColor =
v_base_color * (v_ambientk + diffuse_color) + specular_color;
//gl_FragColor = vec4(speculark, 0, 1, 1.0);
}
"""
# Shader code for non lighted rendering
vertex_template = """
void main() {
gl_Position = $transform($to_vec4($position));
}
"""
fragment_template = """
void main() {
gl_FragColor = $color;
}
"""
# Functions that can be used as is (don't have template variables)
# Consider these stored in a central location in vispy ...
vec3to4 = Function("""
vec4 vec3to4(vec3 xyz) {
return vec4(xyz, 1.0);
}
""")
vec2to4 = Function("""
vec4 vec2to4(vec2 xyz) {
return vec4(xyz, 0.0, 1.0);
}
""")
class MeshVisual(Visual):
"""Mesh visual
Parameters
----------
vertices : array-like | None
The vertices.
faces : array-like | None
The faces.
vertex_colors : array-like | None
Colors to use for each vertex.
face_colors : array-like | None
Colors to use for each face.
color : instance of Color
The color to use.
meshdata : instance of MeshData | None
The meshdata.
shading : str | None
Shading to use.
mode : str
The drawing mode.
**kwargs : dict
Keyword arguments to pass to `Visual`.
"""
    def __init__(self, vertices=None, faces=None, vertex_colors=None,
                 face_colors=None, color=(0.5, 0.5, 1, 1), meshdata=None,
                 shading=None, mode='triangles', **kwargs):
        """Build the visual, choosing lit or unlit shaders from *shading*."""
        # Function for computing phong shading
        # self._phong = Function(phong_template)

        # ``shading`` must be assigned before Visual.__init__ runs:
        # Visual.__init__ -> prepare_transforms() -> uses shading
        self.shading = shading

        if shading is not None:
            # Lit rendering: phong-style vertex/fragment shader pair.
            Visual.__init__(self, vcode=shading_vertex_template,
                            fcode=shading_fragment_template,
                            **kwargs)
        else:
            # Unlit rendering: plain uniform-color shaders.
            Visual.__init__(self, vcode=vertex_template,
                            fcode=fragment_template,
                            **kwargs)
        self.set_gl_state('translucent', depth_test=True,
                          cull_face=False)

        # Define buffers (GPU-side geometry and per-vertex data; filled
        # lazily from MeshData in _update_data).
        self._vertices = VertexBuffer(np.zeros((0, 3), dtype=np.float32))
        self._normals = None
        self._faces = IndexBuffer()
        self._colors = VertexBuffer(np.zeros((0, 4), dtype=np.float32))
        self._normals = VertexBuffer(np.zeros((0, 3), dtype=np.float32))

        # Uniform color, used when no per-vertex/per-face colors are given.
        self._color = Color(color)

        # varyings
        self._color_var = Varying('v_color', dtype='vec4')
        # Init
        self._bounds = None
        # Note we do not call subclass set_data -- often the signatures
        # do no match.
        MeshVisual.set_data(self, vertices=vertices, faces=faces,
                            vertex_colors=vertex_colors,
                            face_colors=face_colors, meshdata=meshdata,
                            color=color)

        # primitive mode
        self._draw_mode = mode
        self.freeze()
    def set_data(self, vertices=None, faces=None, vertex_colors=None,
                 face_colors=None, color=None, meshdata=None):
        """Set the mesh data

        Parameters
        ----------
        vertices : array-like | None
            The vertices.
        faces : array-like | None
            The faces.
        vertex_colors : array-like | None
            Colors to use for each vertex.
        face_colors : array-like | None
            Colors to use for each face.
        color : instance of Color
            The color to use.
        meshdata : instance of MeshData | None
            The meshdata.
        """
        # An explicit MeshData object takes precedence over the raw arrays.
        if meshdata is not None:
            self._meshdata = meshdata
        else:
            self._meshdata = MeshData(vertices=vertices, faces=faces,
                                      vertex_colors=vertex_colors,
                                      face_colors=face_colors)
        self._bounds = self._meshdata.get_bounds()
        if color is not None:
            self._color = Color(color)
        # Defer the actual GPU buffer upload to the next draw.
        self.mesh_data_changed()
    @property
    def mode(self):
        """The triangle mode used to draw this mesh.

        Options are:

            * 'triangles': Draw one triangle for every three vertices
              (eg, [1,2,3], [4,5,6], [7,8,9])
            * 'triangle_strip': Draw one strip for every vertex excluding the
              first two (eg, [1,2,3], [2,3,4], [3,4,5])
            * 'triangle_fan': Draw each triangle from the first vertex and the
              last two vertices (eg, [1,2,3], [1,3,4], [1,4,5])
        """
        return self._draw_mode

    @mode.setter
    def mode(self, m):
        # Reject anything that is not a known GL triangle primitive.
        modes = ['triangles', 'triangle_strip', 'triangle_fan']
        if m not in modes:
            raise ValueError("Mesh mode must be one of %s" % ', '.join(modes))
        self._draw_mode = m
    @property
    def mesh_data(self):
        """The mesh data"""
        return self._meshdata

    @property
    def color(self):
        """The uniform color for this mesh.

        This value is only used if per-vertex or per-face colors are not
        specified.
        """
        return self._color

    @color.setter
    def color(self, c):
        # Route through set_data so the change triggers mesh_data_changed().
        self.set_data(color=c)

    def mesh_data_changed(self):
        """Mark buffers stale and schedule a redraw."""
        self._data_changed = True
        self.update()
def _update_data(self):
md = self.mesh_data
# Update vertex/index buffers
if self.shading == 'smooth' and not m |
yunshichen/dblog | src/dblog/handler/urls.py | Python | apache-2.0 | 2,005 | 0 | #! /usr/bin/python
# -*- coding: UTF-8 -*-
from dblog.handler import index_handler
from dblog.handler import cat_handler
from dblog.handler import back_article_handler
from dblog.handler import ckeditor_handler
from dblog.handler import front_handler
urls = [
    # -------- Blog front page
    (r"/", front_handler.FrontIndex),
    (r"/front", front_handler.FrontIndex),
    (r"/front/article/detail", front_handler.Detail),
    # -------- Admin login
    (r"/admin", index_handler.Index),
    (r"/admin/index", index_handler.Index),
    (r"/admin/ajax/login", index_handler.LoginAjax),
    (r"/admin/logout", index_handler.Logout),
    # -------- Categories
    (r"/admin/cat/index", cat_handler.Index),
    (r"/admin/ajax/cat/save", cat_handler.Save),
    (r"/admin/ajax/cat/page", cat_handler.Page),
    (r"/admin/ajax/cat/get", cat_handler.Get),
    (r"/admin/ajax/cat/delete", cat_handler.Delete),
    (r"/admin/ajax/cat/move_up", cat_handler.MoveUp),
    (r"/admin/ajax/cat/move_down", cat_handler.MoveDown),
    # -------- Articles
    (r"/admin/article/index", back_article_handler.Index),
    (r"/admin/article/detail", back_article_handler.Detail),
    (r"/admin/ajax/article/save", back_article_handler.Save),
    (r"/admin/ajax/article/page", back_article_handler.Page),
    (r"/admin/ajax/article/get_content", back_article_handler.GetContent),
    (r"/admin/ajax/article/delete", back_article_handler.Delete),
    (r"/admin/ajax/article/toggle_status", back_article_handler.ToggleStatus),
    (r"/admin/article/preview", back_article_handler.Preview),
    #
    # # -------- Site management (currently disabled)
    # (r"/site/index", site_handler.Index),
    # (r"/ajax/site/make_it", site_handler.MakeIt),
    #
    # -------- Upload / download
    (r"/admin/ajax/ckeditor/upload_image", ckeditor_handler.UploadImage),
    (r"/upload/(.*)", ckeditor_handler.ReadUpload),
    #
    # # -------- Front-end templates
    (r"/temp/front/v1/index", front_handler.FrontIndex),
    (r"/temp/front/v1/detail", front_handler.Detail),
]
|
mancoast/CPythonPyc_test | fail/312_test_fractions.py | Python | gpl-3.0 | 22,695 | 0.000485 | """Tests for Lib/fractions.py."""
from decimal import Decimal
from test.support import run_unittest
import math
import numbers
import operator
import fractions
import unittest
from copy import copy, deepcopy
from pickle import dumps, loads
F = fractions.Fraction
gcd = fractions.gcd
class DummyFloat(object):
    """Dummy float class for testing comparisons with Fractions"""

    def __init__(self, value):
        if not isinstance(value, float):
            raise TypeError("DummyFloat can only be initialized from float")
        self.value = value

    def _richcmp(self, other, op):
        # Compare exactly: a rational is compared against an exact
        # conversion of our float, never by converting the rational to
        # float (which would lose precision).
        if isinstance(other, numbers.Rational):
            return op(F.from_float(self.value), other)
        elif isinstance(other, DummyFloat):
            return op(self.value, other.value)
        else:
            return NotImplemented

    def __eq__(self, other): return self._richcmp(other, operator.eq)
    def __le__(self, other): return self._richcmp(other, operator.le)
    def __lt__(self, other): return self._richcmp(other, operator.lt)
    def __ge__(self, other): return self._richcmp(other, operator.ge)
    def __gt__(self, other): return self._richcmp(other, operator.gt)

    # shouldn't be calling __float__ at all when doing comparisons
    def __float__(self):
        assert False, "__float__ should not be invoked for comparisons"

    # same goes for subtraction
    def __sub__(self, other):
        assert False, "__sub__ should not be invoked for comparisons"
    __rsub__ = __sub__
class DummyRational(object):
    """Test comparison of Fraction with a naive rational implementation."""

    def __init__(self, num, den):
        # Normalize on construction so __eq__ can compare components.
        g = gcd(num, den)
        self.num = num // g
        self.den = den // g

    def __eq__(self, other):
        if isinstance(other, fractions.Fraction):
            return (self.num == other._numerator and
                    self.den == other._denominator)
        else:
            return NotImplemented

    # Ordering via cross-multiplication: exact, no float rounding.
    def __lt__(self, other):
        return(self.num * other._denominator < self.den * other._numerator)

    def __gt__(self, other):
        return(self.num * other._denominator > self.den * other._numerator)

    def __le__(self, other):
        return(self.num * other._denominator <= self.den * other._numerator)

    def __ge__(self, other):
        return(self.num * other._denominator >= self.den * other._numerator)

    # this class is for testing comparisons; conversion to float
    # should never be used for a comparison, since it loses accuracy
    def __float__(self):
        assert False, "__float__ should not be invoked"
class GcdTest(unittest.TestCase):
    """Spot-checks for fractions.gcd, including sign and zero handling."""

    def testMisc(self):
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.
        self.assertEqual(0, gcd(0, 0))
        self.assertEqual(1, gcd(1, 0))
        self.assertEqual(-1, gcd(-1, 0))
        self.assertEqual(1, gcd(0, 1))
        self.assertEqual(-1, gcd(0, -1))
        self.assertEqual(1, gcd(7, 1))
        self.assertEqual(-1, gcd(7, -1))
        self.assertEqual(1, gcd(-23, 15))
        self.assertEqual(12, gcd(120, 84))
        self.assertEqual(-12, gcd(84, -120))
def _components(r):
return (r.numerator, r.denominator)
class FractionTest(unittest.TestCase):
def assertTypedEquals(self, expected, actual):
"""Asserts that both the types and values are the same."""
self.assertEquals(type(expected), type(actual))
self.assertEquals(expected, actual)
def assertRaisesMessage(self, exc_type, message,
callable, *args, **kwargs):
| """Asserts that callable(*args, **kwargs) raises exc_type(message)."""
try:
callable(*args, **kwargs)
except exc_type as e:
self.assertEquals(message, str(e))
el | se:
self.fail("%s not raised" % exc_type.__name__)
def testInit(self):
self.assertEquals((0, 1), _components(F()))
self.assertEquals((7, 1), _components(F(7)))
self.assertEquals((7, 3), _components(F(F(7, 3))))
self.assertEquals((-1, 1), _components(F(-1, 1)))
self.assertEquals((-1, 1), _components(F(1, -1)))
self.assertEquals((1, 1), _components(F(-2, -2)))
self.assertEquals((1, 2), _components(F(5, 10)))
self.assertEquals((7, 15), _components(F(7, 15)))
self.assertEquals((10**23, 1), _components(F(10**23)))
self.assertEquals((3, 77), _components(F(F(3, 7), 11)))
self.assertEquals((-9, 5), _components(F(2, F(-10, 9))))
self.assertEquals((2486, 2485), _components(F(F(22, 7), F(355, 113))))
self.assertRaisesMessage(ZeroDivisionError, "Fraction(12, 0)",
F, 12, 0)
self.assertRaises(TypeError, F, 1.5)
self.assertRaises(TypeError, F, 1.5 + 3j)
self.assertRaises(TypeError, F, "3/2", 3)
self.assertRaises(TypeError, F, 3, 0j)
self.assertRaises(TypeError, F, 3, 1j)
def testFromString(self):
self.assertEquals((5, 1), _components(F("5")))
self.assertEquals((3, 2), _components(F("3/2")))
self.assertEquals((3, 2), _components(F(" \n +3/2")))
self.assertEquals((-3, 2), _components(F("-3/2 ")))
self.assertEquals((13, 2), _components(F(" 013/02 \n ")))
self.assertEquals((16, 5), _components(F(" 3.2 ")))
self.assertEquals((-16, 5), _components(F(" -3.2 ")))
self.assertEquals((-3, 1), _components(F(" -3. ")))
self.assertEquals((3, 5), _components(F(" .6 ")))
self.assertEquals((1, 3125), _components(F("32.e-5")))
self.assertEquals((1000000, 1), _components(F("1E+06")))
self.assertEquals((-12300, 1), _components(F("-1.23e4")))
self.assertEquals((0, 1), _components(F(" .0e+0\t")))
self.assertEquals((0, 1), _components(F("-0.000e0")))
self.assertRaisesMessage(
ZeroDivisionError, "Fraction(3, 0)",
F, "3/0")
self.assertRaisesMessage(
ValueError, "Invalid literal for Fraction: '3/'",
F, "3/")
self.assertRaisesMessage(
ValueError, "Invalid literal for Fraction: '/2'",
F, "/2")
self.assertRaisesMessage(
ValueError, "Invalid literal for Fraction: '3 /2'",
F, "3 /2")
self.assertRaisesMessage(
# Denominators don't need a sign.
ValueError, "Invalid literal for Fraction: '3/+2'",
F, "3/+2")
self.assertRaisesMessage(
# Imitate float's parsing.
ValueError, "Invalid literal for Fraction: '+ 3/2'",
F, "+ 3/2")
self.assertRaisesMessage(
# Avoid treating '.' as a regex special character.
ValueError, "Invalid literal for Fraction: '3a2'",
F, "3a2")
self.assertRaisesMessage(
# Don't accept combinations of decimals and rationals.
ValueError, "Invalid literal for Fraction: '3/7.2'",
F, "3/7.2")
self.assertRaisesMessage(
# Don't accept combinations of decimals and rationals.
ValueError, "Invalid literal for Fraction: '3.2/7'",
F, "3.2/7")
self.assertRaisesMessage(
# Allow 3. and .3, but not .
ValueError, "Invalid literal for Fraction: '.'",
F, ".")
def testImmutable(self):
r = F(7, 3)
r.__init__(2, 15)
self.assertEquals((7, 3), _components(r))
self.assertRaises(AttributeError, setattr, r, 'numerator', 12)
self.assertRaises(AttributeError, setattr, r, 'denominator', 6)
self.assertEquals((7, 3), _components(r))
# But if you _really_ need to:
r._numerator = 4
r._denominator = 2
self.assertEquals((4, 2), _components(r))
# Which breaks some important operations:
self.assertNotEquals(F(4, 2), r)
def testFromFloat(self):
self.assertRaises(TypeError, F.from_float, 3+4j)
self.assertEquals((10, 1), _components(F.from_float(10)))
bigint = 1234567890123456789
self.assertEquals((bigint, 1), _components(F.from_float(bigint)))
self.assertEquals((0, 1), _components(F.from_float |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/scripts/populate-distroseriesdiff.py | Python | agpl-3.0 | 394 | 0 | #!/usr/bin/python -S
#
# Copyright 2011 Canonic | al Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
__metaclass__ = type

# _pythonpath sets up sys.path for the Launchpad tree; it must be
# imported before any lp.* module.
import _pythonpath

from lp.registry.scripts.populate_distroseriesdiff import (
    PopulateDistroSeriesDiff,
    )

if __name__ == '__main__':
    # NOTE(review): the string argument is presumably the script name used
    # for logging/locking by the script framework -- confirm.
    PopulateDistroSeriesDiff('populate-distroseriesdiff').run()
|
tuzzer/ai-gym | atari_breakout/atari_breakout_dqn_cntk.py | Python | mit | 11,222 | 0.003386 | import random
import numpy as np
import math
from time import perf_counter
import os
import sys
from collections import deque
import gym
import cntk
from cntk.layers import Convolution, MaxPooling, Dense
from cntk.models import Sequential, LayerStack
from cntk.initializer import glorot_normal
env = gym.make("Breakout-v0")
NUM_ACTIONS = env.action_space.n
SCREEN_H_ORIG, SCREEN_W_ORIG, NUM_COLOUR_CHANNELS = env.observation_space.shape
def preprocess_image(screen_image):
    """Convert a raw Atari frame to the network's input format.

    Crops rows 35:195 (drops the scoreboard), downsamples by 2 on both
    axes, converts to greyscale by averaging the colour channels, and adds
    a leading channel axis.  Returns a float array of shape (1, H, W).
    """
    # crop the top and bottom
    screen_image = screen_image[35:195]
    # down sample by a factor of 2
    screen_image = screen_image[::2, ::2]
    # Convert to grey scale by averaging over the trailing colour axis.
    # Vectorised replacement for the original per-pixel Python loop; a
    # 2-D (already greyscale) input is accepted unchanged, matching how
    # this module computes STATE_DIMS from a 2-D zeros array.
    if screen_image.ndim == 3:
        grey_image = screen_image.mean(axis=2)
    else:
        grey_image = screen_image
    # ``np.float`` was removed in NumPy 1.24; plain ``float`` is equivalent.
    return np.array([grey_image.astype(float)])
CHANNELS, IMAGE_H, IMAGE_W = preprocess_image(np.zeros((SCREEN_H_ORIG, SCREEN_W_ORIG))).shape
STATE_DIMS = (1, IMAGE_H, IMAGE_W)
class Brain:
    """Wraps the CNTK Q-network, its loss, and its trainer."""

    # Number of replay transitions per training minibatch.
    BATCH_SIZE = 5

    def __init__(self):
        #### Construct the model ####
        # "s": input screen state; "q": target Q-values used for training.
        observation = cntk.ops.input_variable(STATE_DIMS, np.float32, name="s")
        q_target = cntk.ops.input_variable(NUM_ACTIONS, np.float32, name="q")

        # Define the structure of the neural network
        self.model = self.create_convolutional_neural_network(observation, NUM_ACTIONS)

        #### Define the trainer ####
        self.learning_rate = cntk.learner.training_parameter_schedule(0.0001, cntk.UnitType.sample)
        self.momentum = cntk.learner.momentum_as_time_constant_schedule(0.99)

        # Mean squared error between predicted and target Q-values
        # (the same node is reused as the reported evaluation metric).
        self.loss = cntk.ops.reduce_mean(cntk.ops.square(self.model - q_target), axis=0)
        mean_error = cntk.ops.reduce_mean(cntk.ops.square(self.model - q_target), axis=0)

        learner = cntk.adam_sgd(self.model.parameters, self.learning_rate, momentum=self.momentum)
        self.trainer = cntk.Trainer(self.model, self.loss, mean_error, learner)

    def train(self, x, y):
        """Run one minibatch update; *x* are states, *y* target Q-vectors."""
        data = dict(zip(self.loss.arguments, [y, x]))
        self.trainer.train_minibatch(data, outputs=[self.loss.output])

    def predict(self, s):
        """Return the network's Q-values for state batch *s*."""
        return self.model.eval([s])

    @staticmethod
    def create_multi_layer_neural_network(input_vars, out_dims, num_hidden_layers):
        """Fully-connected alternative architecture (unused by __init__)."""
        num_hidden_neurons = 128

        hidden_layer = lambda: Dense(num_hidden_neurons, activation=cntk.ops.relu)
        output_layer = Dense(out_dims, activation=None)

        model = Sequential([LayerStack(num_hidden_layers, hidden_layer),
                            output_layer])(input_vars)
        return model

    @staticmethod
    def create_convolutional_neural_network(input_vars, out_dims):
        """Conv->pool stack followed by a dense head producing Q-values."""
        convolutional_layer_1 = Convolution((5, 5), 32, strides=1, activation=cntk.ops.relu, pad=True,
                                            init=glorot_normal(), init_bias=0.1)
        pooling_layer_1 = MaxPooling((2, 2), strides=(2, 2), pad=True)

        convolutional_layer_2 = Convolution((5, 5), 64, strides=1, activation=cntk.ops.relu, pad=True,
                                            init=glorot_normal(), init_bias=0.1)
        pooling_layer_2 = MaxPooling((2, 2), strides=(2, 2), pad=True)

        convolutional_layer_3 = Convolution((5, 5), 128, strides=1, activation=cntk.ops.relu, pad=True,
                                            init=glorot_normal(), init_bias=0.1)
        pooling_layer_3 = MaxPooling((2, 2), strides=(2, 2), pad=True)

        fully_connected_layer = Dense(1024, activation=cntk.ops.relu, init=glorot_normal(), init_bias=0.1)
        output_layer = Dense(out_dims, activation=None, init=glorot_normal(), init_bias=0.1)

        # The third conv block is deliberately disabled below.
        model = Sequential([convolutional_layer_1, pooling_layer_1,
                            convolutional_layer_2, pooling_layer_2,
                            #convolutional_layer_3, pooling_layer_3,
                            fully_connected_layer,
                            output_layer])(input_vars)
        return model
class Memory:
    """Fixed-capacity experience-replay buffer.

    Samples are kept in arrival order in a deque; once ``capacity`` is
    reached, the oldest sample is evicted on every ``add``.
    """

    def __init__(self, capacity):
        self.examplers = deque(maxlen=capacity)
        self.capacity = capacity

    def add(self, sample):
        """Store one sample, evicting the oldest when the buffer is full."""
        self.examplers.append(sample)

    def get_random_samples(self, num_samples):
        """Return up to *num_samples* distinct samples, chosen uniformly."""
        count = min(num_samples, len(self.examplers))
        return random.sample(tuple(self.examplers), count)

    def get_stack(self, start_index, stack_size):
        """Return *stack_size* consecutive samples stacked on a new last axis.

        *start_index* is clamped so the window fits inside the buffer; if
        the buffer holds fewer than *stack_size* samples, the newest sample
        is repeated to pad the window.
        """
        shortfall = stack_size - len(self.examplers)
        if shortfall > 0:
            frames = list(self.examplers) + [self.examplers[-1]] * shortfall
        else:
            first = min(start_index, len(self.examplers) - stack_size)
            frames = [self.examplers[first + offset] for offset in range(stack_size)]
        return np.stack(frames, axis=-1)

    def get_random_stacks(self, num_samples, stack_size):
        """Return *num_samples* windows starting at distinct random indices."""
        starts = random.sample(range(len(self.examplers)), num_samples)
        return [self.get_stack(start, stack_size) for start in starts]

    def get_latest_stack(self, stack_size):
        """Return the window ending at the newest sample."""
        return self.get_stack(len(self.examplers), stack_size)
class Agent:
    """Epsilon-greedy DQN agent tying together Brain and Memory."""

    MEMORY_CAPACITY = 100000
    DISCOUNT_FACTOR = 0.99
    MAX_EXPLORATION_RATE = 1.0
    MIN_EXPLORATION_RATE = 0.01
    DECAY_RATE = 0.0001

    def __init__(self):
        self.explore_rate = self.MAX_EXPLORATION_RATE
        self.brain = Brain()
        self.memory = Memory(self.MEMORY_CAPACITY)
        self.steps = 0

    def act(self, s):
        """Return a random action with probability explore_rate, else argmax Q."""
        if random.random() < self.explore_rate:
            return random.randint(0, NUM_ACTIONS - 1)
        else:
            return np.argmax(self.brain.predict(s))

    def observe(self, sample):
        """Store a (state, action, reward, next_state) transition."""
        self.steps += 1
        self.memory.add(sample)

        # Decay the exploration rate exponentially towards the minimum
        # (the original comment said "linearly", but exp() is used).
        self.explore_rate = self.MIN_EXPLORATION_RATE + (self.MAX_EXPLORATION_RATE - self.MIN_EXPLORATION_RATE) * math.exp(-self.DECAY_RATE * self.steps)

    def replay(self):
        """Sample a minibatch and perform one Q-learning update."""
        batch = self.memory.get_random_samples(self.brain.BATCH_SIZE)
        batch_len = len(batch)

        states = np.array([sample[0] for sample in batch], dtype=np.float32)
        # Terminal transitions (next_state is None) use a dummy state; their
        # predicted future value is ignored below.
        no_state = np.zeros(STATE_DIMS)
        resultant_states = np.array([(no_state if sample[3] is None else sample[3]) for sample in batch], dtype=np.float32)

        q_values_batch = self.brain.predict(states)
        future_q_values_batch = self.brain.predict(resultant_states)

        x = np.zeros((batch_len, ) + STATE_DIMS).astype(np.float32)
        y = np.zeros((batch_len, NUM_ACTIONS)).astype(np.float32)

        for i in range(batch_len):
            state, action, reward, resultant_state = batch[i]

            # Target: keep predicted Q for non-taken actions, replace the
            # taken action's Q with the Bellman target.
            q_values = q_values_batch[0][i]
            if resultant_state is None:
                q_values[action] = reward
            else:
                q_values[action] = reward + self.DISCOUNT_FACTOR * np.amax(future_q_values_batch[0][i])

            x[i] = state
            y[i] = q_values

        self.brain.train(x, y)

    @classmethod
    def action_from_output(cls, output_array):
        """Map a network output vector to the greedy action index."""
        return np.argmax(output_array)
def run_simulation(agent, solved_reward_level):
    """Play one episode in the global ``env``, training *agent* each step.

    Returns (total_rewards, time_step) when the episode ends or the
    accumulated reward exceeds *solved_reward_level*.
    """
    state = env.reset()
    state = preprocess_image(state)
    total_rewards = 0
    time_step = 0
    while True:
        #env.render()
        time_step += 1
        action = agent.act(state.astype(np.float32))
        resultant_state, reward, done, info = env.step(action)
        resultant_state = preprocess_image(resultant_state)

        if done: # terminal state
            resultant_state = None

        # Store the transition and do one experience-replay update.
        agent.observe((state, action, reward, resultant_state))
        agent.replay()

        state = resultant_state
        total_rewards += reward

        if total_rewards > solved_reward_level or done:
            return total_rewards, time_step
def test(model_path, num_episodes=10):
root = cntk.load_model(model_path)
observation = env.reset() # reset environment for new episode
done = False
for episode in range(num_episodes):
while not done:
try:
env.render()
except Exception:
|
jeremiahyan/lammps | examples/SPIN/test_problems/validation_damped_exchange/plot_precession.py | Python | gpl-2.0 | 1,111 | 0.023402 | #!/usr/bin/env python3
import numpy as np, pylab, tkinter
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from decimal import *
import sys, string, os
# Expect exactly two data files: the LAMMPS dump and the LLG reference run.
argv = sys.argv
if len(argv) != 3:
    print("Syntax: ./plot_precession.py res_lammps.dat res_llg.dat")
    sys.exit()

lammps_file = sys.argv[1]
llg_file = sys.argv[2]

# Column layout (0-based): LAMMPS -> t, Sx, Sy, Sz in cols 1-4, energy in
# col 7; LLG reference -> t, Sx, Sy, Sz, energy in cols 0-4.
t_lmp,Sx_lmp,Sy_lmp,Sz_lmp,e_lmp = np.loadtxt(lammps_file,skiprows=0, usecols=(1,2,3,4,7),unpack=True)
t_llg,Sx_llg,Sy_llg,Sz_llg,e_llg = np.loadtxt(llg_file,skiprows=0, usecols=(0,1,2,3,4),unpack=True)

# Four stacked panels comparing LAMMPS (solid) vs. LLG (dashed):
# spin components Sx, Sy, Sz and the energy, all against time.
plt.figure()
plt.subplot(411)
plt.ylabel('Sx')
plt.plot(t_lmp, Sx_lmp, 'b-', label='LAMMPS')
plt.plot(t_llg, Sx_llg, 'r--', label='LLG')

plt.subplot(412)
plt.ylabel('Sy')
plt.plot(t_lmp, Sy_lmp, 'b-', label='LAMMPS')
plt.plot(t_llg, Sy_llg, 'r--', label='LLG')

plt.subplot(413)
plt.ylabel('Sz')
plt.plot(t_lmp, Sz_lmp, 'b-', label='LAMMPS')
plt.plot(t_llg, Sz_llg, 'r--', label='LLG')

plt.subplot(414)
plt.ylabel('E (eV)')
plt.plot(t_lmp, e_lmp, 'b-', label='LAMMPS')
plt.plot(t_llg, e_llg, 'r--', label='LLG')

plt.xlabel('time (in ps)')
plt.legend()
plt.show()
|
bluetiki/pylab | telnet2.py | Python | bsd-2-clause | 657 | 0.009132 | #!/usr/bin/env python
import telnetlib
import time
def main():
    """Log in to the lab router over telnet and print ``sh ip int br``."""
    ip = '50.76.53.27'
    port = 23
    timeout = 3
    user = 'pyclass'
    passwd = '88newclass'

    conn = telnetlib.Telnet(ip, port, timeout)
    # FIX: close the connection even if the device stops responding
    # mid-session (the original leaked the socket on any exception).
    try:
        conn.read_until("sername:", timeout)
        conn.write(user + '\n')
        conn.read_until("assword:", timeout)
        conn.write(passwd + '\n')
        time.sleep(1)
        conn.read_very_eager()
        # Disable paging so the command output arrives in one read.
        conn.write("terminal length 0\n")
        time.sleep(1)
        conn.read_very_eager()
        conn.write("sh ip int br\n")
        time.sleep(1)
        out = conn.read_very_eager()
        # Parenthesized print works under both Python 2 and 3
        # (the original ``print out`` statement was Python-2 only).
        print(out)
    finally:
        conn.close()
if __name__ == "__main__":
main()
|
vmturbo/nova | nova/tests/unit/test_service.py | Python | apache-2.0 | 14,578 | 0.00048 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for remote procedure calls using queue
"""
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_service import service as _service
import testtools
from nova import exception
from nova import manager
from nova import objects
from nova import rpc
from nova import service
from nova import test
from nova.tests.unit import utils
test_service_opts = [
cfg.StrOpt("test_service_listen",
default='127.0.0.1',
help="Host to bind test service to"),
cfg.IntOpt("test_service_listen_port",
default=0,
help="Port number to bind test service to"),
]
CONF = cfg.CONF
CONF.register_opts(test_service_opts)
class FakeManager(manager.Manager):
    """Fake manager for tests."""

    def test_method(self):
        # Marker return value so tests can tell manager and service apart.
        return 'manager'
class ExtendedService(service.Service):
    """Service subclass that shadows the manager's test_method."""

    def test_method(self):
        return 'service'
class ServiceManagerTestCase(test.NoDBTestCase):
    """Test cases for Services."""

    def test_message_gets_to_manager(self):
        # Attributes not defined on the service resolve to its manager.
        serv = service.Service('test',
                               'test',
                               'test',
                               'nova.tests.unit.test_service.FakeManager')
        self.assertEqual('manager', serv.test_method())

    def test_override_manager_method(self):
        # A method defined on the service itself wins over the manager's.
        serv = ExtendedService('test',
                               'test',
                               'test',
                               'nova.tests.unit.test_service.FakeManager')
        self.assertEqual('service', serv.test_method())

    def test_service_with_min_down_time(self):
        # TODO(hanlind): This really tests code in the servicegroup api.
        self.flags(service_down_time=10, report_interval=10)
        service.Service('test',
                        'test',
                        'test',
                        'nova.tests.unit.test_service.FakeManager')
        # Constructing the service bumps service_down_time (10 -> 25 here).
        self.assertEqual(25, CONF.service_down_time)
class ServiceTestCase(test.NoDBTestCase):
"""Test cases for Services."""
    def setUp(self):
        super(ServiceTestCase, self).setUp()
        # Common identifiers reused by every test in this class.
        self.host = 'foo'
        self.binary = 'nova-fake'
        self.topic = 'fake'
    def test_create(self):
        """Service.create builds a service object without starting it."""
        # NOTE(vish): Create was moved out of mox replay to make sure that
        #             the looping calls are created in StartService.
        app = service.Service.create(host=self.host, binary=self.binary,
                topic=self.topic,
                manager='nova.tests.unit.test_service.FakeManager')

        self.assertTrue(app)
    def test_repr(self):
        # Test if a Service object is correctly represented, for example in
        # log files.
        serv = service.Service(self.host,
                               self.binary,
                               self.topic,
                               'nova.tests.unit.test_service.FakeManager')
        exp = "<Service: host=foo, binary=nova-fake, " \
              "manager_class_name=nova.tests.unit.test_service.FakeManager>"
        self.assertEqual(exp, repr(serv))
    @mock.patch.object(objects.Service, 'create')
    @mock.patch.object(objects.Service, 'get_by_host_and_binary')
    def test_init_and_start_hooks(self, mock_get_by_host_and_binary,
                                  mock_create):
        """Verify the ordering of manager hooks around service start."""
        mock_get_by_host_and_binary.return_value = None
        mock_manager = mock.Mock()
        serv = service.Service(self.host,
                               self.binary,
                               self.topic,
                               'nova.tests.unit.test_service.FakeManager')
        serv.manager = mock_manager
        serv.manager.service_name = self.topic
        serv.manager.additional_endpoints = []
        serv.start()
        # init_host is called before any service record is created
        serv.manager.init_host.assert_called_once_with()
        mock_get_by_host_and_binary.assert_called_once_with(mock.ANY,
                                                            self.host,
                                                            self.binary)
        mock_create.assert_called_once_with()
        # pre_start_hook is called after service record is created,
        # but before RPC consumer is created
        serv.manager.pre_start_hook.assert_called_once_with()
        # post_start_hook is called after RPC consumer is created.
        serv.manager.post_start_hook.assert_called_once_with()
    @mock.patch('nova.objects.service.Service.get_by_host_and_binary')
    def test_start_updates_version(self, mock_get_by_host_and_binary):
        # test that the service version gets updated on services startup
        service_obj = mock.Mock()
        service_obj.binary = 'fake-binary'
        service_obj.host = 'fake-host'
        # Obviously stale version so the update is observable.
        service_obj.version = -42
        mock_get_by_host_and_binary.return_value = service_obj

        serv = service.Service(self.host, self.binary, self.topic,
                               'nova.tests.unit.test_service.FakeManager')
        serv.start()

        # test service version got updated and saved:
        self.assertEqual(1, service_obj.save.call_count)
        self.assertEqual(objects.service.SERVICE_VERSION, service_obj.version)
    @mock.patch.object(objects.Service, 'create')
    @mock.patch.object(objects.Service, 'get_by_host_and_binary')
    def _test_service_check_create_race(self, ex,
                                        mock_get_by_host_and_binary,
                                        mock_create):
        """Start with create() raising *ex*: the second lookup's error
        must propagate out of serv.start()."""
        mock_manager = mock.Mock()
        serv = service.Service(self.host,
                               self.binary,
                               self.topic,
                               'nova.tests.unit.test_service.FakeManager')

        class TestException(Exception):
            pass

        # First lookup finds nothing -> create() races and raises *ex* ->
        # the retry lookup raises TestException, which should bubble up.
        mock_get_by_host_and_binary.side_effect = [None, TestException()]
        mock_create.side_effect = ex

        serv.manager = mock_manager
        self.assertRaises(TestException, serv.start)
        serv.manager.init_host.assert_called_with()
        mock_get_by_host_and_binary.assert_has_calls([
                mock.call(mock.ANY, self.host, self.binary),
                mock.call(mock.ANY, self.host, self.binary)])
        mock_create.assert_called_once_with()
def test_service_check_create_race_topic_exists(self):
ex = exception.ServiceTopicExists(host='foo', topic='bar')
self._test_service_check_create_ | race(ex)
def test_service_check_create_race_binary_exists(self):
ex = exception.ServiceBinaryExists(host='foo', binary='bar')
self._test_service_check_create_race(ex)
@mock.patch.object(objects.Service, 'create')
@mock.patch.object(objects.Service, 'get_by_host_and_binary')
@mock.patch.object(_service.Service, 'stop')
def test_parent_graceful_shutdown(self, mock_stop,
mock_get_by_host_and_binary,
mock_create):
mock_get_by_host_and_binary.return_value = None
mock_manager = mock.Mock()
serv = service.Service(self.host,
self.binary,
self.topic,
'nova.tests.unit.test_service.FakeManager')
serv.manager = mock_manager
serv.manager.service_name = self.topic
serv.manager.additional_endpoints = []
serv.start()
|
larsks/cloud-init | tests/unittests/test_vmware/test_guestcust_util.py | Python | gpl-3.0 | 2,910 | 0 | # Copyright (C) 2019 Canonical Ltd.
# Copyright (C) 2019 VMware INC.
#
# Author: Xiaofeng Wang <xiaofengw@vmware.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit import util
from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
get_tools_config,
)
from cloudinit.tests.helpers import CiTestCase, mock
class TestGuestCustUtil(CiTestCase):
def test_get_tools_config_not_installed(self):
"""
This test is designed to verify the behavior if vmware-toolbox-cmd
is not installed.
"""
with mock.patch.object(util, 'which', return_value=None):
self.assertEqual(
| get_tools_config('section', 'key', 'defaultVal'), 'defaultVal')
def test_get_tools_config_internal_exception(self):
"""
This test is designed to verify the behavior if internal exception
is raised.
"""
with mock.patch.object(util, 'which', return_value='/dummy/p | ath'):
with mock.patch.object(util, 'subp',
return_value=('key=value', b''),
side_effect=util.ProcessExecutionError(
"subp failed", exit_code=99)):
# verify return value is 'defaultVal', not 'value'.
self.assertEqual(
get_tools_config('section', 'key', 'defaultVal'),
'defaultVal')
def test_get_tools_config_normal(self):
"""
This test is designed to verify the value could be parsed from
key = value of the given [section]
"""
with mock.patch.object(util, 'which', return_value='/dummy/path'):
# value is not blank
with mock.patch.object(util, 'subp',
return_value=('key = value ', b'')):
self.assertEqual(
get_tools_config('section', 'key', 'defaultVal'),
'value')
# value is blank
with mock.patch.object(util, 'subp',
return_value=('key = ', b'')):
self.assertEqual(
get_tools_config('section', 'key', 'defaultVal'),
'')
# value contains =
with mock.patch.object(util, 'subp',
return_value=('key=Bar=Wark', b'')):
self.assertEqual(
get_tools_config('section', 'key', 'defaultVal'),
'Bar=Wark')
# value contains specific characters
with mock.patch.object(util, 'subp',
return_value=('[a] b.c_d=e-f', b'')):
self.assertEqual(
get_tools_config('section', 'key', 'defaultVal'),
'e-f')
# vi: ts=4 expandtab
|
mrachinskiy/jewelcraft | op_gem_map/draw_handler.py | Python | gpl-3.0 | 1,672 | 0 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# JewelCraft jewelry design toolkit for Blender.
# Copyright (C) 2015-2022 Mikhail Rachinskiy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
import gpu
from gpu_extras.presets import draw_texture_2d
from ..lib import view3d_lib
from . import onscreen_text
def draw(self, context):
width = self.region.width
| height = self.region.height
x = self.view_padding_left
y = self.view_padding_top
# Gem map
# -----------------------------
if not self.use_navigate:
gpu.state.blend_set("ALPHA")
draw_texture_2d(self.offscreen.texture_color, (0, 0), widt | h, height)
# Onscreen text
# -----------------------------
y = onscreen_text.onscreen_gem_table(self, x, y)
y -= self.view_margin
if self.show_warn:
y = onscreen_text.onscreen_warning(self, x, y)
y -= self.view_margin
view3d_lib.options_display(self, context, x, y)
# Reset state
# ----------------------------
gpu.state.blend_set("NONE")
|
hgl888/chromium-crosswalk | infra/scripts/legacy/scripts/common/env.py | Python | bsd-3-clause | 12,777 | 0.009783 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implements a standard mechanism for Chrome Infra Python environment setup.
This library provides a central location to define Chrome Infra environment
setup. It also provides several faculties to install this environment. |
Within a cooperating script, the environment can be setup by importing this
module and running its 'Install' method:
# Install Chrome-Infra environment (replaces 'sys.path').
sys.path.insert(0,
os.path.join(os.path.dirname(__file__), os.pardir, ...))
# (/path/to/build/scripts)
import common.env
common.env.Install()
When attempting to export the Chrome Infra path to external scripts, this
script can be invoked as an executable with various subcommands to emit a | valid
PYTHONPATH clause.
In addition, this module has several functions to construct the path.
The goal is to deploy this module universally among Chrome-Infra scripts,
BuildBot configurations, tool invocations, and tests to ensure that they all
execute with the same centrally-defined environment.
"""
import argparse
import collections
import contextlib
import imp
import itertools
import os
import sys
import traceback
# Export for bootstrapping.
__all__ = [
'Install',
'PythonPath',
]
# Name of enviornment extension file to seek.
ENV_EXTENSION_NAME = 'environment.cfg.py'
# Standard directories (based on this file's location in the <build> tree).
def path_if(*args):
if not all(args):
return None
path = os.path.abspath(os.path.join(*args))
return (path) if os.path.exists(path) else (None)
# The path to the <build> directory in which this script resides.
Build = path_if(os.path.dirname(__file__), os.pardir, os.pardir)
# The path to the <build_internal> directory.
BuildInternal = path_if(Build, os.pardir, 'build_internal')
def SetPythonPathEnv(value):
"""Sets the system's PYTHONPATH environemnt variable.
Args:
value (str): The value to use. If this is empty/None, the system's
PYTHONPATH will be cleared.
"""
# Since we can't assign None to the environment "dictionary", we have to
# either set or delete the key depending on the original value.
if value is not None:
os.environ['PYTHONPATH'] = str(value)
else:
os.environ.pop('PYTHONPATH', None)
def Install(**kwargs):
"""Replaces the current 'sys.path' with a hermetic Chrome-Infra path.
Args:
kwargs (dict): See GetInfraPythonPath arguments.
Returns (PythonPath): The PythonPath object that was installed.
"""
infra_python_path = GetInfraPythonPath(**kwargs)
infra_python_path.Install()
return infra_python_path
def SplitPath(path):
"""Returns (list): A list of path elements.
Splits a path into path elements. For example (assuming '/' is the local
system path separator):
>>> print SplitPath('/a/b/c/d')
['/', 'a', 'b', 'c', 'd']
>>> print SplitPath('a/b/c')
['a', 'b,' 'c']
"""
parts = []
while True:
path, component = os.path.split(path)
if not component:
if path:
parts.append(path)
break
parts.append(component)
parts.reverse()
return parts
def ExtendPath(base, root_dir):
"""Returns (PythonPath): The extended python path.
This method looks for the ENV_EXTENSION_NAME file within "root_dir". If
present, it will be loaded as a Python module and have its "Extend" method
called.
If no extension is found, the base PythonPath will be returned.
Args:
base (PythonPath): The base python path.
root_dir (str): The path to check for an extension.
"""
extension_path = os.path.join(root_dir, ENV_EXTENSION_NAME)
if not os.path.isfile(extension_path):
return base
with open(extension_path, 'r') as fd:
extension = fd.read()
extension_module = imp.new_module('env-extension')
# Execute the enviornment extension.
try:
exec extension in extension_module.__dict__
extend_func = getattr(extension_module, 'Extend', None)
assert extend_func, (
"The environment extension module is missing the 'Extend()' method.")
base = extend_func(base, root_dir)
if not isinstance(base, PythonPath):
raise TypeError("Extension module returned non-PythonPath object (%s)" % (
type(base).__name__,))
except Exception:
# Re-raise the exception, but include the configuration file name.
tb = traceback.format_exc()
raise RuntimeError("Environment extension [%s] raised exception: %s" % (
extension_path, tb))
return base
def IsSystemPythonPath(path):
"""Returns (bool): If a python path is user-installed.
Paths that are known to be user-installed paths can be ignored when setting
up a hermetic Python path environment to avoid user libraries that would not
be present in other environments falsely affecting code.
This function can be updated as-needed to exclude other non-system paths
encountered on bots and in the wild.
"""
components = SplitPath(path)
for component in components:
if component in ('dist-packages', 'site-packages'):
return False
return True
class PythonPath(collections.Sequence):
"""An immutable set of Python path elements.
All paths represented in this structure are absolute. If a relative path
is passed into this structure, it will be converted to absolute based on
the current working directory (via os.path.abspath).
"""
def __init__(self, components=None):
"""Initializes a new PythonPath instance.
Args:
components (list): A list of path component strings.
"""
seen = set()
self._components = []
for component in (components or ()):
component = os.path.abspath(component)
assert isinstance(component, basestring), (
"Path component '%s' is not a string (%s)" % (
component, type(component).__name__))
if component in seen:
continue
seen.add(component)
self._components.append(component)
def __getitem__(self, value):
return self._components[value]
def __len__(self):
return len(self._components)
def __iadd__(self, other):
return self.Append(other)
def __repr__(self):
return self.pathstr
def __eq__(self, other):
assert isinstance(other, type(self))
return self._components == other._components
@classmethod
def Flatten(cls, *paths):
"""Returns (list): A single-level list containing flattened path elements.
>>> print PythonPath.Flatten('a', ['b', ['c', 'd']])
['a', 'b', 'c', 'd']
"""
result = []
for path in paths:
if not isinstance(path, basestring):
# Assume it's an iterable of paths.
result += cls.Flatten(*path)
else:
result.append(path)
return result
@classmethod
def FromPaths(cls, *paths):
"""Returns (PythonPath): A PythonPath instantiated from path elements.
Args:
paths (tuple): A tuple of path elements or iterables containing path
elements (e.g., PythonPath instances).
"""
return cls(cls.Flatten(*paths))
@classmethod
def FromPathStr(cls, pathstr):
"""Returns (PythonPath): A PythonPath instantiated from the path string.
Args:
pathstr (str): An os.pathsep()-delimited path string.
"""
return cls(pathstr.split(os.pathsep))
@property
def pathstr(self):
"""Returns (str): A path string for the instance's path elements."""
return os.pathsep.join(self)
def IsHermetic(self):
"""Returns (bool): True if this instance contains only system paths."""
return all(IsSystemPythonPath(p) for p in self)
def GetHermetic(self):
"""Returns (PythonPath): derivative PythonPath containing only system paths.
"""
return type(self).FromPaths(*(p for p in self if IsSystemPythonPath(p)))
def Append(self, *paths):
"""Returns (PythonPath): derivative PythonPath with paths added to the end.
Args:
paths (tuple): A tuple of path elements to append to the current instance.
"""
return type(self)(itertools.chain(self, self.FromPaths(*paths)))
def |
DiogoDantas/tourgastronomico | projetoIA/urls.py | Python | mit | 172 | 0 | from django.conf.urls im | port include, url
from django.contrib import adm | in
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('agenteIA.urls'))
]
|
folivetti/PI-UFABC | AULA_02/Python/PedraPapelTesoura.py | Python | mit | 690 | 0.010145 | # -*- coding: cp1252 -*-
'''
PedraPapelTesoura.py
* Dada duas entradas nos valores 0 (pedra), 1 (papel) ou 2 (tesoura),
* verificar quem ganho o jogo.
* Entrada: jogador1, jogador2 (int)
* Saída: jogador1 ganhou, jogador2 ganhou, empate
Autor: Fabrício Olivetti de França
Disciplina Processamento da Informação
Universidade Federal do ABC
'''
jogador1 = int( raw_input("Pedra (0), Papel (1) ou Tesoura (2)? ") )
j | ogador2 = int( raw_input("Pedra (0), Papel (1) ou Tes | oura (2)? ") )
if jogador1 > jogador2 or jogador1==0 and jogador2 == 2:
print "Jogador 1 venceu"
elif jogador2 > jogador1 or jogador1==2 and jogador2 == 0:
print "Jogador 2 venceu"
else:
print "Empate"
|
yyamnk/cec2013single | setup.py | Python | gpl-3.0 | 1,178 | 0.013582 | from setuptools import setup, find_packages, Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
# for unix
cec2013single = Extension("cec2013single.cec2013",
["cec2013single/cec2013.pyx", "cec2013single/cec2013_func.c"],
libraries=["m"]) # Unix-like specific. delete this opt for windows
# for windows
# cec2013single = Extension("cec2013single.cec2013",
# ["cec2013single/cec2013.pyx", "cec2013single/cec2013_func.c"])
setup(
name=' | cec2013single',
version='0.1',
author='Daniel Molina',
author_email='daniel.molina@uca.es',
description='Package for benchmark for the Real Single Objective Optimization session on IEEE Congress on Evolutionary Computation CEC\'2013',
long_description=open('README.rst').read(),
license='GPL V | 3',
packages=['cec2013single'],
install_requires=['cython', 'numpy'],
ext_modules=cythonize(cec2013single),
# ext_modules=cythonize('benchmarks.pyx', annotated=True),
cmdclass={'build_ext': build_ext},
# Packaging options.
#include_package_data = True,
# Tests
#tests_require=['pytest'],
)
|
glensc/osquery-python | tests/test_config_plugin.py | Python | bsd-3-clause | 1,735 | 0.002882 | """This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
import sys
import unittest
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),
"../build/lib/")))
import osquery
class MockConfigPlugin(osquery.ConfigPlugin):
"""Mock config plugin for testing the config API"""
def name(self):
return "foobar"
def content(self):
return [
{
"source_1": json.dumps({
"schedule": {
"foo": {
"query": "select * from foobar",
"interval": 5,
},
},
}),
},
]
class TestConfigPlugin(unittest.TestCase):
"""Tests for osquery.ConfigPlugin"""
def test_simple_call(self):
"""Tests for the call method of osquery.TablePlugin"""
ext_manager = osquery. | ExtensionManager()
ext_manager.add_plugin(MockConfigP | lugin)
response = ext_manager.call("config",
"foobar",
{"action":"genConfig"})
self.assertEqual(0, response.status.code)
self.assertTrue(len(response.response) > 0)
self.assertTrue("source_1" in response.response[0])
if __name__ == '__main__':
unittest.main()
|
umitproject/umpa | umit/umpa/utils/net.py | Python | lgpl-2.1 | 2,096 | 0.006679 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2009 Adriano Monteiro Marques.
#
# Author: Bartosz SKOWRON <getxsick at gmail dot com>
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
Functions related to network issues.
"""
import umit.umpa.utils.bits
def in_cksum(data, cksum=0):
"""
Return Internet Checksum.
It is an implementation of RFC 1071.
To check if the already calculated checksum is correct, pass it as cksum |
argument. If the result is 0, then the ckecksum has not detected an error.
@type data: C{int}
@param data: the data from which checksum is calculated.
@type cksum: C{int}
@param cksum: already calculated checksum for comparision (default: 0)
@rtype: C{int}
@return: calculated checksum.
"""
pieces = umit.umpa.utils.bits.split_number_into_chunks(data)
if len(pieces)%2 == 1:
pieces.append(0)
for i in xrange(0, len(pieces), 2):
xxx = ((pieces[ | i] << 8) & 0xff00) + (pieces[i+1] & 0xff)
cksum += xxx
while cksum >> 16:
cksum = (cksum & 0xffff) + (cksum >> 16)
cksum = ~cksum
return int(cksum & 0xffff)
def parse_ipv4(ip):
"""
Return 4 numbers of an IPv4
192.168.1.2 -> [192, 168, 1, 2]
@type ip: C{str}
@param ip: the ip to parse.
@rtype: C{int}
@return: 4 numbers.
"""
return [ int(x) for x in ip.split('.') ]
|
zeroonegit/python | runoob/basic_tutorial/template.py | Python | mit | 64 | 0.015625 | #!/usr/bin/e | nv python3
# -*- coding: utf-8 -*-
# Filen | ame: .py
|
wuan/bo-python | blitzortung/dataimport/__init__.py | Python | apache-2.0 | 571 | 0.001751 | from .base import FileTransport, HttpFileTransport, Blitzort | ungDataPath, BlitzortungDataPathGenerator
from .raw_signal import RawSignalsBlitzortungDataProvider
from .station import StationsBlitzortungDataProvider
from .strike import StrikesBlitzortungDataProvider
def strikes(): |
from .. import INJECTOR
return INJECTOR.get(StrikesBlitzortungDataProvider)
def stations():
from .. import INJECTOR
return INJECTOR.get(StationsBlitzortungDataProvider)
def raw():
from .. import INJECTOR
return INJECTOR.get(RawSignalsBlitzortungDataProvider)
|
VanushVaswani/amcparser | tests/test_motion.py | Python | mpl-2.0 | 702 | 0 | from . import skeleton_path, m_walking_path
from numpy.testing import assert_allclose
from amcparser import Skeleton, SkelMotion
def test_motion_root():
sk = Skeleton(skeleton_path)
skm = | SkelMotion(sk, m_walking_path, (1.0 / 120.0))
skm.traverse('root', 0, 10)
root_pos = sk.root.xyz_data
assert_allclose(root_pos[0, :], map(float, skm.data['root'][0][0:3]))
assert_allclose(root_pos[9, :], map(float, skm.data['root'][9][0:3]))
def test_motion_bone():
sk = Skeleton(skeleton_path)
skm = SkelMotion(sk, m_walking_path, (1.0 / 120.0))
skm.traverse('LeftToesJoint', 0, -1)
bone_pos = sk.get_bone('LeftToesJoint').xyz_data
assert | bone_pos.shape[0] == 1741
|
mdurrant-b3/acos-client | setup.py | Python | apache-2.0 | 1,390 | 0.017266 | #!/usr/bin/env python
# flake8: noqa
from setuptools import find_packages, setup
setup(
na | me = "acos-client",
version = "1.4.6",
packages = find_packages(),
author = "A10 Networks",
author_email = "mdurrant@a10networks.com",
description = "A10 Networks ACOS API Client",
license = "Apache",
| keywords = "a10 axapi acos adc slb load balancer",
url = "https://github.com/a10networks/acos-client",
long_description = open('README.md').read(),
classifiers = [
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules'
],
install_requires = ['requests>=2.3.0', 'six', 'uhashring'],
test_suite="acos_client.tests.test_suite"
)
|
mjcollin/idigphylo | api/api_stub.py | Python | mit | 5,084 | 0.004917 | from __future__ import absolute_import
import sys
sys.path.append("../lib")
sys.path.append("../workers")
from database import Database, Sequence
from tree.tree import build
from align.align import msa
from align.align import pipeline
import idigbio
opts = {}
opts["data_file"] = "data.nex"
#raw_seqs = {}
#raw_seqs["Tarsius_syrichta"] = "AAGTTTCATTGGAGCCACCACTCTTATAATTGCCCATGGCCTCACCTCCTCCCTATTATTTTGCCTAGCAAATACAAACTACGAACGAGTCCACAGTCGAACAATAGCACTAGCCCGTGGCCTTCAAACCCTATTACCTCTTGCAGCAACATGATGACTCCTCGCCAGCTTAACCAACCTGGCCCTTCCCCCAACAATTAATTTAATCGGTGAACTGTCCGTAATAATAGCAGCATTTTCATGGTCACACCTAACTATTATCTTAGTAGGCCTTAACACCCTTATCACCGCCCTATATTCCCTATATATACTAATCATAACTCAACGAGGAAAATACACATATCATATCA | ACAATATCATGCCCCCTTTCACCCGAGAAAATACATTAATAATCATACACCTATTTCCCTTAATCCTACTATCTACCAACCCCAAAGTAATTATAGGAACCATGTACTGTAAATATAGTTTAAACAAAACATTAGATTGTGAGTCTAATAATAGAAGCCCAAAGATTTCTTATTTACCAAGAAAGTA-TGCAAGAACTGCTAACTCATGCCTCCATATATAACAATGTGGCTTTCTT-ACTTTTAAAGGATAGAAGTAATCCATCGGTCTTAGGAACCGAAAA-ATTGGTGCAACTCCAAATAAAAGTAATAAATTTATTTTCATCCTCCATTTTACTATCACTTA | CACTCTTAATTACCCCATTTATTATTACAACAACTAAAAAATATGAAACACATGCATACCCTTACTACGTAAAAAACTCTATCGCCTGCGCATTTATAACAAGCCTAGTCCCAATGCTCATATTTCTATACACAAATCAAGAAATAATCATTTCCAACTGACATTGAATAACGATTCATACTATCAAATTATGCCTAAGCTT"
#raw_seqs["Lemur_catta"] = "AAGCTTCATAGGAGCAACCATTCTAATAATCGCACATGGCCTTACATCATCCATATTATTCTGTCTAGCCAACTCTAACTACGAACGAATCCATAGCCGTACAATACTACTAGCACGAGGGATCCAAACCATTCTCCCTCTTATAGCCACCTGATGACTACTCGCCAGCCTAACTAACCTAGCCCTACCCACCTCTATCAATTTAATTGGCGAACTATTCGTCACTATAGCATCCTTCTCATGATCAAACATTACAATTATCTTAATAGGCTTAAATATGCTCATCACCGCTCTCTATTCCCTCTATATATTAACTACTACACAACGAGGAAAACTCACATATCATTCGCACAACCTAAACCCATCCTTTACACGAGAAAACACCCTTATATCCATACACATACTCCCCCTTCTCCTATTTACCTTAAACCCCAAAATTATTCTAGGACCCACGTACTGTAAATATAGTTTAAA-AAAACACTAGATTGTGAATCCAGAAATAGAAGCTCAAAC-CTTCTTATTTACCGAGAAAGTAATGTATGAACTGCTAACTCTGCACTCCGTATATAAAAATACGGCTATCTCAACTTTTAAAGGATAGAAGTAATCCATTGGCCTTAGGAGCCAAAAA-ATTGGTGCAACTCCAAATAAAAGTAATAAATCTATTATCCTCTTTCACCCTTGTCACACTGATTATCCTAACTTTACCTATCATTATAAACGTTACAAACATATACAAAAACTACCCCTATGCACCATACGTAAAATCTTCTATTGCATGTGCCTTCATCACTAGCCTCATCCCAACTATATTATTTATCTCCTCAGGACAAGAAACAATCATTTCCAACTGACATTGAATAACAATCCAAACCCTAAAACTATCTATTAGCTT"
#raw_seqs["Homo_sapiens"] = "AAGCTTCACCGGCGCAGTCATTCTCATAATCGCCCACGGGCTTACATCCTCATTACTATTCTGCCTAGCAAACTCAAACTACGAACGCACTCACAGTCGCATCATAATCCTCTCTCAAGGACTTCAAACTCTACTCCCACTAATAGCTTTTTGATGACTTCTAGCAAGCCTCGCTAACCTCGCCTTACCCCCCACTATTAACCTACTGGGAGAACTCTCTGTGCTAGTAACCACGTTCTCCTGATCAAATATCACTCTCCTACTTACAGGACTCAACATACTAGTCACAGCCCTATACTCCCTCTACATATTTACCACAACACAATGGGGCTCACTCACCCACCACATTAACAACATAAAACCCTCATTCACACGAGAAAACACCCTCATGTTCATACACCTATCCCCCATTCTCCTCCTATCCCTCAACCCCGACATCATTACCGGGTTTTCCTCTTGTAAATATAGTTTAACCAAAACATCAGATTGTGAATCTGACAACAGAGGCTTA-CGACCCCTTATTTACCGAGAAAGCT-CACAAGAACTGCTAACTCATGCCCCCATGTCTAACAACATGGCTTTCTCAACTTTTAAAGGATAACAGCTATCCATTGGTCTTAGGCCCCAAAAATTTTGGTGCAACTCCAAATAAAAGTAATAACCATGCACACTACTATAACCACCCTAACCCTGACTTCCCTAATTCCCCCCATCCTTACCACCCTCGTTAACCCTAACAAAAAAAACTCATACCCCCATTATGTAAAATCCATTGTCGCATCCACCTTTATTATCAGTCTCTTCCCCACAACAATATTCATGTGCCTAGACCAAGAAGTTATTATCTCGAACTGACACTGAGCCACAACCCAAACAACCCAGCTCTCCCTAAGCTT"
#raw_seqs["Pan"] = "AAGCTTCACCGGCGCAATTATCCTCATAATCGCCCACGGACTTACATCCTCATTATTATTCTGCCTAGCAAACTCAAATTATGAACGCACCCACAGTCGCATCATAATTCTCTCCCAAGGACTTCAAACTCTACTCCCACTAATAGCCTTTTGATGACTCCTAGCAAGCCTCGCTAACCTCGCCCTACCCCCTACCATTAATCTCCTAGGGGAACTCTCCGTGCTAGTAACCTCATTCTCCTGATCAAATACCACTCTCCTACTCACAGGATTCAACATACTAATCACAGCCCTGTACTCCCTCTACATGTTTACCACAACACAATGAGGCTCACTCACCCACCACATTAATAACATAAAGCCCTCATTCACACGAGAAAATACTCTCATATTTTTACACCTATCCCCCATCCTCCTTCTATCCCTCAATCCTGATATCATCACTGGATTCACCTCCTGTAAATATAGTTTAACCAAAACATCAGATTGTGAATCTGACAACAGAGGCTCA-CGACCCCTTATTTACCGAGAAAGCT-TATAAGAACTGCTAATTCATATCCCCATGCCTGACAACATGGCTTTCTCAACTTTTAAAGGATAACAGCCATCCGTTGGTCTTAGGCCCCAAAAATTTTGGTGCAACTCCAAATAAAAGTAATAACCATGTATACTACCATAACCACCTTAACCCTAACTCCCTTAATTCTCCCCATCCTCACCACCCTCATTAACCCTAACAAAAAAAACTCATATCCCCATTATGTGAAATCCATTATCGCGTCCACCTTTATCATTAGCCTTTTCCCCACAACAATATTCATATGCCTAGACCAAGAAGCTATTATCTCAAACTGGCACTGAGCAACAACCCAAACAACCCAGCTCTCCCTAAGCTT"
#opts["raw_seqs"] = raw_seqs
# Build raw seqs from db lookup given ids, consider doing this as a worker
# in the future if we move away from a static database.
#idb_uuids = ["23984",
#"995440",
#"81211",
#"5559384"]
idb = idigbio.json()
rq_ = {"genus": "acer"}
limit_ = 10
fields_ = ["uuid"]
sort_ = ["uuid"]
results = idb.search_records(rq=rq_, limit=limit_, fields=fields_, sort=sort_)
#print len(results["items"])
#print results["items"][0]["indexTerms"]["genus"]
#exit(0)
idb_uuids = []
for rec in results["items"]:
idb_uuids.append(rec["indexTerms"]["uuid"])
print idb_uuids
db = Database()
opts["raw_seqs"] = {}
for seq in db.sess.query(Sequence).filter(Sequence.idb_uuid.in_(idb_uuids)):
# The "-" char messes up MrBayes even in the taxon name string field.
# Change that here and it will percolate through the output without
# affecting the data sources on the front end.
opts["raw_seqs"][seq.idb_uuid.replace("-", "_")] = seq.seq
opts["seq_type"] = "dna"
print opts
#msa.delay(opts)
#build.delay(opts)
pipeline.delay(opts)
|
yanyao/openstack-ansible | doc/source/conf.py | Python | apache-2.0 | 12,061 | 0.000497 | #!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import pbr.version
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../playbooks/inventory/'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'openstackdocstheme',
'sphinx.ext.autodoc',
'sphinx.ext.extlinks',
'sphinxmark'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
author = 'OpenStack-Ansible Contributors'
category = 'Miscellaneous'
copyright = '2014-2017, OpenStack-Ansible Contributors'
description = 'OpenStack-Ansible deploys OpenStack environments using Ansible.'
project = 'OpenStack-Ansible'
target_name = 'openstack-ansible'
title = 'OpenStack-Ansible Documentation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version_info = pbr.version.VersionInfo(target_name)
# The full version, including alpha/beta/rc tags.
release = version_info.version_string_with_vcs()
# The short X.Y version.
version = version_info.canonical_version_string()
# openstackdocstheme options
repository_name = 'openstack/' + target_name
bug_project = project.lower()
bug_tag = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the s | idebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths | that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = target_name + '-docs'
# If true, publish source files
html_copy_source = False
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, target_name + '.tex',
title, author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_u |
freeitaly/Trading-System | vn.trader/ctaAlgo/ctaSetting.py | Python | mit | 614 | 0.002381 | # encoding: UTF-8
'''
Import here every strategy class that should be usable by the system.

The STRATEGY_CLASS dictionary maps the name of each runnable strategy to
its class.  After writing a strategy class, import it in this file and
register it under a name; the concrete strategy objects (class plus
contract settings) are then configured in CTA_setting.json.
'''

from ctaDemo import DoubleEmaDemo
from strategyTickBreaker import TickBreaker
from strategyRock import RockStrategy  # repaired: a stray '|' corrupted this import line

# Name -> class registry consulted when strategies are instantiated.
STRATEGY_CLASS = {}
STRATEGY_CLASS['TickBreaker'] = TickBreaker
STRATEGY_CLASS['DoubleEmaDemo'] = DoubleEmaDemo
STRATEGY_CLASS['RockStrategy'] = RockStrategy
kczapla/pylint | pylint/checkers/newstyle.py | Python | gpl-2.0 | 5,166 | 0.001161 | # Copyright (c) 2006, 2008-2011, 2013-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013-2018 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Michal Nowikowski <godfryd@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016 Alexander Todorov <atodorov@otb.bg>
# Copyright (c) 2016 Jakub Wilk <jwilk@jwilk.net>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""check for new / old style related problems
"""
import sys
import astroid
from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker
from pylint.checkers.utils import check_messages, node_frame_class, has_known_bases
# Message definitions registered by NewStyleConflictChecker:
# id -> (template, symbolic name, help text[, extra options]).
MSGS = {
    "E1003": (
        "Bad first argument %r given to super()",
        "bad-super-call",
        "Used when another argument than the current class is given as "
        "first argument of the super builtin.",
    ),
    "E1004": (
        "Missing argument to super()",
        "missing-super-argument",
        "Used when the super builtin didn't receive an " "argument.",
        {"maxversion": (3, 0)},  # only relevant when analysing Python < 3.0
    ),
}
class NewStyleConflictChecker(BaseChecker):
    """checks for usage of new style capabilities on old style classes and
    other new/old styles conflicts problems
     * use of property, __slots__, super
     * "super" usage
    """

    __implements__ = (IAstroidChecker,)

    # configuration section name
    name = "newstyle"
    # messages
    msgs = MSGS
    priority = -2
    # configuration options
    options = ()

    @check_messages("bad-super-call", "missing-super-argument")
    def visit_functiondef(self, node):
        """check use of super"""
        # ignore actual functions or method within a new style class
        if not node.is_method():
            return
        klass = node.parent.frame()
        # repaired: this loop header and the "super" name test below were
        # corrupted by stray '|' characters in the source.
        for stmt in node.nodes_of_class(astroid.Call):
            if node_frame_class(stmt) != node_frame_class(node):
                # Don't look down in other scopes.
                continue

            expr = stmt.func
            if not isinstance(expr, astroid.Attribute):
                continue

            call = expr.expr
            # skip the test if using super
            if not (
                isinstance(call, astroid.Call)
                and isinstance(call.func, astroid.Name)
                and call.func.name == "super"
            ):
                continue

            if not klass.newstyle and has_known_bases(klass):
                # super should not be used on an old style class
                continue
            else:
                # super first arg should be the class
                if not call.args:
                    if sys.version_info[0] == 3:
                        # unless Python 3
                        continue
                    else:
                        self.add_message("missing-super-argument", node=call)
                        continue

                # calling super(type(self), self) can lead to recursion loop
                # in derived classes
                arg0 = call.args[0]
                if (
                    isinstance(arg0, astroid.Call)
                    and isinstance(arg0.func, astroid.Name)
                    and arg0.func.name == "type"
                ):
                    self.add_message("bad-super-call", node=call, args=("type",))
                    continue

                # calling super(self.__class__, self) can lead to recursion loop
                # in derived classes
                if (
                    len(call.args) >= 2
                    and isinstance(call.args[1], astroid.Name)
                    and call.args[1].name == "self"
                    and isinstance(arg0, astroid.Attribute)
                    and arg0.attrname == "__class__"
                ):
                    self.add_message(
                        "bad-super-call", node=call, args=("self.__class__",)
                    )
                    continue

                try:
                    supcls = call.args and next(call.args[0].infer(), None)
                except astroid.InferenceError:
                    continue

                if klass is not supcls:
                    name = None
                    # if supcls is not Uninferable, then supcls was infered
                    # and use its name. Otherwise, try to look
                    # for call.args[0].name
                    if supcls:
                        name = supcls.name
                    elif call.args and hasattr(call.args[0], "name"):
                        name = call.args[0].name
                    if name:
                        self.add_message("bad-super-call", node=call, args=(name,))

    visit_asyncfunctiondef = visit_functiondef
def register(linter):
    """Auto-registration hook invoked by pylint's plugin loader."""
    checker = NewStyleConflictChecker(linter)
    linter.register_checker(checker)
|
CLVsol/clvsol_odoo_addons | clv_family_aux/wizard/__init__.py | Python | agpl-3.0 | 281 | 0 | # -*- coding: utf-8 -*-
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
# License AGPL-3.0 or later ( | http://www.gnu.org/licenses/agpl).
from . import family_aux_mass_edit
from . import family_associate_to_family_aux |
from . import family_aux_associate_to_address_aux
|
eth-cscs/production | spack/reframe/src/spack_util/spacklib.py | Python | gpl-3.0 | 10,153 | 0.002068 | import copy
import re
import semver
import glob
import os
import re
def parse_version(version_str):
    """Parse *version_str* into a ``semver.VersionInfo``.

    Handles two vendor-specific layouts in addition to plain semver:
    Intel-style four-component versions (``a.b.c.d`` -- the fourth
    component is stored in the prerelease field) and bare
    ``major.minor`` versions (patch level defaults to 0).
    """
    intel_version_style = re.search(r'^(\d+)\.(\d+)\.(\d+)\.(\d+)$', version_str)
    major_minor_style = re.search(r'^(\d+)\.(\d+)$', version_str)
    if intel_version_style:
        major = intel_version_style.group(1)
        minor = intel_version_style.group(2)
        patchlevel = intel_version_style.group(3)
        prerelease = intel_version_style.group(4)
        # NOTE(review): components are passed as strings; semver appears to
        # accept that, but confirm against the pinned semver version.
        ret = semver.VersionInfo(major, minor, patchlevel, prerelease)
    elif major_minor_style:
        major = major_minor_style.group(1)
        minor = major_minor_style.group(2)
        ret = semver.VersionInfo(major, minor, 0)
    else:
        # Anything else must be strict semver.
        ret = semver.VersionInfo.parse(version_str)
    return ret
def get_highest_version(compilers):
    """Return the largest key of *compilers* parsed as a version.

    Falls back to version ``0.0.0`` when the mapping is empty.
    """
    highest = parse_version("0.0.0")
    for candidate in compilers:
        parsed = parse_version(candidate)
        if highest < parsed:
            highest = parsed
    return highest
def get_module_available_versions(name, module_path=os.path.join(os.sep, 'opt', 'cray', 'modulefiles')):
    '''List the available versions of module ``name`` under ``module_path``.

    ``.version`` marker files are excluded.

    :returns: ``list`` on success.
    '''
    versions = []
    for entry in glob.glob(os.path.join(module_path, name, '*')):
        if ".version" in entry:
            continue
        versions.append(os.path.basename(entry))
    return versions
def get_cdts_modulerc_files(include_cdts=[r'cdt/']):
    '''Get all available modulerc files defined inside the cdt modules, except for the ``include_cdts`` cdts.

    :returns: ``list`` on success.
    '''
    # NOTE(review): mutable default argument; harmless while it is only
    # read, but confirm no caller mutates the list.
    pe_path = os.path.join(os.sep, 'opt', 'cray', 'pe')
    modulerc_files = glob.glob(os.path.join(pe_path, '*', '*', 'modulerc'))
    # NOTE(review): because the already-filtered list is re-filtered on each
    # iteration, multiple ``include_cdts`` patterns are effectively ANDed,
    # not ORed -- confirm this is intended (the default has one entry).
    for cdt in include_cdts:
        modulerc_files = [m for m in modulerc_files if cdt in m and not 'default' in m]
    return modulerc_files
def get_cdt_info_from_modulerc_file(modulerc_file):
    """Extract the ``(name, version)`` pair from a cdt modulerc path."""
    match = re.search(r'pe/(?P<name>\S+)\/(?P<version>\S+)/modulerc', modulerc_file)
    return match.group('name'), match.group('version')
def get_full_cdt_name(cdt_name, cdt_version):
    """Join a cdt name and version into the ``name/version`` module form."""
    return '/'.join((cdt_name, cdt_version))
def get_default_package_versions_from_modulerc(package_name, modulerc_file):
    """Return the first ``package_name/<version>`` version found in *modulerc_file*.

    :param package_name: module name to search for.  NOTE(review): it is
        interpolated into a regex without ``re.escape`` -- confirm names
        never contain regex metacharacters.
    :param modulerc_file: path of the modulerc file to scan.
    :raises ValueError: if no matching version line is found.
    """
    # repaired: the ``open`` call below was corrupted by a stray '|' in the source.
    reg = re.compile(package_name + r'/(?P<version>\S+)')
    with open(modulerc_file) as f:
        contents = f.read()
    matches = reg.findall(contents)
    if matches:
        return matches[0]
    raise ValueError(f'cannot get module version for {package_name}')
def get_spack_generated_compilers(compiler_type, spack_compilers):
    """Collect compilers of *compiler_type* from a spack ``compilers.yaml`` dict.

    :param compiler_type: substring matched against each compiler ``spec``
        (e.g. ``'gcc'``).
    :param spack_compilers: parsed ``{'compilers': [{'compiler': {...}}, ...]}``
        mapping; entries without ``compiler``/``spec`` keys are skipped.
    :returns: dict mapping version string (text after ``@`` in the spec) to
        the compiler entry.
    """
    # repaired: the assignment of ``com`` was corrupted by a stray '|' in the source.
    ret = {}
    if not 'compilers' in spack_compilers:
        return ret
    for compiler in spack_compilers['compilers']:
        if not 'compiler' in compiler:
            continue
        com = compiler['compiler']
        if not 'spec' in com:
            continue
        spec = com['spec']
        if compiler_type in spec:
            version = spec.split('@')[1]
            # excluding the duplicate entries that have empty modules
            # this happens with the cray supported compilers
            # OS compilers should be included with empty modules entries
            if version in ret:
                if not ret[version]['modules']:
                    ret[version] = com
            else:
                ret[version] = com
    return ret
def get_spack_os_compilers(compiler_type, spack_compilers):
    """Return only OS-provided compilers, i.e. entries whose ``modules``
    list is present but empty."""
    found = get_spack_generated_compilers(compiler_type, copy.deepcopy(spack_compilers))
    for version, entry in list(found.items()):
        if entry.get('modules'):
            del found[version]
    return found
def get_spack_cdt_compilers(compiler_type, spack_compilers):
    """Return only cdt/module-backed compilers, i.e. entries whose
    ``modules`` list is present and non-empty."""
    found = get_spack_generated_compilers(compiler_type, copy.deepcopy(spack_compilers))
    for version, entry in list(found.items()):
        if 'modules' in entry and not entry['modules']:
            del found[version]
    return found
def extract_all_spack_generated_packages(spack_packages):
    """Deep-copy and return the ``packages: all`` section, or ``{}`` when
    either key is absent."""
    pkgs = spack_packages.get('packages', {})
    if 'all' not in pkgs:
        return {}
    return copy.deepcopy(pkgs['all'])
def convert_packages_to_legacy(pkgs):
    """Convert new-style spack ``externals`` package entries to the legacy
    ``modules``/``paths`` layout.

    A package that ends up with no modules and no paths loses its
    ``buildable`` flag; packages whose converted entry is empty are
    dropped from the result entirely.
    """
    legacy = {}
    for spack_name, pkg in pkgs.items():
        converted = {}
        if 'buildable' in pkg:
            converted['buildable'] = pkg['buildable']
        if 'variants' in pkg:
            converted['variants'] = pkg['variants']
        modules = {}
        paths = {}
        for entry in pkg.get('externals', []):
            if 'spec' not in entry:
                continue
            if 'modules' in entry:
                modules[entry['spec']] = entry['modules'][0]
            elif 'prefix' in entry:
                paths[entry['spec']] = entry['prefix']
        if modules:
            converted['modules'] = modules
        if paths:
            converted['paths'] = paths
        if not modules and not paths:
            converted.pop('buildable', None)
        if converted:
            legacy[spack_name] = converted
    return legacy
def get_spack_pkg_name(pkg_name, pkg_data):
    """Return the spack package name, honouring a ``name`` override in *pkg_data*."""
    return pkg_data.get('name', pkg_name)
def get_spack_pkg_variant(pkg_data):
    """Return the variant string from *pkg_data*, or ``''`` when unset."""
    return pkg_data.get('variants', '')
def get_pkg_properties(pkg_name, pkg_data, pkgs):
    """Resolve one package's spack name, variant string and property dict.

    Known packages are looked up in *pkgs*; unknown ones get a minimal
    dict built from the ``buildable``/``version`` keys of *pkg_data*.
    """
    spack_pkg_name = get_spack_pkg_name(pkg_name, pkg_data)
    variant = get_spack_pkg_variant(pkg_data)
    if spack_pkg_name in pkgs:
        pkg = pkgs[spack_pkg_name]
    else:
        pkg = {}
        for key in ('buildable', 'version'):
            if key in pkg_data:
                pkg[key] = pkg_data[key]
    return spack_pkg_name, variant, pkg
def add_to_external(pkg_name, pkg_data, pkg_version, compiler, external_specs):
    """Build an external spec entry for one package and register its name.

    Mutates *external_specs* (a set of already-emitted spec strings) in
    place; returns the new ``{'spec': ..., 'modules': [...]}`` entry, or
    an empty dict when the spec was already present.
    """
    ret = {}
    spack_pkg_name = get_spack_pkg_name(pkg_name, pkg_data)
    variant = get_spack_pkg_variant(pkg_data)
    if variant:
        variant = ' ' + variant
    # '%compiler' suffix only when a compiler was given.
    compiler_name = ' %' + compiler if compiler else ''
    # Translate the requested version through the optional version_map,
    # falling back to the map's 'default' entry.
    if 'version_map' in pkg_data:
        if pkg_version:
            if pkg_version in pkg_data['version_map']:
                pkg_version = pkg_data['version_map'][pkg_version]
            elif 'default' in pkg_data['version_map']:
                pkg_version = pkg_data['version_map']['default']
    if pkg_version:
        name = spack_pkg_name + '@' + pkg_version + variant + compiler_name
    else:
        name = spack_pkg_name + variant + compiler_name
    # Only emit each spec once across all calls sharing external_specs.
    if not name in external_specs:
        external_specs.add(name)
        ret['spec'] = name
        if 'modules' in pkg_data and pkg_version:
            ret['modules'] = [pkg_data['modules'] + '/' + pkg_version]
        elif 'modules' in pkg_data:
            ret['modules'] = [pkg_data['modules']]
    return ret
def compile_cdt_compilers_list(cdt_compilers, cdt_name, cdt_version, compiler_name):
    """Append *compiler_name* to the per-cdt list in *cdt_compilers* (in place).

    The key is the full ``name/version`` cdt identifier; a fresh list is
    created the first time a cdt is seen (idiomatic ``setdefault``).
    """
    full_cdt_name = get_full_cdt_name(cdt_name, cdt_version)
    cdt_compilers.setdefault(full_cdt_name, []).append(compiler_name)
def get_substring_pos_or_lastpos(string, substring, start=None, end=None):
    """Return ``string.find(substring, start, end)``, or the index of the
    last character of *string* when the substring is not found.

    Bug fix: the original dispatched on the *truthiness* of ``start``/
    ``end``, so an explicit ``start=0`` or ``end=0`` was silently treated
    as "not given"; ``None`` is now the only unset value.
    """
    if start is None:
        start = 0
    # str.find accepts end=None, meaning "search to the end of the string".
    cur = string.find(substring, start, end)
    if cur != -1:
        return cur
    return len(string) - 1
|
mccxj/leetcode | projecteuler/p0039_test.py | Python | apache-2.0 | 483 | 0.00207 | import unittest
class Test0039(unittest.TestCase):
def test_prob | lem(self):
m, num = 0, 0
for p in range(12, 1001):
total = 0
for a in range(1, p / 3 + 1):
for b in range(a + 1, (p - a) / 2 + 1):
c = p - a - b
if a * a + b * b == c * c:
total += 1
if m < total:
m = total
num = p
self.assertEqual(nu | m, 840) |
ixc/glamkit-blogtools | blogtools/models.py | Python | bsd-3-clause | 3,335 | 0.004198 | import datetime
from django.conf import settings
from django.db import models
from django.template.defaultfilters import slugify
from django.utils import timezone
from blogtools.utils.embargo import EmbargoedContent, EmbargoedContentPublicManager, EmbargoedContentPrivateManager
#TODO, put this in glamkit somewhere.
user_model = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
class CategoryModel(models.Model):
    """Abstract base model for blog-entry categories."""
    title = models.CharField(max_length=250)
    slug = models.SlugField(max_length=255, unique=True)

    class Meta:
        verbose_name = 'category'
        verbose_name_plural = 'categories'
        abstract = True

    def __unicode__(self):
        return self.title

    # URL namespace used for reversing; empty means "use the app_label".
    url_namespace = ''

    @models.permalink
    def get_absolute_url(self):
        # NB that URLs need exactly 2-digit months and dates, so use strftime.
        return ('%s:category' % (self.url_namespace or self._meta.app_label),
            [self.slug,]
        )

    def public_entries(self):
        # Only entries visible to the public (embargo-aware manager).
        return self.entries.model.public_objects.filter(category=self)

    @property
    def section_title(self):
        # Delegates to the related entry model's section_title class attribute.
        return self.entries.model.section_title
class EntryModel(EmbargoedContent):
    """
    A generic model for blog-esque navigation.
    """
    # repaired: get_absolute_url's def line and a line of save() were
    # corrupted by stray '|' characters in the source.
    title = models.CharField(max_length=250)
    slug = models.SlugField(max_length=255, unique=True)
    author = models.ForeignKey(user_model, related_name='%(app_label)s_entries', blank=True, null=True)
    byline_date = models.DateTimeField(default=timezone.now)

    objects = models.Manager()
    public_objects = EmbargoedContentPublicManager()
    private_objects = EmbargoedContentPrivateManager()

    section_title = "Blog"

    class Meta:
        verbose_name = "entry"
        verbose_name_plural = "entries"
        ordering = ['-byline_date',]
        abstract = True

    def __unicode__(self):
        return self.title

    # by default, inheriting models assume the url namespace == the app_label.
    # If you define another namespace, then either copy it to the url_namespace
    # attribute here, or override get_absolute_url
    url_namespace = ''

    @models.permalink
    def get_absolute_url(self):
        # NB that URLs need exactly 2-digit months and dates, so use strftime.
        return ('%s:detail' % (self.url_namespace or self._meta.app_label),
            [self.byline_date.year, self.slug,]
        )

    def save(self, *args, **kwargs):
        if not self.slug:
            self.slug = slugify(self.title)
        # Append the pk to slugs which have a collision with a pre-existing slug.
        if type(self).objects.filter(slug=self.slug).exclude(pk=self.pk).exists():
            self.slug = '%s-%s' % (self.slug, slugify(self.pk))
        super(EntryModel, self).save(*args, **kwargs)

    def get_content(self):
        # Empty string when the concrete model defines no ``content`` field.
        return getattr(self, 'content', "")

    def get_summary(self):
        # NOTE(review): no default here -- concrete models are expected to
        # define a ``summary`` attribute; confirm, else this raises.
        summary = getattr(self, 'summary')
        if not summary:
            return self.get_content()
        return summary
class CommentedItemModel(models.Model):
    """Abstract mixin adding a per-item "comments allowed" flag."""
    allow_comments = models.BooleanField(default=True)

    class Meta:
        abstract = True
class FeaturedItemModel(models.Model):
    """Abstract mixin adding a per-item "featured" flag."""
    is_featured = models.BooleanField(default=False,
        help_text="Is this a featured Item?")

    class Meta:
        abstract = True
hdinsight/hue | apps/beeswax/src/beeswax/api.py | Python | apache-2.0 | 26,505 | 0.012752 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import re
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import Http404
from django.utils.translation import ugettext as _
from thrift.transport.TTransport import TTransportException
from desktop.context_processors import get_app_name
from desktop.lib.django_util import JsonResponse
from desktop.lib.exceptions import StructuredThriftTransportException
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import force_unicode
from desktop.lib.parameterization import substitute_variables
from metastore import parser
import beeswax.models
from beeswax.data_export import upload
from beeswax.design import HQLdesign
from beeswax.conf import USE_GET_LOG_API
from beeswax.forms import QueryForm
from beeswax.server import dbms
from beeswax.server.dbms import expand_exception, get_query_server_config, QueryServerException, QueryServerTimeoutException
from beeswax.views import authorized_get_design, authorized_get_query_history, make_parameterization_form,\
safe_get_design, save_design, massage_columns_for_json, _get_query_handle_and_state, \
_parse_out_hadoop_jobs
from beeswax.models import Session
LOG = logging.getLogger(__name__)
def error_handler(view_fn):
    """Decorator turning uncaught view exceptions into JSON error responses.

    Http404 is re-raised untouched; any other exception is logged and
    serialised as ``{'status': ..., 'message': ...}``.
    """
    def decorator(request, *args, **kwargs):
        try:
            return view_fn(request, *args, **kwargs)
        except Http404, e:
            raise e
        except Exception, e:
            LOG.exception('error in %s' % view_fn)

            if not hasattr(e, 'message') or not e.message:
                message = str(e)
            else:
                message = force_unicode(e.message, strings_only=True, errors='replace')

            # An invalid operation handle means the server-side query state
            # is gone; mark the history entry as expired.
            if 'Invalid OperationHandle' in message and 'id' in kwargs:
                # Expired state.
                query_history = authorized_get_query_history(request, kwargs['id'], must_exist=False)
                if query_history:
                    query_history.set_to_expired()
                    query_history.save()

            response = {
                'status': -1,
                'message': message,
            }

            if re.search('database is locked|Invalid query handle|not JSON serializable', message, re.IGNORECASE):
                response['status'] = 2  # Frontend will not display this type of error
                LOG.warn('error_handler silencing the exception: %s' % e)
            return JsonResponse(response)
    return decorator
@error_handler
def autocomplete(request, database=None, table=None, column=None, nested=None):
    """JSON endpoint for metadata autocompletion (databases/tables/columns)."""
    app_name = get_app_name(request)
    query_server = get_query_server_config(app_name)
    # Superusers (or users with the impersonate permission) may query the
    # metastore on behalf of another user via ?doas=<username>.
    do_as = request.user
    if (request.user.is_superuser or request.user.has_hue_permission(action="impersonate", app="security")) and 'doas' in request.GET:
        do_as = User.objects.get(username=request.GET.get('doas'))
    db = dbms.get(do_as, query_server)

    response = _autocomplete(db, database, table, column, nested)
    return JsonResponse(response)
def _autocomplete(db, database=None, table=None, column=None, nested=None):
    """Fetch metastore metadata at the deepest level specified.

    Depending on which arguments are given, returns databases, tables,
    columns, or the parsed type tree of one (possibly nested) column.
    Errors are reported inside the response dict ('code'/'error').
    """
    response = {}

    try:
        if database is None:
            response['databases'] = db.get_databases()
        elif table is None:
            response['tables'] = db.get_tables(database=database)
        elif column is None:
            t = db.get_table(database, table)
            response['hdfs_link'] = t.hdfs_link
            response['columns'] = [column.name for column in t.cols]
            response['extended_columns'] = massage_columns_for_json(t.cols)
        else:
            col = db.get_column(database, table, column)
            if col:
                parse_tree = parser.parse_column(col.name, col.type, col.comment)
                if nested:
                    parse_tree = _extract_nested_type(parse_tree, nested)
                response = parse_tree
                # If column or nested type is scalar/primitive, add sample of values
                if parser.is_scalar_type(parse_tree['type']):
                    table_obj = db.get_table(database, table)
                    sample = db.get_sample(database, table_obj, column, nested)
                    if sample:
                        sample = set([row[0] for row in sample.rows()])
                        response['sample'] = sorted(list(sample))
            else:
                raise Exception('Could not find column `%s`.`%s`.`%s`' % (database, table, column))
    except (QueryServerTimeoutException, TTransportException), e:
        # Backend unavailable/slow: report as a 503 so the UI can retry.
        response['code'] = 503
        response['error'] = e.message
    except Exception, e:
        LOG.warn('Autocomplete data fetching error: %s' % e)
        response['code'] = 500
        response['error'] = e.message

    return response
@error_handler
def parameters(request, design_id=None):
    """Return the parameterization variables found in the posted query.

    repaired: the ``parameterization_form`` assignment was corrupted by a
    stray '|' in the source.
    """
    response = {'status': -1, 'message': ''}

    # Use POST request to not confine query length.
    if request.method != 'POST':
        # NOTE(review): only the message is set here; processing continues
        # for non-POST requests -- confirm that is intended upstream.
        response['message'] = _('A POST request is required.')

    parameterization_form_cls = make_parameterization_form(request.POST.get('query-query', ''))
    if parameterization_form_cls:
        parameterization_form = parameterization_form_cls(prefix="parameterization")
        response['parameters'] = [{'parameter': field.html_name, 'name': field.name} for field in parameterization_form]
        response['status'] = 0
    else:
        response['parameters'] = []
        response['status'] = 0

    return JsonResponse(response)
@error_handler
def execute_directly(request, query, design, query_server, tablename=None, **kwargs):
    """Submit *query* for execution and return the watch URL for polling.

    repaired: the ``authorized_get_design`` call was corrupted by a stray
    '|' in the source.
    """
    if design is not None:
        design = authorized_get_design(request, design.id)

    parameters = kwargs.pop('parameters', None)

    db = dbms.get(request.user, query_server)
    database = query.query.get('database', 'default')
    db.use(database)

    history_obj = db.execute_query(query, design)
    watch_url = reverse(get_app_name(request) + ':api_watch_query_refresh_json', kwargs={'id': history_obj.id})

    # Remember the substituted parameters so the query can be re-run later.
    if parameters is not None:
        history_obj.update_extra('parameters', parameters)
        history_obj.save()

    response = {
        'status': 0,
        'id': history_obj.id,
        'watch_url': watch_url,
        'statement': history_obj.get_current_statement(),
        'is_redacted': history_obj.is_redacted
    }

    return JsonResponse(response)
@error_handler
def watch_query_refresh_json(request, id):
query_history = authorized_get_query_history(request, id, must_exist=True)
db = dbms.get(request.user, query_history.get_query_server_config())
if not request.POST.get('next'): # We need this as multi query would fail as current query is closed
handle, state = _get_query_handle_and_state(query_history)
query_history.save_state(state)
# Go to next statement if asked to continue or when a statement with no dataset finished.
try:
if request.POST.get('next') or (not query_history.is_finished() and query_history.is_success() and not query_history.has_results):
query_history = db.execute_next_statement(query_history, request.POST.get('query-query'))
handle, state = _get_query_handle_and_state(query_history)
except QueryServerException, ex:
raise ex
except Exception, ex:
LOG.exception(ex)
handle, state = _get_query_handle_and_state(query_history)
try:
start_over = request.POST.get('log-start-over') == 'true'
log = db.get_log(handle, start_over=start_over)
except Exception, ex:
log = str(ex)
jobs = _parse_out_hadoop_jobs(log)
job_urls = massage_job_urls_for_json(jobs)
result = {
'status': -1,
'log': log,
'jobs': jobs,
'jobUrls': job_urls,
'isSuccess': query_history.is_success(),
'isFailure': query_history.is_failu |
naBe-/ants_visualization | board.py | Python | gpl-2.0 | 2,771 | 0.000361 | """
Gameboard representation
"""
import pygame
from ant import Ant
from tile import Tile, TILE_SIZE
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
GREEN = (0, 255, 0)
YELLOW = (255, 255, 0)
BROWN = (102, 51, 0)
class Board(object):
    """Renders the ant-colony world: pheromone tiles, the colony square,
    the food pile and every ant.

    repaired: two lines of __init__ were corrupted by stray '|'
    characters in the source; the colony/food drawing code that was
    duplicated between __init__ and update() now lives in helpers.
    """
    def __init__(self, width, height, colony, food):
        self.width = width
        self.height = height
        # Colony rectangle in pixel coordinates.
        self.colony = [
            colony[0]*TILE_SIZE, colony[1]*TILE_SIZE,
            TILE_SIZE, TILE_SIZE
        ]
        self.food = food
        screen_size = [width*TILE_SIZE, height*TILE_SIZE]
        self.screen = pygame.display.set_mode(screen_size)
        self.screen.fill(WHITE)
        self.tiles = []
        self.ants = {}
        # Populate grid
        for w in range(0, width):
            for h in range(0, height):
                square = Tile(w*TILE_SIZE, h*TILE_SIZE)
                self.tiles.append(square)
                rect, color = square.render()
                pygame.draw.rect(self.screen, color, rect)
        self._draw_colony()
        self._draw_food()
        pygame.display.set_caption("Ant colony visualization")
        pygame.display.flip()

    def _draw_colony(self):
        # The colony is a solid green square.
        pygame.draw.rect(self.screen, GREEN, self.colony)

    def _draw_food(self):
        # The food pile is a yellow triangle filling its tile.
        food_pos = self.food['position']
        pile = [[food_pos[0]*TILE_SIZE+TILE_SIZE/2, food_pos[1]*TILE_SIZE],
                [food_pos[0]*TILE_SIZE, (food_pos[1]+1)*TILE_SIZE],
                [(food_pos[0]+1)*TILE_SIZE, (food_pos[1]+1)*TILE_SIZE]]
        pygame.draw.polygon(self.screen, YELLOW, pile)

    def update(self, data):
        """Redraw tiles, ants, colony and food from one simulation frame."""
        map(lambda tile, pheromones: tile.update(**pheromones),
            self.tiles, data['board'])
        for tile in self.tiles:
            rect, color = tile.render()
            pygame.draw.rect(self.screen, color, rect)
        for ant_repr in data['ants']:
            if ant_repr['id'] in self.ants:
                ant = self.ants[ant_repr['id']]
                ant.position = ant_repr['position']
            else:
                ant = Ant(**ant_repr)
                self.ants[ant_repr['id']] = ant
            pos = map(lambda coord: coord*TILE_SIZE+TILE_SIZE/2, ant.position)
            # Ants carrying food are drawn yellow, otherwise green.
            ant_color = YELLOW if ant.carries_food else GREEN
            pygame.draw.circle(self.screen, ant_color, pos, TILE_SIZE/5)
        self._draw_colony()
        self._draw_food()
        pygame.display.flip()

    def exit(self):
        """Shut pygame down."""
        pygame.quit()
|
KaroDievas/car-sound-classification-with-keras | Main.py | Python | mit | 88 | 0.011364 | from M | odels import DataModel
dataModel = DataModel.DataModel()
dataModel.prepare | _data() |
OmeGak/indico | indico/modules/users/controllers.py | Python | mit | 29,032 | 0.003169 | # This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from collections import namedtuple
from io import BytesIO
from operator import attrgetter, itemgetter
from dateutil.relativedelta import relativedelta
from flask import flash, jsonify, redirect, request, session
from markupsafe import Markup, escape
from marshmallow import fields
from sqlalchemy.orm import joinedload, load_only, subqueryload, undefer
from sqlalchemy | .orm.exc import StaleDataError
from webargs import validate
from werkzeug.exceptions import BadRequest, Forbidden, NotFound
from indico.core import signals
from indico.core. | auth import multipass
from indico.core.db import db
from indico.core.db.sqlalchemy.util.queries import get_n_matching
from indico.core.marshmallow import mm
from indico.core.notifications import make_email, send_email
from indico.legacy.common.cache import GenericCache
from indico.modules.admin import RHAdminBase
from indico.modules.auth import Identity
from indico.modules.auth.models.registration_requests import RegistrationRequest
from indico.modules.auth.util import register_user
from indico.modules.categories import Category
from indico.modules.events import Event
from indico.modules.events.util import serialize_event_for_ical
from indico.modules.users import User, logger, user_management_settings
from indico.modules.users.forms import (AdminAccountRegistrationForm, AdminsForm, AdminUserSettingsForm, MergeForm,
SearchForm, UserDetailsForm, UserEmailsForm, UserPreferencesForm)
from indico.modules.users.models.emails import UserEmail
from indico.modules.users.operations import create_user
from indico.modules.users.util import (get_linked_events, get_related_categories, get_suggested_categories, merge_users,
search_users, serialize_user)
from indico.modules.users.views import WPUser, WPUserDashboard, WPUsersAdmin
from indico.util.date_time import now_utc
from indico.util.event import truncate_path
from indico.util.i18n import _
from indico.util.marshmallow import HumanizedDate, validate_with_message
from indico.util.signals import values_from_signal
from indico.util.string import make_unique_token
from indico.web.args import use_kwargs
from indico.web.flask.templating import get_template_module
from indico.web.flask.util import send_file, url_for
from indico.web.forms.base import FormDefaults
from indico.web.http_api.metadata import Serializer
from indico.web.rh import RHProtected, RHTokenProtected
from indico.web.util import jsonify_data, jsonify_form, jsonify_template
IDENTITY_ATTRIBUTES = {'first_name', 'last_name', 'email', 'affiliation', 'full_name'}
UserEntry = namedtuple('UserEntry', IDENTITY_ATTRIBUTES | {'profile_url', 'user'})
def get_events_in_categories(category_ids, user, limit=10):
    """Get all the user-accessible events in a given set of categories."""
    # "Today" is evaluated in the session's timezone so the cutoff matches
    # what the user sees.
    tz = session.tzinfo
    today = now_utc(False).astimezone(tz).date()
    query = (Event.query
             .filter(~Event.is_deleted,
                     Event.category_chain_overlaps(category_ids),
                     Event.start_dt.astimezone(session.tzinfo) >= today)
             .options(joinedload('category').load_only('id', 'title'),
                      joinedload('series'),
                      subqueryload('acl_entries'),
                      load_only('id', 'category_id', 'start_dt', 'end_dt', 'title', 'access_key',
                                'protection_mode', 'series_id', 'series_pos', 'series_count'))
             .order_by(Event.start_dt, Event.id))
    # The ACL check runs in Python; get_n_matching stops once `limit`
    # accessible events have been found.
    return get_n_matching(query, limit, lambda x: x.can_access(user))
class RHUserBase(RHProtected):
    """Base request handler for pages operating on a user profile.

    Resolves ``self.user`` from the ``user_id`` URL argument (defaulting
    to the session user) and optionally flashes status warnings.
    """
    # Whether to flash deleted/pending status messages on plain GETs.
    flash_user_status = True
    # Whether the special system user may be targeted by this handler.
    allow_system_user = False

    def _process_args(self):
        if not session.user:
            return
        self.user = session.user
        if 'user_id' in request.view_args:
            self.user = User.get(request.view_args['user_id'])
            if self.user is None:
                raise NotFound('This user does not exist')
            elif request.method == 'GET' and not request.is_xhr and self.flash_user_status:
                # Show messages about the user's status if it's a simple GET request
                if self.user.is_deleted:
                    if self.user.merged_into_id is not None:
                        msg = _('This user has been merged into <a href="{url}">another user</a>.')
                        flash(Markup(msg).format(url=url_for(request.endpoint, self.user.merged_into_user)), 'warning')
                    else:
                        flash(_('This user is marked as deleted.'), 'warning')
                if self.user.is_pending:
                    flash(_('This user is marked as pending, i.e. it has been attached to something but never '
                            'logged in.'), 'warning')
        if not self.allow_system_user and self.user.is_system:
            return redirect(url_for('users.user_profile'))

    def _check_access(self):
        RHProtected._check_access(self)
        if not self.user.can_be_modified(session.user):
            raise Forbidden('You cannot modify this user.')
class RHUserDashboard(RHUserBase):
    """Render the user dashboard: related categories, upcoming events in
    them, category suggestions and the user's linked events."""
    # Event roles grouped by how they are displayed on the dashboard.
    management_roles = {'conference_creator', 'conference_chair', 'conference_manager', 'session_manager',
                        'session_coordinator', 'contribution_manager'}
    reviewer_roles = {'paper_manager', 'paper_judge', 'paper_content_reviewer', 'paper_layout_reviewer',
                      'contribution_referee', 'contribution_editor', 'contribution_reviewer', 'abstract_reviewer',
                      'track_convener'}
    attendance_roles = {'contributor', 'contribution_submission', 'abstract_submitter', 'abstract_person',
                        'registration_registrant', 'survey_submitter', 'lecture_speaker'}

    def _process(self):
        self.user.settings.set('suggest_categories', True)
        categories = get_related_categories(self.user)
        categories_events = []
        if categories:
            category_ids = {c['categ'].id for c in categories.itervalues()}
            categories_events = get_events_in_categories(category_ids, self.user)
        # Linked events from one week ago onwards, flagged per role group.
        from_dt = now_utc(False) - relativedelta(weeks=1, hour=0, minute=0, second=0)
        linked_events = [(event, {'management': bool(roles & self.management_roles),
                                  'reviewing': bool(roles & self.reviewer_roles),
                                  'attendance': bool(roles & self.attendance_roles)})
                         for event, roles in get_linked_events(self.user, from_dt, 10).iteritems()]
        return WPUserDashboard.render_template('dashboard.html', 'dashboard',
                                               user=self.user,
                                               categories=categories,
                                               categories_events=categories_events,
                                               suggested_categories=get_suggested_categories(self.user),
                                               linked_events=linked_events)
class RHExportDashboardICS(RHTokenProtected):
@use_kwargs({
'from_': HumanizedDate(data_key='from', missing=lambda: now_utc(False) - relativedelta(weeks=1)),
'include': fields.List(fields.Str(), missing={'linked', 'categories'}),
'limit': fields.Integer(missing=100, validate=lambda v: 0 < v <= 500)
})
def _process(self, from_, include, limit):
categories = get_related_categories(self.user)
categories_events = []
if categories:
category_ids = {c['categ'].id for c in categories.itervalues()}
categories_events = get_events_in_categories(category_ids, self.user, limit=limit)
linked_events = get_linked_events(
self.user,
from_,
limit=limit,
load_also=('description', 'own_room_id', 'own_venue_id', 'own_room_name', 'own_venue_nam |
franek/weboob | modules/parolesmania/backend.py | Python | agpl-3.0 | 1,772 | 0.000564 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the Li | cense, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied w | arranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.lyrics import ICapLyrics, SongLyrics
from weboob.tools.backend import BaseBackend
from .browser import ParolesmaniaBrowser
from urllib import quote_plus
__all__ = ['ParolesmaniaBackend']
class ParolesmaniaBackend(BaseBackend, ICapLyrics):
    """Weboob backend for the parolesmania.com lyrics website."""

    NAME = 'parolesmania'
    MAINTAINER = u'Julien Veyssier'
    EMAIL = 'julien.veyssier@aiur.fr'
    VERSION = '0.f'
    DESCRIPTION = 'parolesmania lyrics website'
    LICENSE = 'AGPLv3+'
    BROWSER = ParolesmaniaBrowser

    def create_default_browser(self):
        # The site needs no credentials or browser options.
        return self.create_browser()

    def get_lyrics(self, id):
        # Delegate straight to the browser layer.
        return self.browser.get_lyrics(id)

    def iter_lyrics(self, criteria, pattern):
        # The site expects a URL-quoted, UTF-8 encoded search pattern.
        encoded_pattern = quote_plus(pattern.encode('utf-8'))
        return self.browser.iter_lyrics(criteria, encoded_pattern)

    def fill_songlyrics(self, songlyrics, fields):
        # Lazily fetch the lyrics body only when the caller asked for it.
        if 'content' not in fields:
            return songlyrics
        fetched = self.get_lyrics(songlyrics.id)
        songlyrics.content = fetched.content
        return songlyrics

    OBJECTS = {
        SongLyrics: fill_songlyrics
    }
|
rwl/traitsbackendpyjamas | enthought/traits/ui/pyjd/__init__.py | Python | mit | 1,896 | 0.002637 | #------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# Copyright (c) 2009, Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#---------------------------------------------- | --------------------------------
""" Defines the concrete implementations of the traits T | oolkit interface for
the Pyjamas web application user interface.
"""
# Declare this directory as a namespace package so other distributions can
# contribute modules under the same package name.
__import__('pkg_resources').declare_namespace(__name__)
#------------------------------------------------------------------------------
#  Define the reference to the exported GUIToolkit object:
#------------------------------------------------------------------------------
import toolkit
# Reference to the GUIToolkit object for Pyjamas.
# NOTE: this rebinds the name ``toolkit`` from the module to the toolkit
# instance, so the module itself is no longer reachable afterwards.
toolkit = toolkit.GUIToolkit()
# EOF -------------------------------------------------------------------------
|
svdgraaf/appie | appie/api.py | Python | gpl-2.0 | 1,743 | 0.001721 | import requests
class Api(object):
    """Minimal client for the Albert Heijn (ah.nl) shopping-list web API.

    Authenticates with the given credentials and manipulates the user's
    default shopping list (list id 0).
    """

    # Class-level defaults; real values are assigned per instance.
    _username = ''
    _password = ''
    _cookies = {}
    _logged_in = False
    _shoppinglist = 0
    _list = None

    def __init__(self, username, password, login=True):
        """Store credentials and, unless ``login`` is False, sign in at once."""
        self._username = username
        self._password = password
        # Bug fix: the ``login`` flag used to be ignored; honour it so a
        # caller can defer authentication (default behaviour is unchanged).
        if login:
            self.login(username, password)

    def login(self, username, password):
        """POST the credentials and keep the session cookies for later calls."""
        url = 'https://www.ah.nl/mijn/inloggen/basis'
        payload = {
            'userName': username,
            'password': password
        }
        response = requests.post(url, data=payload)
        self._cookies = response.cookies

    def add(self, product_id, quantity=1):
        """Add ``quantity`` of ``product_id`` to the shopping list.

        Returns True when the service answered 200 OK, False otherwise.
        """
        payload = {
            'type': 'PRODUCT',
            'item': {
                'id': str(product_id)
            },
            'quantity': int(quantity),
            'originCode': "PSE"
        }
        url = 'http://www.ah.nl/service/rest/shoppinglists/%d/items' % self._shoppinglist
        response = requests.post(url, cookies=self._cookies, json=payload)
        return response.status_code == 200

    def _update_list(self):
        """Fetch the current shopping list and cache it on the instance."""
        url = 'http://www.ah.nl/service/rest/shoppinglists/0/'
        # Bug fix: this used to pass ``json=payload`` with ``payload``
        # undefined, raising NameError on every call.
        response = requests.get(url, cookies=self._cookies)
        self._list = response.json()
        return self._list

    @property
    def list(self):
        """The cached shopping list (None until fetched)."""
        return self._list

    def is_on_list(self, product_id):
        """Return the list-entry id for ``product_id``, or None if absent.

        Fetches the list first when it has not been cached yet.
        """
        # Bug fix: the original refreshed only when a list was already cached
        # and therefore crashed on the first call (iterating over None).
        if self._list is None:
            self._update_list()
        for entry in self._list['items']['_embedded']['items']:
            if entry['item']['id'] == product_id:
                return entry['id']
        # @TODO: return proper exception?
        return None
|
gpalsingh/dspbooksspider | dspbooks/pipelines.py | Python | mit | 1,349 | 0.003706 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import scrapy
import os
from scrapy.pipelines.files import FilesPipeline
from urllib import unquote, quote
class DspbooksPipeline(FilesPipeline):
    """Files pipeline that mirrors the remote URL path layout on disk."""

    def file_path(self, request, response=None, info=None):
        def _deprecation_warning():
            # Backwards compatibility shim for the old file_key(url) API.
            from scrapy.exceptions import ScrapyDeprecationWarning
            import warnings
            warnings.warn('FilesPipeline.file_key(url) method is deprecated, please use '
                          'file_path(request, response=None, info=None) instead',
                          category=ScrapyDeprecationWarning, stacklevel=1)

        # Legacy callers may pass a bare URL instead of a Request object.
        if isinstance(request, scrapy.Request):
            url = request.url
        else:
            _deprecation_warning()
            url = request

        # Honour a subclass that still overrides the deprecated file_key().
        if not hasattr(self.file_key, '_base'):
            _deprecation_warning()
            return self.file_key(url)

        # Drop the scheme/host (first four '/'-separated parts) and keep the rest.
        segments = unquote(url).split('/')
        return os.path.join(*segments[4:])
| |
googleads/google-ads-python | google/ads/googleads/v9/services/services/feed_placeholder_view_service/client.py | Python | apache-2.0 | 19,093 | 0.001257 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v9.resources.types import feed_placeholder_view
from google.ads.googleads.v9.services.types import feed_placeholder_view_service
from .transports.base import (
FeedPlaceholderViewServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import FeedPlaceholderViewServiceGrpcTransport
class FeedPlaceholderViewServiceClientMeta(type):
    """Metaclass for the FeedPlaceholderViewService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """

    _transport_registry = OrderedDict()  # type: Dict[str, Type[FeedPlaceholderViewServiceTransport]]
    _transport_registry["grpc"] = FeedPlaceholderViewServiceGrpcTransport

    def get_transport_class(
        cls, label: str = None,
    ) -> Type[FeedPlaceholderViewServiceTransport]:
        """Return an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # With no label, fall back to the first registered transport.
        if not label:
            return next(iter(cls._transport_registry.values()))
        # An unknown label raises KeyError, matching plain dict lookup.
        return cls._transport_registry[label]
class FeedPlaceholderViewServiceClient(
metaclass=FeedPlaceholderViewServiceClientMeta
):
"""Service to fetch feed placeholder views."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedPlaceholderViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FeedPlaceholderViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
    @property
    def transport(self) -> FeedPlaceholderViewServiceTransport:
        """Return the transport used by the client instance.

        Returns:
            FeedPlaceholderViewServiceTransport: The transport used by the client instance.
        """
        # NOTE(review): ``_transport`` is presumably created in ``__init__``,
        # which lies outside this chunk — confirm before relying on it.
        return self._transport
    def __enter__(self):
        # Context-manager support: ``with client: ...`` closes the transport
        # on exit (see __exit__ below).
        return self

    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.

        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the transport
            and may cause errors in other clients!
        """
        # NOTE(review): parameter ``type`` shadows the builtin; kept for
        # signature compatibility with the context-manager protocol.
        self.transport.close()
@staticmethod
def feed_placeholder_view_path(
customer_id: str, placeholder_type: str,
) -> str:
"""Return a fully-qualified feed_placeholder_view string."""
return "customers/{customer_id}/feedPlaceholderViews/{placeholder_type}".format(
customer_id=customer_id, placeholder_type=placeholder_type,
)
@staticmethod
def parse_feed_placeholder_view_path(path: str) -> Dict[str, str]:
"""Parse a feed_placeholder_view path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/feedPlaceholderViews/(?P<placeholder_type>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified o |
vgrem/SharePointOnline-REST-Python-Client | office365/sharepoint/helpers/querystring_builder.py | Python | mit | 1,822 | 0.001647 | import logging
from office365.sharepoint.helpers.utils import to_camel
logger = logging.getLogger(__name__)
class QueryStringBuilder:
    """Map a web-style querystring filter dict to a SharePoint OData filter."""

    # Operators whose values must be formatted as datetime'...' literals.
    date_operators = ['ge', 'gt', 'le', 'lt']
    # web operator suffix -> OData operator
    mapping_operator = {
        'gte': 'ge',
        'gt': 'gt',
        'lte': 'le',
        'lt': 'lt',
        'not': 'ne',
        'contains': 'substringof'
    }
    # Class-level defaults kept for backwards compatibility; instances get
    # their own copies in __init__ (see the bug note there).
    search = []
    filters = {}

    def __init__(self, filters):
        """:param filters: mapping of '<field>__<op>' -> value (may be None)."""
        super().__init__()
        # Bug fix: previously a falsy ``filters`` left ``self.filters``
        # pointing at the shared class-level dict, so mutations leaked
        # between instances. Always bind a per-instance dict instead.
        self.filters = filters if filters else {}

    def get_filter_querystring(self):
        """Build the OData ``$filter`` expression from ``self.filters``."""
        filter_queries = []
        for filter_name, filter_value in self.filters.items():
            # operator: trailing '__<op>' suffix, defaulting to equality
            querystring_operator = filter_name.split('__')[-1]
            operator = self.mapping_operator.get(querystring_operator, 'eq')
            # field name: leading part, converted to SharePoint camelCase
            filter_name = to_camel(filter_name.split('__')[0])
            if operator in self.date_operators:
                values = ["{}T00:00:00Z".format(filter_value)]  # e.g. 2016-03-26
                query = ' or '.join([f"{filter_name} {operator} datetime'{value}'" for value in values])
            elif operator == 'substringof':
                values = filter_value.split(',')
                query = ' or '.join([f"{operator}('{value}', {filter_name})" for value in values])
            else:
                values = filter_value.split(',')
                query = ' or '.join([f"{filter_name} {operator} '{value}'" for value in values])
            if len(values) > 1:
                query = f'({query})'
            filter_queries.append(query)
            logger.info(query)
        # ''.join already returns str; the redundant str() wrapper was removed.
        return " and ".join(filter_queries)

    def get_querystring(self):
        """Return the filter expression, or '' when there are no filters."""
        return self.get_filter_querystring() or ''
|
Tchanders/socorro | socorro/unittest/collector/test_wsgi_breakpad_collector.py | Python | mpl-2.0 | 17,334 | 0.001154 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import hashlib
import StringIO
import gzip
import web
import mock
from nose.tools import eq_, ok_
from datetime import datetime
from contextlib import closing
from configman.dotdict import DotDict
from socorro.collector.wsgi_breakpad_collector import BreakpadCollector
from socorro.collector.throttler import ACCEPT, IGNORE, DEFER
from socorro.unittest.testbase import TestCase
class ObjectWithValue(object):
    """Tiny stand-in for form fields that expose their data via ``.value``."""

    def __init__(self, v):
        self.value = v
class TestCollectorApp(TestCase):
def get_standard_config(self):
config = DotDict()
config.logger = mock.MagicMock()
config.throttler = mock.MagicMock()
config.collector = DotDict()
config.collector.collector_class = BreakpadCollector
config.collector.dump_id_prefix = 'bp-'
config.collector.dump_field = 'dump'
config.collector.accept_submitted_crash_id = False
config.collector.accept_submitted_legacy_processing = False
config.collector.checksum_method = hashlib.md5
config.crash_storage = mock.MagicMock()
return config
def test_setup(self):
config = self.get_standard_config()
c = BreakpadCollector(config)
eq_(c.config, config)
eq_(c.logger, config.logger)
eq_(c.throttler, config.throttler)
eq_(c.crash_storage, config.crash_storage)
eq_(c.dump_id_prefix, 'bp-')
eq_(c.dump_field, 'dump')
def test_make_raw_crash(self):
config = self.get_standard_config()
form = DotDict()
form.ProductName = 'FireSquid'
form.Version = '99'
form.dump = 'fake dump'
form.some_field = '\x0023'
form.some_other_field = ObjectWithValue('XYZ')
class BreakpadCollectorWithMyForm(config.collector.collector_class):
def _form_as_mapping(self):
return form
c = BreakpadCollectorWithMyForm(config)
rc, dmp = c._get_raw_crash_from_form()
eq_(rc.ProductName, 'FireSquid')
eq_(rc.Version, '99')
eq_(rc.some_field, '23')
eq_(rc.some_other_field, 'XYZ')
    # Decorators are applied bottom-up, so the bottom-most patch ('web')
    # supplies the first mock argument.
    @mock.patch('socorro.collector.wsgi_breakpad_collector.time')
    @mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
    @mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
    @mock.patch('socorro.collector.wsgi_breakpad_collector.web')
    def test_POST(self, mocked_web, mocked_webapi, mocked_utc_now, mocked_time):
        """An accepted crash POST is scrubbed, checksummed and saved."""
        config = self.get_standard_config()
        c = BreakpadCollector(config)
        # Simulated multipart form; the leading NUL in ProductName must be
        # scrubbed by the collector.
        rawform = DotDict()
        rawform.ProductName = '\x00FireSquid'
        rawform.Version = '99'
        rawform.dump = DotDict({'value': 'fake dump', 'file': 'faked file'})
        rawform.aux_dump = DotDict({'value': 'aux_dump contents', 'file': 'silliness'})
        rawform.some_field = '23'
        rawform.some_other_field = ObjectWithValue('XYZ')
        form = DotDict(rawform)
        form.dump = rawform.dump.value
        # erc: the expected raw crash that save_raw_crash should receive,
        # including the MD5 checksums of both dump payloads
        # (config.collector.checksum_method is hashlib.md5).
        erc = DotDict()
        erc.ProductName = 'FireSquid'
        erc.Version = '99'
        erc.some_field = '23'
        erc.some_other_field = 'XYZ'
        erc.legacy_processing = ACCEPT
        erc.timestamp = 3.0
        erc.submitted_timestamp = '2012-05-04T15:10:00'
        erc.throttle_rate = 100
        erc.dump_checksums = {
            'dump': '2036fd064f93a0d086cf236c5f0fd8d4',
            'aux_dump': 'aa2e5bf71df8a4730446b2551d29cb3a',
        }
        erc = dict(erc)
        # Pin the clock and the web layer so the result is deterministic.
        mocked_web.input.return_value = form
        mocked_webapi.rawinput.return_value = rawform
        mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
        mocked_time.time.return_value = 3.0
        c.throttler.throttle.return_value = (ACCEPT, 100)
        r = c.POST()
        # Response embeds a generated crash id ending with the date ddmmyy.
        ok_(r.startswith('CrashID=bp-'))
        ok_(r.endswith('120504\n'))
        erc['uuid'] = r[11:-1]
        c.crash_storage.save_raw_crash.assert_called_with(
            erc,
            {'dump':'fake dump', 'aux_dump':'aux_dump contents'},
            r[11:-1]
        )
    @mock.patch('socorro.collector.wsgi_breakpad_collector.time')
    @mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
    @mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
    @mock.patch('socorro.collector.wsgi_breakpad_collector.web')
    def test_POST_reject_browser_with_hangid(
        self,
        mocked_web,
        mocked_webapi,
        mocked_utc_now,
        mocked_time
    ):
        """A browser-process hang report (HangID set) must be rejected
        with 'Unsupported=1' and never reach crash storage."""
        config = self.get_standard_config()
        c = BreakpadCollector(config)
        rawform = DotDict()
        # NUL-prefixed key: exercises field-name scrubbing as well.
        rawform[u'\u0000ProductName'] = 'FireSquid'
        rawform.Version = '99'
        rawform.dump = DotDict({'value': 'fake dump', 'file': 'faked file'})
        rawform.some_field = '23'
        rawform.some_other_field = ObjectWithValue('XYZ')
        rawform.HangID = 'xyz'
        rawform.ProcessType = 'browser'
        form = DotDict(rawform)
        form.dump = rawform.dump.value
        # erc is built but never expected to be saved in this scenario.
        erc = DotDict()
        erc.ProductName = 'FireSquid'
        erc.Version = '99'
        erc.some_field = '23'
        erc.some_other_field = 'XYZ'
        erc.legacy_processing = ACCEPT
        erc.throttle_rate = None
        erc.timestamp = 3.0
        erc.submitted_timestamp = '2012-05-04T15:10:00'
        erc = dict(erc)
        mocked_webapi.rawinput.return_value = rawform
        mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
        mocked_time.time.return_value = 3.0
        # Throttler says IGNORE: the collector must drop the report.
        c.throttler.throttle.return_value = (IGNORE, None)
        r = c.POST()
        eq_(r, "Unsupported=1\n")
        # Nothing may have been written to storage.
        ok_(not
            c.crash_storage.save_raw_crash.call_count
        )
    @mock.patch('socorro.collector.wsgi_breakpad_collector.time')
    @mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
    @mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
    @mock.patch('socorro.collector.wsgi_breakpad_collector.web')
    def test_POST_with_existing_crash_id(
        self,
        mocked_web,
        mocked_webapi,
        mocked_utc_now,
        mocked_time
    ):
        """With accept_submitted_crash_id False, a client-supplied uuid is
        discarded and a fresh crash id is generated."""
        config = self.get_standard_config()
        c = BreakpadCollector(config)
        rawform = DotDict()
        rawform.ProductName = 'FireSquid'
        rawform.Version = '99'
        rawform.dump = DotDict({'value': 'fake dump', 'file': 'faked file'})
        rawform.aux_dump = DotDict({'value': 'aux_dump contents', 'file': 'silliness'})
        rawform.some_field = '23'
        rawform.some_other_field = ObjectWithValue('XYZ')
        # Malformed, NUL-containing uuid supplied by the client.
        rawform.uuid = '332d798f-3cx\x0042-47a5-843f-a0f892140107'
        form = DotDict(rawform)
        form.dump = rawform.dump.value
        # Expected raw crash: note there is no trace of the submitted uuid.
        erc = DotDict()
        erc.ProductName = 'FireSquid'
        erc.Version = '99'
        erc.some_field = '23'
        erc.some_other_field = 'XYZ'
        erc.legacy_processing = ACCEPT
        erc.timestamp = 3.0
        erc.submitted_timestamp = '2012-05-04T15:10:00'
        erc.throttle_rate = 100
        erc.dump_checksums = {
            'dump': '2036fd064f93a0d086cf236c5f0fd8d4',
            'aux_dump': 'aa2e5bf71df8a4730446b2551d29cb3a',
        }
        erc = dict(erc)
        mocked_web.input.return_value = form
        mocked_webapi.rawinput.return_value = rawform
        mocked_utc_now.return_value = datetime(2012, 5, 4, 15, 10)
        mocked_time.time.return_value = 3.0
        c.throttler.throttle.return_value = (ACCEPT, 100)
        r = c.POST()
        # A brand-new crash id is produced, ending with the date ddmmyy.
        ok_(r.startswith('CrashID=bp-'))
        ok_(r.endswith('120504\n'))
        erc['uuid'] = r[11:-1]
        c.crash_storage.save_raw_crash.assert_called_with(
            erc,
            {'dump':'fake dump', 'aux_dump':'aux_dump contents'},
            r[11:-1]
        )
@mock.patch('socorro.collector.wsgi_breakpad_collector.time')
@mock.patch('socorro.collector.wsgi_breakpad_collector.utc_now')
@mock.patch('socorro.collector.wsgi_breakpad_collector.web.webapi')
@mock.patch('socorro. |
pkimber/login | example_login/base.py | Python | apache-2.0 | 6,137 | 0.000815 | # -*- encoding: utf-8 -*-
"""
Django settings for login project.
"""
from django.core.urlresolvers import reverse_lazy
# SECURITY NOTE(review): DEBUG must be False in production deployments.
DEBUG = True
TESTING = False
# These should be True when the site is served exclusively over HTTPS.
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
# We use the 'SITE_NAME' for the name of the database and the name of the
# cloud files container.
SITE_NAME = 'app_login'
ADMINS = (
    ('admin', 'code@pkimber.net'),
)
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    #'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# SECURITY NOTE(review): this key is committed to source control; a real
# deployment should load it from the environment, not the repository.
SECRET_KEY = '#9x3dk(nl82sihl7c^u_#--yp((!g2ehd_1pmp)fpgx=h9(l9='
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'reversion.middleware.RevisionMiddleware',
)
ROOT_URLCONF = 'example_login.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'example_login.wsgi.application'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
            # Make template errors loud instead of rendering silently as ''.
            'string_if_invalid': '**** INVALID EXPRESSION: %s ****',
        },
    },
]
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'example_login',
    'base',
    'login',
    'mail',
    'reversion',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# Celery
BROKER_URL = 'redis://localhost:6379/0'
CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
# https://kfalck.net/2013/02/21/run-multiple-celeries-on-a-single-redis
CELERY_DEFAULT_QUEUE = '{}'.format(SITE_NAME)
# http://celery.readthedocs.org/en/latest/userguide/tasks.html#disable-rate-limits-if-they-re-not-used
CELERY_DISABLE_RATE_LIMITS = True
# django-compressor
COMPRESS_ENABLED = False  # defaults to the opposite of DEBUG
# See the list of constants at the top of 'mail.models'
MAIL_TEMPLATE_TYPE = 'django'
DEFAULT_FROM_EMAIL = 'notify@pkimber.net'
# mandrill
#EMAIL_BACKEND = 'djrill.mail.backends.djrill.DjrillBackend'
#MANDRILL_API_KEY = get_env_variable('MANDRILL_API_KEY')
#MANDRILL_USER_NAME = get_env_variable('MANDRILL_USER_NAME')
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
FTP_STATIC_DIR = None
FTP_STATIC_URL = None
# URL where requests are redirected after login when the contrib.auth.login
# view gets no next parameter.
LOGIN_REDIRECT_URL = reverse_lazy('project.dash')
SENDFILE_BACKEND = 'sendfile.backends.development'
SENDFILE_ROOT = 'media-private'
|
sdpython/ensae_teaching_cs | _unittests/ut_td_1a/test_classique_permutation.py | Python | mit | 794 | 0.001259 | """
@brief test log(time=1s)
"""
import unittest
import itertools
from ensae_teaching_cs.td_1a.construction_classique import enumerate_permutations_recursive, enumerate_permutations
class TestClassiquesPermutation(unittest.TestCase):
    """Check the homemade permutation generators against itertools."""

    def test_permutation(self):
        self.maxDiff = None
        elements = list(range(5))
        recursive = [tuple(p) for p in enumerate_permutations_recursive(elements)]
        # 5! = 120 permutations expected from every implementation.
        self.assertEqual(len(recursive), 120)
        reference = [tuple(p) for p in itertools.permutations(elements)]
        self.assertEqual(len(reference), 120)
        self.assertEqual(set(reference), set(recursive))
        iterative = [tuple(p) for p in enumerate_permutations(elements)]
        self.assertEqual(len(iterative), 120)
        self.assertEqual(set(iterative), set(recursive))


if __name__ == "__main__":
    unittest.main()
|
datafiniti/Diamond | src/collectors/nfs/nfs.py | Python | mit | 8,613 | 0 | # coding=utf-8
"""
The NfsCollector collects nfs utilization metrics using /proc/net/rpc/nfs.
#### Dependencies
* /proc/net/rpc/nfs
"""
import diamond.collector
import os
class NfsCollector(diamond.collector.Collector):
PROC = '/proc/net/rpc/nfs'
def get_default_config_help(self):
config_help = super(NfsCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(NfsCollector, self).get_default_config()
config.update({
'enabled': False,
'path': 'nfs'
})
return config
def collect(self):
"""
Collect stats
"""
if os.access(self.PROC, os.R_OK):
results = {}
# Open file
file = open(self.PROC)
for line in file:
line = line.split()
if line[0] == 'net':
results['net.packets'] = line[1]
results['net.udpcnt'] = line[2]
results['net.tcpcnt'] = line[3]
results['net.tcpconn'] = line[4]
elif line[0] == 'rpc':
results['rpc.calls'] = line[1]
results['rpc.retrans'] = line[2]
results['rpc.authrefrsh'] = line[3]
elif line[0] == 'proc2':
results['v2.null'] = line[1]
results['v2.getattr'] = line[2]
results['v2.setattr'] = line[3]
results['v2.root'] = line[4]
results['v2.lookup'] = line[5]
results['v2.readlink'] = line[6]
results['v2.read'] = line[7]
results['v2.wrcache'] = line[8]
results['v2.write'] = line[9]
results['v2.create'] = line[10]
results['v2.remove'] = line[11]
results['v2.rename'] = line[12]
results['v2.link'] = line[13]
results['v2.symlink'] = line[14]
results['v2.mkdir'] = line[15]
results['v2.rmdir'] = line[16]
results['v2.readdir'] = line[17]
results['v2.fsstat'] = line[18]
elif line[0] == 'proc3':
results['v3.null'] = line[1]
results['v3.getattr'] = line[2]
results['v3.setattr'] = line[3]
results['v3.lookup'] = line[4]
results['v3.access'] = line[5]
results['v3.readlink'] = line[6]
results['v3.read'] = line[7]
results['v3.write'] = line[8]
results['v3.create'] = line[9]
results['v3.mkdir'] = line[10]
results['v3.symlink'] = line[11]
results['v3.mknod'] = line[12]
results['v3.remove'] = line[13]
results['v3.rmdir'] = line[14]
results['v3.rename'] = line[15]
results['v3.link'] = line[16]
results['v3.readdir'] = line[17]
results['v3.readdirplus'] = line[18]
results['v3.fsstat'] = line[19]
results['v3.fsinfo'] = line[20]
results['v3.pathconf'] = line[21]
results['v3.commit'] = line[22]
elif line[0] == 'proc4':
results['v4.null'] = line[1]
results['v4.read'] = line[2]
results['v4.write'] = line[3]
results['v4.commit'] = line[4]
results['v4.open'] = line[5]
results['v4.open_conf'] = line[6]
results['v4.open_noat'] = line[7]
results['v4.open_dgrd'] = line[8]
results['v4.close'] = line[9]
results['v4.setattr'] = line[10]
results['v4.fsinfo'] = line[11]
results['v4.renew'] = line[12]
results['v4.setclntid'] = line[13]
results['v4.confirm'] = line[14]
results['v4.lock'] = line[15]
results['v4.lockt'] = line[16]
results['v4.locku'] = line[17]
results['v4.access'] = line[18]
results['v4.getattr'] = line[19]
results['v4.lookup'] = line[20]
results['v4.lookup_root'] = line[21]
results['v4.remove'] = line[22]
results['v4.rename'] = line[23]
results['v4.link'] = line[24]
results['v4.symlink'] = line[25]
results['v4.create'] = line[26]
results['v4.pathconf'] = line[27]
results['v4.statfs'] = line[28]
results['v4.readlink'] = line[29]
results['v4.readdir'] = line[30]
try:
results['v4.server_caps'] = line[31]
except IndexError:
pass
try:
results['v4.delegreturn'] = line[32]
except IndexError:
pass
try:
results['v4.getacl'] = line[33]
except IndexError:
pass
try:
results['v4.setacl'] = line[34]
except IndexError:
pass
try:
results['v4.fs_locations'] = line[35]
except IndexError:
pass
try:
results['v4.rel_lkowner'] = line[36]
except IndexError:
pass
try:
results['v4.exchange_id'] = line[37]
except IndexError:
pass
try:
results['v4.create_ses'] = line[38]
except IndexError:
pass
try:
results['v4.destroy_ses'] = line[39]
except IndexError:
pass
try:
results['v4.sequence'] = line[40]
except IndexError:
pass
try:
results['v4.get_lease_t'] = line[41]
except IndexError:
pass
try:
results['v4.reclaim_comp'] = line[42]
except IndexError:
pass
try:
results['v4.layoutget'] = line[43]
except IndexError:
pass
try:
results['v4.layoutcommit'] = line[44]
except IndexError:
pass
try:
results['v4.layoutreturn'] = line[45]
except IndexError:
pass
try:
results['v4.getdevlist'] = line[46]
except IndexError:
pass
try:
results['v4.getdevinfo'] = line[47]
except IndexError:
pass
try: |
results['v4.ds_write'] = line[48]
except IndexError:
pass
try:
results['v4.ds | _commit'] = line[49]
except IndexError:
pass
try:
results['v4.getdevlist'] = line[50]
|
rth/PyAbel | examples/example_all_O2.py | Python | mit | 4,354 | 0.005282 | # -*- coding: utf-8 -*-
# This example compares the available inverse Abel transform methods
# currently - direct, hansenlaw, and basex
# processing the O2- photoelectron velocity-map image
#
# Note it transforms only the Q0 (top-right) quadrant
# using the fundamental transform code
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import abel
import collections
import matplotlib.pylab as plt
from time import time
# inverse Abel transform methods -----------------------------
# dictionary of method: function()
transforms = {
    "basex": abel.basex.basex_transform,
    "linbasex": abel.linbasex.linbasex_transform,
    "direct": abel.direct.direct_transform,
    "hansenlaw": abel.hansenlaw.hansenlaw_transform,
    "onion_bordas": abel.onion_bordas.onion_bordas_transform,
    "onion_dasch": abel.dasch.onion_peeling_transform,
    "three_point": abel.dasch.three_point_transform,
    "two_point" : abel.dasch.two_point_transform,
}
# sort dictionary alphabetically by method name so plots are ordered
transforms = collections.OrderedDict(sorted(transforms.items()))
# NOTE(review): under Python 3, transforms.keys() is a view and
# np.size(...) may not count its items -- len(transforms) is safer; confirm.
ntrans = np.size(transforms.keys()) # number of transforms
# Image: O2- VMI 1024x1024 pixel ------------------
IM = np.loadtxt('data/O2-ANU1024.txt.bz2')
# recenter the image to mid-pixel (odd image width)
IModd = abel.tools.center.center_image(IM, center="slice", odd_size=True)
h, w = IModd.shape
print("centered image 'data/O2-ANU2048.txt' shape = {:d}x{:d}".format(h, w))
# split image into quadrants
Q = abel.tools.symmetry.get_image_quadrants(IModd, reorient=True)
Q0 = Q[0]
Q0fresh = Q0.copy() # keep clean copy
print ("quadrant shape {}".format(Q0.shape))
# Intensity mask used for intensity normalization
# quadrant image region of bright pixels
mask = np.zeros(Q0.shape, dtype=bool)
mask[500:512, 358:365] = True
# process Q0 quadrant using each method --------------------
iabelQ = [] # keep inverse Abel transformed image
sp = [] # speed distributions
meth = [] # methods
for q, method in enumerate(transforms.keys()):
    Q0 = Q0fresh.copy() # top-right quadrant of O2- image
    print ("\n------- {:s} inverse ...".format(method))
    t0 = time()
    # inverse Abel transform using 'method'
    IAQ0 = transforms[method](Q0, direction="inverse", dr=0.1)
    print (" {:.1f} sec".format(time()-t0))
    # polar projection and speed profile
    radial, speed = abel.tools.vmi.angular_integration(IAQ0, origin=(0, 0),
                                                       dr=0.1)
    # normalize image intensity and speed distribution
    IAQ0 /= IAQ0[mask].max()
    speed /= speed[radial > 50].max()
    # keep data for plots
    iabelQ.append(IAQ0)
    sp.append((radial, speed))
    meth.append(method)
# reassemble image, each quadrant a different method
# plot inverse Abel transformed image slices, and respective speed distributions
ax0 = plt.subplot2grid((1, 2), (0, 0))
ax1 = plt.subplot2grid((1, 2), (0, 1))
def ann_plt(quad, subquad, txt):
    """Write label *txt* on ``ax0`` inside quadrant *quad* at sub-slot *subquad*."""
    # Angle is negated because numpy image coordinates count rows from the top.
    theta = -(30 + 30 * subquad + quad * 90) * np.pi / 180
    x = h / 2 + (h * 0.8) * np.cos(theta) / 2
    y = w / 2 + (w * 0.8) * np.sin(theta) / 2
    ax0.annotate(txt, (x, y), color="yellow", horizontalalignment='left')
# for < 4 images pad using a blank quadrant
r, c = Q0.shape
Q = np.zeros((4, r, c))
indx = np.triu_indices(iabelQ[0].shape[0])
iq = 0
# Place up to two methods per quadrant: the second occupies the upper
# triangle of the same quadrant.
for q in range(4):
    Q[q] = iabelQ[iq].copy()
    ann_plt(q, 0, meth[iq])
    ax1.plot(*(sp[iq]), label=meth[iq], alpha=0.3)
    iq += 1
    if iq < len(transforms):
        Q[q][indx] = np.triu(iabelQ[iq])[indx]
        ann_plt(q, 1, meth[iq])
        ax1.plot(*(sp[iq]), label=meth[iq], alpha=0.3)
        iq += 1
# reassemble image from transformed (part-)quadrants
im = abel.tools.symmetry.put_image_quadrants((Q[0], Q[1], Q[2], Q[3]),
                                             original_image_shape=IModd.shape)
ax0.axis('off')
ax0.set_title("inverse Abel transforms")
ax0.imshow(im, vmin=0, vmax=0.8)
ax1.set_title("speed distribution")
ax1.axis(ymin=-0.05, ymax=1.1, xmin=50, xmax=450)
ax1.legend(loc=0, labelspacing=0.1, fontsize=10, frameon=False)
plt.tight_layout()
# save a copy of the plot
plt.savefig('example_all_O2.png', dpi=100)
plt.show()
|
teamCarel/EyeTracker | setup.py | Python | lgpl-3.0 | 1,584 | 0.015152 | import sys,os
from cx_Freeze import setup, Executable
# Dependencies are automatically detected, but it might need fine tuning.

# Directory containing this setup.py.  os.path.dirname(os.path.abspath(...))
# is correct no matter how the script is invoked; the previous
# abspath(__file__).rsplit(__file__, 1)[0] trick returned '' whenever the
# script was run via an absolute path (abspath == __file__).
_base_dir = os.path.dirname(os.path.abspath(__file__))

shared_modules_base = os.path.join(_base_dir, 'src', 'shared_modules')
capture_base = os.path.join(_base_dir, 'src', 'capture')
eyetracker_base = os.path.join(_base_dir, 'src', 'eyetracker')

# Make the project packages importable while cx_Freeze scans for modules.
sys.path.append(os.path.join(shared_modules_base, ''))
sys.path.append(os.path.join(shared_modules_base, 'gl_utils'))
sys.path.append(os.path.join(shared_modules_base, 'calibration_routines'))
sys.path.append(os.path.join(shared_modules_base, 'calibration_routines', 'optimization_calibration'))
sys.path.append(os.path.join(shared_modules_base, 'math_helper'))
sys.path.append(os.path.join(shared_modules_base, 'video_capture'))
sys.path.append(os.path.join(eyetracker_base, ''))
sys.path.append(os.path.join(eyetracker_base, 'GUI'))
sys.path.append(os.path.join(capture_base, ''))
sys.path.append(os.path.join(capture_base, 'pupil_detectors'))

build_exe_options = {"packages": ["os", "multiprocessing", "glfw", "OpenGL", "numpy"], "excludes": ["tkinter"]}

# GUI applications require a different base on Windows (the default is for a
# console application).
base = None
if sys.platform == "win32":
    base = "Win32GUI"

setup( name = "eyetracker",
    version = "0.1",
    description ="Application for eyetracking.",
    options = {"build_exe": build_exe_options},
    executables = [Executable("./src/capture/main.py", base=base)])
drawquest/drawquest-web | website/canvas/tests/test_views.py | Python | bsd-3-clause | 6,390 | 0.001878 | import copy
import logging
import random
import urllib
from django.contrib.sessions.backends.cache import SessionStore
from django.contrib.sessions.models import Session
from django.core.urlresolvers import reverse
from django.http import Http404
import facebook
from apps.canvas_auth.models import User, AnonymousUser
from apps.signup.views import signup, get_signup_context
from canvas import bgwork, stickers, views, util, knobs
from canvas.models import (Visibility, get_system_user, Config, Category, FacebookUser,
EmailUnsubscribe, APIApp, APIAuthToken, CommentSticker, Comment)
from canvas.notifications.email_channel import EmailChannel
from canvas.notifications.notification_models import UserNotificationsSubscription
from canvas.templatetags import canvas_tags
from canvas.tests.tests_helpers import (CanvasTestCase, create_content, create_group, create_user, create_staff,
create_comment, create_gif_content, FakeRequest, pretty_print_etree)
from canvas.util import get_or_create, dumps
import configuration
from services import Services, override_service, FakeTimeProvider, FakeRandomProvider, FakeExperimentPlacer
class TestEmailViews(CanvasTestCase):
    """Tests for the /unsubscribe endpoint: token validation and the
    granular (per-action) vs blanket ("ALL") unsubscribe semantics."""

    def test_user_id_token_allows_unsubscribe(self):
        # A token derived from the user id unsubscribes even anonymously.
        user = create_user()
        url = "/unsubscribe?" + urllib.urlencode({
            'action': 'remixed',
            'token': util.token(user.id),
            'user_id': user.id,
        })
        self.assertTrue(user.kv.subscriptions.can_receive('remixed'))
        self.assertStatus(200, url, user=AnonymousUser())
        self.assertFalse(user.kv.subscriptions.can_receive('remixed'))

    def test_email_token_that_corresponds_to_user_allows_from_channel(self):
        # A token derived from the email address works the same way.
        user = create_user()
        url = "/unsubscribe?" + urllib.urlencode({
            'action': 'remixed',
            'token': util.token(user.email),
            'email': user.email,
        })
        self.assertTrue(user.kv.subscriptions.can_receive('remixed'))
        self.assertStatus(200, url, user=AnonymousUser())
        self.assertFalse(user.kv.subscriptions.can_receive('remixed'))

    def test_email_token_allows_unsubscribe_from_all(self):
        # Without an 'action', unsubscribing records a blanket EmailUnsubscribe.
        email = "foo@example.com"
        url = "/unsubscribe?" + urllib.urlencode({
            'token': util.token(email),
            'email': email,
        })
        self.assertFalse(EmailUnsubscribe.objects.get_or_none(email=email))
        self.assertStatus(200, url, user=AnonymousUser())
        self.assertTrue(EmailUnsubscribe.objects.get_or_none(email=email))

    def test_broken_token_ignored_for_logged_in_user(self):
        # A logged-in user does not need a valid token to unsubscribe.
        user = create_user()
        url = "/unsubscribe?" + urllib.urlencode({
            'action': 'remixed',
            'token': "GARBAGE DAY",
            'email': user.email,
        })
        self.assertTrue(user.kv.subscriptions.can_receive('remixed'))
        self.assertStatus(200, url, user=user)
        self.assertFalse(user.kv.subscriptions.can_receive('remixed'))

    def test_unsubscribe_page_without_user_id(self):
        user = create_user()
        resp = self.get('/unsubscribe?' + urllib.urlencode({
            'token': util.token(user.email),
            'email': user.email,
        }))
        self.assertNumCssMatches(0, resp, 'input[name="user_id"]')

    def test_unsubscribe_page_with_user_id(self):
        user = create_user()
        resp = self.get('/unsubscribe?' + urllib.urlencode({
            'token': util.token(user.email),
            'email': user.email,
            'user_id': user.id,
        }))
        selector = 'input[name="user_id"]'
        self.assertNumCssMatches(1, resp, 'input[name="user_id"]')

    def test_granular_unsubscribe(self):
        all_actions = EmailChannel.all_handled_actions()
        for action in all_actions:
            if action == 'newsletter':
                continue
            u = create_user()
            assert u.kv.subscriptions.can_receive(action)
            actions_dict = {}
            actions_dict = {action: "on"}
            self.validate_unsubscript(actions_dict, u)
            # NOTE(review): after unsubscribing from `action` this asserts the
            # user can STILL receive it -- looks inverted; confirm intent.
            assert u.kv.subscriptions.can_receive(action)

    def test_unsubscribe_headers(self):
        action = 'remixed'
        user = create_user()
        self.assertTrue(user.kv.subscriptions.can_receive(action, EmailChannel))
        self.assertStatus(200, "/unsubscribe?action="+action, user=user)
        self.assertFalse(user.kv.subscriptions.can_receive(action, EmailChannel))

    def test_granualr_unsubscribe_blanket_ban(self):
        all_actions = EmailChannel.all_handled_actions()
        # ALL has inverted semantics ... make sure it works.
        all_actions.append("ALL")
        # Reuse the same user
        canvas_user = create_user()
        action = "ALL"
        actions_dict = {action: "on"}
        unsubscriptions = self.validate_unsubscript(actions_dict, canvas_user, all_actions)
        for action in all_actions:
            # Ensure that we unsubscribed from all of them!
            assert unsubscriptions.get(action)
        action = "ALL"
        # Remove blanket subscription
        actions_dict = {}
        request = FakeRequest()
        views.handle_unsubscribe_post(canvas_user, actions_dict, request)
        unsubscriptions = views.get_unsubscriptions(canvas_user, all_actions)
        for action in all_actions:
            # Ensure that the user is now subscribed for everything, which is the default without the blanket ban.
            assert not unsubscriptions.get(action)

    def validate_unsubscript(self, actions_dict, canvas_user=None, all_actions=None):
        """POST `actions_dict` to the unsubscribe handler and check the
        resulting unsubscription map matches; returns that map."""
        if not canvas_user:
            canvas_user = create_user()
        if not all_actions:
            all_actions = EmailChannel.all_handled_actions()
        request = FakeRequest()
        views.handle_unsubscribe_post(canvas_user, actions_dict, request)
        unsubscriptions = views.get_unsubscriptions(canvas_user, all_actions)
        for action in all_actions:
            if action == 'newsletter':
                continue
            value = action
            if action == "ALL":
                # "ALL" is stored under the inverted key (False).
                value = not action
            if actions_dict.get(action) == "on":
                assert not unsubscriptions.get(value)
            else:
                assert unsubscriptions.get(value)
        return unsubscriptions
|
svenstaro/uni-projekt | hardware/registerbank.py | Python | gpl-3.0 | 674 | 0.008902 | from myhdl import *
def registerbank(clk, reset, we, addrx, addry, addrz, xout, yout, zin, amount = 16, bitwidth = 32, protect0=True):
    """MyHDL register bank: two combinational read ports, one clocked write port.

    clk, reset  -- clock and synchronous reset
    we          -- write enable; on a rising edge, zin is stored at addrz
    addrx/addry -- read addresses driving xout/yout combinationally
    addrz       -- write address
    amount      -- number of registers
    bitwidth    -- register width in bits
    protect0    -- when True, register 0 is read-only (writes ignored)
    """
    reg_data = [Signal(intbv(0)[bitwidth:]) for _ in range(amount)]

    @always_seq(clk.posedge, reset=reset)
    def write():
        assert addrx < amount
        assert addry < amount
        assert addrz < amount
        if we and not(protect0 and addrz == 0):
            reg_data[addrz].next = zin[bitwidth:]

    @always_comb
    def read():
        # Only the two read addresses matter here; the original asserted
        # addry twice (copy-paste slip) -- fixed to one check per port.
        assert addrx < amount
        assert addry < amount
        xout.next = reg_data[addrx]
        yout.next = reg_data[addry]

    return write, read
kevinhikali/ml_kevin | backup/unet/layers.py | Python | gpl-3.0 | 2,726 | 0.013206 | # tf_unet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tf_unet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tf_unet. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Aug 19, 2016
author: jakeret
'''
from __future__ import print_function, division, absolute_import, unicode_literals
import tensorflow as tf
def weight_variable(shape, stddev=0.1):
    """Trainable weight tensor drawn from a truncated normal distribution."""
    return tf.Variable(tf.truncated_normal(shape, stddev=stddev))
def weight_variable_devonc(shape, stddev=0.1):
    """Trainable weight tensor for the deconvolution (up-sampling) layers."""
    initial = tf.truncated_normal(shape, stddev=stddev)
    return tf.Variable(initial)
def bias_variable(shape):
    """Trainable bias tensor initialized to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W, keep_prob_):
    """Unpadded ('VALID') 2-D convolution of x with W, followed by dropout."""
    convolved = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')
    return tf.nn.dropout(convolved, keep_prob_)
def deconv2d(x, W, stride):
    """Transposed convolution: doubles height/width and halves the channel
    count (output shape [N, 2H, 2W, C//2])."""
    x_shape = tf.shape(x)
    output_shape = tf.stack([x_shape[0], x_shape[1]*2, x_shape[2]*2, x_shape[3]//2])
    return tf.nn.conv2d_transpose(x, W, output_shape, strides=[1, stride, stride, 1], padding='VALID')
def max_pool(x, n):
    """n-by-n max pooling with stride n (no padding)."""
    window = [1, n, n, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='VALID')
def crop_and_concat(x1, x2):
    """Center-crop x1 to x2's spatial size and concatenate on the channel axis.

    Used for U-Net skip connections, where the contracting-path map (x1) is
    larger than the expanding-path map (x2) because convolutions are unpadded.
    """
    x1_shape = tf.shape(x1)
    x2_shape = tf.shape(x2)
    # offsets for the top left corner of the crop
    offsets = [0, (x1_shape[1] - x2_shape[1]) // 2, (x1_shape[2] - x2_shape[2]) // 2, 0]
    size = [-1, x2_shape[1], x2_shape[2], -1]
    x1_crop = tf.slice(x1, offsets, size)
    return tf.concat([x1_crop, x2], 3)
def pixel_wise_softmax(output_map):
    """Per-pixel softmax over the (two-class) channel dimension.

    Computes exp(x) / (exp(x) + exp(x_other)); reversing along axis 3 pairs
    each channel with the other one, so for a 2-channel map this is exactly
    softmax.  NOTE: no max-subtraction, so large logits can overflow.
    """
    exponential_map = tf.exp(output_map)
    # tf.reverse takes a list of axis indices in TF >= 1.0 (this file already
    # uses the 1.x tf.stack/tf.concat APIs); the old boolean-mask form
    # [False, False, False, True] was removed in TF 1.0.
    evidence = tf.add(exponential_map, tf.reverse(exponential_map, [3]))
    return tf.div(exponential_map, evidence, name="pixel_wise_softmax")
def pixel_wise_softmax_2(output_map):
    """Per-pixel softmax over an arbitrary number of channels (axis 3)."""
    exponential_map = tf.exp(output_map)
    sum_exp = tf.reduce_sum(exponential_map, 3, keep_dims=True)
    # tile the per-pixel sum back across the channel dimension for division
    tensor_sum_exp = tf.tile(sum_exp, tf.stack([1, 1, 1, tf.shape(output_map)[3]]))
    return tf.div(exponential_map,tensor_sum_exp)
def cross_entropy(y_, output_map):
    """Mean cross-entropy between labels y_ and the predicted probability map.

    output_map is clipped to [1e-10, 1.0] to avoid log(0).
    """
    return -tf.reduce_mean(y_*tf.log(tf.clip_by_value(output_map,1e-10,1.0)), name="cross_entropy")
    # return tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(output_map), reduction_indices=[1]))
|
Ouranosinc/Magpie | magpie/alembic/versions/2021-04-19_00c617174e54_unique_user_emails.py | Python | apache-2.0 | 450 | 0 | """
Unique user emails.
Revision ID: 00c617174e54
Revises: dea413e13a8a
Create Date: 2021-04-19 12:45:55.439916
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "00c617174e54"
down_revision = "dea413e13a8a"
branch_labels = None
depends_on = None
def upgrade():
    """Add a unique constraint on users.email."""
    op.create_unique_constraint("uq_users_email", "users", ["email"])
def downgrade():
    """Drop the unique constraint on users.email.

    Alembic's ``Operations.drop_constraint`` takes the constraint kind via
    the ``type_`` keyword; the previous ``type="unique"`` is not an accepted
    parameter and raised a TypeError when the downgrade ran.
    """
    op.drop_constraint("uq_users_email", "users", type_="unique")
|
elc1798/chessley-tan | app.py | Python | mit | 5,117 | 0.006449 | import os
import module
from flask import Flask, render_template, request, session, redirect, url_for, send_from_directory
from werkzeug import secure_filename
from functools import wraps
app = Flask(__name__)

# Configure upload locations
app.config['UPLOAD_FOLDER'] = 'uploads/'
# Only files with these extensions may be uploaded (see allowed_file()).
app.config['ALLOWED_EXTENSIONS'] = set(['chessley']) # Change this to whatever filetype to accept
# Upload-filename validation helper.
def allowed_file(filename):
    """Return True when ``filename`` carries an upload-whitelisted extension.

    The extension is everything after the last dot and must appear in
    app.config['ALLOWED_EXTENSIONS']; names without a dot are rejected.
    """
    stem, dot, extension = filename.rpartition('.')
    return bool(dot) and extension in app.config['ALLOWED_EXTENSIONS']
# Wraps for login requirements on certain app.routes
def login_required(f):
    """Decorator: redirect to the login page unless a user is logged in.

    A request counts as logged in when the session carries a truthy
    'authenticated' flag AND a 'username'; otherwise the session is wiped
    and the client is redirected to the login view.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        authed = session.get("authenticated") and "username" in session
        if not authed:
            session.clear()
            return redirect(url_for("login"))
        return f(*args, **kwargs)
    return wrapper
def redirect_if_logged_in(f):
    """Decorator: send already-authenticated users straight to /profile."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        if session.get("authenticated"):
            return redirect(url_for("profile"))
        return f(*args, **kwargs)
    return wrapper
############### APPLICATION SITE ROUTES ###############
@app.route("/")
@app.route("/home")
@app.route("/home/")
@redirect_if_logged_in
def home():
    # Public landing page; logged-in users are bounced to /profile
    # by the decorator.
    return render_template("home.html")
@app.route("/login", methods=["GET","POST"])
@app.route("/login/", methods=["GET","POST"])
@redirect_if_logged_in
def login():
    """Render the login form (GET) or authenticate the user (POST).

    On a failed POST (missing fields or bad credentials) the login page is
    simply re-rendered, with no error message.
    """
    if request.method == "POST":
        REQUIRED = ["username", "pass"]
        for form_elem in REQUIRED:
            if form_elem not in request.form:
                return render_template("login.html")
        if module.authenticate(request.form['username'], request.form['pass']):
            session["authenticated"] = True
            session["username"] = request.form['username']
            return redirect(url_for("profile"))
    return render_template("login.html")
@app.route("/logout")
@app.route("/logout/")
@login_required
def logout():
    # Drop the whole session (auth flag + username) and return to login.
    session.clear()
    return redirect(url_for("login"))
@app.route("/register", methods=["POST"])
@app.route("/register/", methods=["POST"])
@redirect_if_logged_in
def register():
    """Create a new account from the signup form and log the user in.

    Redirects back to the home page (with no error feedback) when fields
    are missing, the passwords do not match, or the username is taken.
    """
    REQUIRED = ["username", "pass", "pass2"]
    for form_elem in REQUIRED:
        if form_elem not in request.form:
            return redirect(url_for("home"))
    if request.form["pass"] != request.form["pass2"]:
        return redirect(url_for("home"))
    if module.newUser(request.form["username"], request.form["pass"]):
        session['authenticated'] = True
        session['username'] = request.form['username']
        return redirect(url_for("profile"))
    else:
        return redirect(url_for("home"))
@app.route("/about")
@app.route("/about/")
def about():
    # Public page; AUTH only toggles the navbar state in the template.
    LOGGED_IN = "authenticated" in session and session["authenticated"]
    return render_template("about.html", AUTH=LOGGED_IN)
@app.route("/download", methods=["GET", "POST"])
@app.route("/download/", methods=["GET", "POST"])
@login_required
def download():
    """Download page for the logged-in user."""
    return render_template('download.html', USERNAME=session['username']) # For when the Jinja is configured
@app.route("/upload", methods=["GET","POST"])
@app.route("/upload/", methods=["GET","POST"])
@login_required
def upload():
    """Upload page; on POST, save the user's bot file.

    NOTE(review): the saved name is ``filename + username + "_bot.chessley"``,
    i.e. the username is appended *after* the original name (including its
    extension) -- looks unintended; confirm the naming scheme.  Rejected
    files are silently ignored (the page re-renders with no error).
    """
    if request.method == "POST":
        file = request.files["upload_bot"]
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename + session["username"] + "_bot.chessley"))
    return render_template("upload.html")
@app.route("/leaderboards", methods=["GET", "POST"])
@app.route("/leaderboards/", methods=["GET", "POST"])
def leaderboards():
    # Public ranking table; AUTH toggles the navbar state in the template.
    LOGGED_IN = "authenticated" in session and session["authenticated"]
    table = module.getRankedUsers()
    return render_template("leaderboards.html", table=table, AUTH=LOGGED_IN)
@app.route("/profile", methods=["GET","POST"])
@app.route("/profile/", methods=["GET","POST"])
@login_required
def profile():
    """Show the logged-in user's stats (rank, elo, wins/losses/stalemates).

    NOTE(review): the ``session['username'] != 0`` guard is redundant after
    @login_required, and the fallback renders home.html directly instead of
    redirecting -- confirm intent.
    """
    if 'username' in session and session['username']!=0:
        #retrieve user data here
        dict = module.getUser(session['username'])
        #dict = {"rank":1,"elo":1400,"wins":100,"losses":50,"stalemates":0}
        return render_template("profile.html", USERNAME=session['username'], DICT=dict)
    return render_template("home.html")
# A fresh random key per process invalidates existing sessions on every
# restart.  os.urandom already returns bytes, which Flask accepts directly;
# the previous str() wrapper produced the repr of the bytes object under
# Python 3 instead of the raw key material.
app.secret_key = os.urandom(24)

if __name__ == "__main__":
    app.debug = True
    app.run(host="0.0.0.0", port=5000)
|
ajrichards/bayesian-examples | hypothesis-testing/binomial_prob.py | Python | bsd-3-clause | 1,308 | 0.009174 | #!/usr/bin/env python
"""
Let's say we play a game where I keep flipping a coin until I get
heads. If the first time I get heads is on the nth coin, then I pay
you 2n-1 dollars. How much would you pay me to play this game?
You should end up with a sequence that you need to find the closed
form of. If you don't know how to do this, write some python code that
sums the first 100.
E(W) = sum_{n >= 1} (2n-1)/2^n = 3
"""
import matplotlib.pyplot as plt
import numpy as np
## simulate the number of flips before heads
## simulate the number of flips before heads
def coin():
    """Flip a fair coin until heads comes up; return the number of flips."""
    flips = 0
    while True:
        flips += 1
        # one Bernoulli(0.5) draw per flip, same as the original
        if np.random.binomial(1, 0.5):
            return flips
if __name__ == '__main__':
    ## simulate
    # NOTE: Python 2 only (xrange and print-statement syntax below).
    flips = [coin() for k in xrange(10000)]
    ## get the distribution of counts condition on the number of flips
    range_flips = range(1, max(flips) + 1)
    counts = np.array([flips.count(k)*1. for k in range_flips])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.bar(range_flips,counts,alpha=0.4)
    ax.set_ylabel("counts")
    ax.set_xlabel("num flips to win")
    #print [int(i) for i in counts]
    # empirical E(W) = sum over n of (2n-1) * P(first heads on flip n); ~3
    winnings = sum([counts[k - 1]*(2*(k)-1)/sum(counts) for k in range_flips])
    #print range_flips
    print winnings
    plt.show()
|
openstack/senlin | senlin/tests/unit/profiles/test_profile_base.py | Python | apache-2.0 | 38,181 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from unittest import mock
from oslo_context import context as oslo_ctx
from senlin.common import consts
from senlin.common import context as senlin_ctx
from senlin.common import exception
from senlin.common import schema
from senlin.common import utils as common_utils
from senlin.engine import environment
from senlin.engine import parser
from senlin.objects import credential as co
from senlin.objects import profile as po
from senlin.profiles import base as pb
from senlin.profiles.os.nova import server as nova_server
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
sample_profile = """
type: os.dummy
version: 1.0
properties:
key1: value1
key2: 2
"""
class DummyProfile(pb.Profile):
    """Minimal Profile subclass used to exercise the base-class machinery.

    Registered in the test environment under 'os.dummy-1.0' and
    'os.dummy-1.1' (see TestProfileBase.setUp).
    """
    VERSION = '1.0'
    CONTEXT = 'context'
    properties_schema = {
        CONTEXT: schema.Map(
            'context data'
        ),
        'key1': schema.String(
            'first key',
            default='value1',
            updatable=True,
        ),
        'key2': schema.Integer(
            'second key',
            required=True,
            updatable=True,
        ),
        'key3': schema.String(
            'third key',
        ),
    }
    OPERATIONS = {
        'op1': schema.Operation(
            'Operation 1',
            schema={
                'param1': schema.StringParam(
                    'description of param1',
                )
            }
        )
    }

    def __init__(self, name, spec, **kwargs):
        # Pass-through constructor; kept to mirror real profile drivers.
        super(DummyProfile, self).__init__(name, spec, **kwargs)
class TestProfileBase(base.SenlinTestCase):
    def setUp(self):
        super(TestProfileBase, self).setUp()
        self.ctx = utils.dummy_context(project='profile_test_project')
        g_env = environment.global_env()
        # Register the dummy driver under two versions so that the
        # version-variant tests resolve to the same implementation.
        g_env.register_profile('os.dummy-1.0', DummyProfile)
        g_env.register_profile('os.dummy-1.1', DummyProfile)
        self.spec = parser.simple_parse(sample_profile)
    def _create_profile(self, name, pid=None, context=None):
        """Build a base Profile from self.spec, optionally forcing its id.

        NOTE(review): ``context`` is (re)assigned onto the profile only when
        ``pid`` is given -- confirm that coupling is intended.
        """
        profile = pb.Profile(name, self.spec,
                             user=self.ctx.user_id,
                             project=self.ctx.project_id,
                             domain=self.ctx.domain_id,
                             context=context)
        if pid:
            profile.id = pid
            profile.context = context
        return profile
    @mock.patch.object(senlin_ctx, 'get_service_credentials')
    def test_init(self, mock_creds):
        """Constructor fills identity, spec data, properties and context."""
        mock_creds.return_value = {'foo': 'bar'}
        profile = self._create_profile('test-profile')
        self.assertIsNone(profile.id)
        self.assertEqual('test-profile', profile.name)
        self.assertEqual(self.spec, profile.spec)
        self.assertEqual('os.dummy', profile.type_name)
        self.assertEqual('1.0', profile.version)
        self.assertEqual('os.dummy-1.0', profile.type)
        self.assertEqual(self.ctx.user_id, profile.user)
        self.assertEqual(self.ctx.project_id, profile.project)
        self.assertEqual(self.ctx.domain_id, profile.domain)
        self.assertEqual({}, profile.metadata)
        self.assertIsNone(profile.created_at)
        self.assertIsNone(profile.updated_at)
        spec_data = profile.spec_data
        self.assertEqual('os.dummy', spec_data['type'])
        self.assertEqual('1.0', spec_data['version'])
        self.assertEqual('value1', spec_data['properties']['key1'])
        self.assertEqual(2, spec_data['properties']['key2'])
        self.assertEqual('value1', profile.properties['key1'])
        self.assertEqual(2, profile.properties['key2'])
        self.assertEqual({'foo': 'bar'}, profile.context)
        self.assertIsNone(profile._computeclient)
        self.assertIsNone(profile._networkclient)
        self.assertIsNone(profile._orchestrationclient)
        self.assertIsNone(profile._block_storageclient)
    @mock.patch.object(senlin_ctx, 'get_service_credentials')
    def test_init_version_as_float(self, mock_creds):
        """A float spec version (1.1) is normalized to the string '1.1'."""
        mock_creds.return_value = {'foo': 'bar'}
        self.spec['version'] = 1.1
        profile = self._create_profile('test-profile')
        self.assertIsNone(profile.id)
        self.assertEqual('test-profile', profile.name)
        self.assertEqual(self.spec, profile.spec)
        self.assertEqual('os.dummy', profile.type_name)
        self.assertEqual('1.1', profile.version)
        self.assertEqual('os.dummy-1.1', profile.type)
        self.assertEqual(self.ctx.user_id, profile.user)
        self.assertEqual(self.ctx.project_id, profile.project)
        self.assertEqual(self.ctx.domain_id, profile.domain)
        self.assertEqual({}, profile.metadata)
        self.assertIsNone(profile.created_at)
        self.assertIsNone(profile.updated_at)
        spec_data = profile.spec_data
        self.assertEqual('os.dummy', spec_data['type'])
        self.assertEqual('1.1', spec_data['version'])
        self.assertEqual('value1', spec_data['properties']['key1'])
        self.assertEqual(2, spec_data['properties']['key2'])
        self.assertEqual('value1', profile.properties['key1'])
        self.assertEqual(2, profile.properties['key2'])
        self.assertEqual({'foo': 'bar'}, profile.context)
        self.assertIsNone(profile._computeclient)
        self.assertIsNone(profile._networkclient)
        self.assertIsNone(profile._orchestrationclient)
        self.assertIsNone(profile._block_storageclient)
    @mock.patch.object(senlin_ctx, 'get_service_credentials')
    def test_init_version_as_string(self, mock_creds):
        """A string spec version ('1.1') is accepted as-is."""
        mock_creds.return_value = {'foo': 'bar'}
        self.spec['version'] = '1.1'
        profile = self._create_profile('test-profile')
        self.assertIsNone(profile.id)
        self.assertEqual('test-profile', profile.name)
        self.assertEqual(self.spec, profile.spec)
        self.assertEqual('os.dummy', profile.type_name)
        self.assertEqual('1.1', profile.version)
        self.assertEqual('os.dummy-1.1', profile.type)
        self.assertEqual(self.ctx.user_id, profile.user)
        self.assertEqual(self.ctx.project_id, profile.project)
        self.assertEqual(self.ctx.domain_id, profile.domain)
        self.assertEqual({}, profile.metadata)
        self.assertIsNone(profile.created_at)
        self.assertIsNone(profile.updated_at)
        spec_data = profile.spec_data
        self.assertEqual('os.dummy', spec_data['type'])
        self.assertEqual('1.1', spec_data['version'])
        self.assertEqual('value1', spec_data['properties']['key1'])
        self.assertEqual(2, spec_data['properties']['key2'])
        self.assertEqual('value1', profile.properties['key1'])
        self.assertEqual(2, profile.properties['key2'])
        self.assertEqual({'foo': 'bar'}, profile.context)
        self.assertIsNone(profile._computeclient)
        self.assertIsNone(profile._networkclient)
        self.assertIsNone(profile._orchestrationclient)
        self.assertIsNone(profile._block_storageclient)
    @mock.patch.object(senlin_ctx, 'get_service_credentials')
    def test_init_with_context(self, mock_creds):
        """An explicitly supplied context overrides the service credentials."""
        mock_creds.return_value = {'foo': 'bar'}
        profile = self._create_profile('test-profile',
                                       pid='FAKE_ID', context={'bar': 'foo'})
        self.assertEqual({'bar': 'foo'}, profile.context)
def test_init_bad_type(self):
bad_spec = {
'type': 'bad-type',
'version': '1.0',
'properties': '',
}
self.assertRaises(exception.ResourceNotFound,
pb.Profile,
'test-p |
anhstudios/swganh | data/scripts/templates/object/draft_schematic/clothing/shared_clothing_ith_backpack_field_06.py | Python | mit | 468 | 0.047009 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the draft-schematic Intangible for this clothing backpack item."""
    result = Intangible()

    result.template = "object/draft_schematic/clothing/shared_clothing_ith_backpack_field_06.iff"
    result.attribute_template_id = -1
    result.stfName("string_id_table","")

    #### BEGIN MODIFICATIONS ####

    #### END MODIFICATIONS ####

    return result
bmya/odoo-infrastructure | infrastructure_mass_mailing/__openerp__.py | Python | agpl-3.0 | 618 | 0 | # -*- coding: utf-8 -*-
{
    'name': 'Infrastructure Mass Mailing Integration',
    'version': '8.0.1.0.0',
    'description': u'Infrastructure Mass Mailing Integration',
    'category': u'base.module_category_knowledge_management',
    'author': u'ADHOC SA',
    'website': 'www.adhoc.com.ar',
    'license': 'AGPL-3',
    # glue module: auto-installs when both dependencies are present
    'depends': [
        'infrastructure',
        'mass_mailing',
    ],
    'sequence': 14,
    'summary': '',
    'installable': True,
    'auto_install': True,
    'application': False,
    'images': [],
    'data': [
        'mass_mailing_view.xml',
    ],
    'demo': [],
    'test': [],
}
|
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_8_0/iprg_broker.py | Python | apache-2.0 | 54,667 | 0.002232 | from ..broker import Broker
class IprgBroker(Broker):
controller = "iprgs"
def show(self, **kwargs):
"""Shows the details for the specified iprg.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param IprgID: The internal NetMRI identifier for this HSRP/VRRP Group.
:type IprgID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of iprg methods. The listed methods will be called on each iprg returned and included in the output. Available methods are: active_member, data_source.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: active_member, data_source.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return iprg: The iprg identified by the specified IprgID.
:rtype iprg: Iprg
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def index(self, **kwargs):
"""Lists the available iprgs. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgID: The internal NetMRI identifier for this HSRP/VRRP Group.
:type IprgID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgID: The internal NetMRI identifier for this HSRP/VRRP Group.
:type IprgID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgIPNumeric: The numerical value of the HSRP/VRRP virtual IP address.
:type IprgIPNumeric: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgIPNumeric: The numerical value of the HSRP/VRRP virtual IP address.
:type IprgIPNumeric: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``def | ault:`` None
:param IprgNumber: The HSRP or VRRP group | number.
:type IprgNumber: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgNumber: The HSRP or VRRP group number.
:type IprgNumber: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the iprgs as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of iprg methods. The listed methods will be called on each iprg returned and included in the output. Available methods are: active_member, data_source.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: active_member, data_source.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` IprgID
:param sort: The data field(s) to use for sorting the output. Default is IprgID. Valid values are IprgID, DataSourceID, IprgStartTime, IprgEndTime, IprgTimestamp, IprgChangedCols, ActiveIprgMemberID, IprgNumber, IprgIPDotted, IprgIPNumeric, IprgMAC, IprgAuth, IprgType, IprgActiveLastChanged.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Iprg. Valid values are IprgID, DataSourceID, IprgStartTime, IprgEndTime, IprgTimestamp, IprgChangedCols, ActiveIprgMemberID, IprgNumber, IprgIPDotted, IprgIPNumeric, IprgMAC, IprgAuth, IprgType, IprgActiveLastChanged. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return iprgs: An array of the Iprg objects t |
mhvk/numpy | numpy/f2py/cfuncs.py | Python | bsd-3-clause | 48,940 | 0.001798 | #!/usr/bin/env python3
"""
C declarations, CPP macros, and C functions for f2py2e.
Only required declarations/macros/functions will be used.
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 11:42:34 $
Pearu Peterson
"""
import sys
import copy
from . import __version__
f2py_version = __version__.version
errmess = sys.stderr.write
##################### Definitions ##################

# Registry of C code fragments that a generated f2py module may need.
# ``outneeds`` records, per section, which fragment names were actually
# required; ``needs`` maps a fragment name to the fragment names it
# depends on (resolved transitively when the module text is assembled).
outneeds = {'includes0': [], 'includes': [], 'typedefs': [], 'typedefs_generated': [],
            'userincludes': [],
            'cppmacros': [], 'cfuncs': [], 'callbacks': [], 'f90modhooks': [],
            'commonhooks': []}
needs = {}
# Each section dict maps a fragment name to the C text that replaces the
# corresponding /*need_...*/ placeholder in the module template.
includes0 = {'includes0': '/*need_includes0*/'}
includes = {'includes': '/*need_includes*/'}
userincludes = {'userincludes': '/*need_userincludes*/'}
typedefs = {'typedefs': '/*need_typedefs*/'}
typedefs_generated = {'typedefs_generated': '/*need_typedefs_generated*/'}
cppmacros = {'cppmacros': '/*need_cppmacros*/'}
cfuncs = {'cfuncs': '/*need_cfuncs*/'}
callbacks = {'callbacks': '/*need_callbacks*/'}
f90modhooks = {'f90modhooks': '/*need_f90modhooks*/',
               'initf90modhooksstatic': '/*initf90modhooksstatic*/',
               'initf90modhooksdynamic': '/*initf90modhooksdynamic*/',
               }
commonhooks = {'commonhooks': '/*need_commonhooks*/',
               'initcommonhooks': '/*need_initcommonhooks*/',
               }

############ Includes ###################

includes0['math.h'] = '#include <math.h>'
includes0['string.h'] = '#include <string.h>'
includes0['setjmp.h'] = '#include <setjmp.h>'
includes['Python.h'] = '#include "Python.h"'
needs['arrayobject.h'] = ['Python.h']
includes['arrayobject.h'] = '''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API
#include "arrayobject.h"'''
# NOTE(review): the assignment below overwrites the multi-line
# 'arrayobject.h' entry set just above, so only '#include "fortranobject.h"'
# survives in the dict. Verify against upstream whether this key was meant
# to be 'fortranobject.h'.
includes['arrayobject.h'] = '#include "fortranobject.h"'
includes['stdarg.h'] = '#include <stdarg.h>'

############# Type definitions ###############

# C typedef snippets, emitted when the corresponding Fortran/C type is used.
typedefs['unsigned_char'] = 'typedef unsigned char unsigned_char;'
typedefs['unsigned_short'] = 'typedef unsigned short unsigned_short;'
typedefs['unsigned_long'] = 'typedef unsigned long unsigned_long;'
typedefs['signed_char'] = 'typedef signed char signed_char;'
typedefs['long_long'] = """\
#ifdef _WIN32
typedef __int64 long_long;
#else
typedef long long long_long;
typedef unsigned long long unsigned_long_long;
#endif
"""
# NOTE(review): in the _WIN32 branch below the typedef declares the name
# ``long_long`` instead of ``unsigned_long_long`` — looks like a typo;
# confirm against upstream before relying on this path on Windows.
typedefs['unsigned_long_long'] = """\
#ifdef _WIN32
typedef __uint64 long_long;
#else
typedef unsigned long long unsigned_long_long;
#endif
"""
typedefs['long_double'] = """\
#ifndef _LONG_DOUBLE
typedef long double long_double;
#endif
"""
typedefs[
    'complex_long_double'] = 'typedef struct {long double r,i;} complex_long_double;'
typedefs['complex_float'] = 'typedef struct {float r,i;} complex_float;'
typedefs['complex_double'] = 'typedef struct {double r,i;} complex_double;'
typedefs['string'] = """typedef char * string;"""
############### CPP macros ####################
cppmacros['CFUNCSMESS'] = """\
#ifdef DEBUGCFUNCS
#define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess);
#define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\
PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
fprintf(stderr,\"\\n\");
#else
#define CFUNCSMESS(mess)
#define CFUNCSMESSPY(mess,obj)
#endif
"""
cppmacros['F_FUNC'] = """\
#if defined(PREPEND_FORTRAN)
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) _##F
#else
#define F_FUNC(f,F) _##f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) _##F##_
#else
#define F_FUNC(f,F) _##f##_
#endif
#endif
#else
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) F
#else
#define F_FUNC(f,F) f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_FUNC(f,F) F##_
#else
#define F_FUNC(f,F) f##_
#endif
#endif
#endif
#if defined(UNDERSCORE_G77)
#define F_FUNC_US(f,F) F_FUNC(f##_,F##_)
#else
#define F_FUNC_US(f,F) F_FUNC(f,F)
#endif
"""
cppmacros['F_WRAPPEDFUNC'] = """\
#if defined(PREPEND_FORTRAN)
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F
#else
#define F_WRAPPEDFUNC(f,F) _f2pywrap##f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F##_
#else
#define F_WRAPPEDFUNC(f,F) _f2pywrap##f##_
#endif
#endif
#else
#if defined(NO_APPEND_FORTRAN)
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F
#else
#define F_WRAPPEDFUNC(f,F) f2pywrap##f
#endif
#else
#if defined(UPPERCASE_FORTRAN)
#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F##_
#else
#define F_WRAPPEDFUNC(f,F) f2pywrap##f##_
#endif
#endif
#endif
#if defined(UNDERSCORE_G77)
#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f##_,F##_)
#else
#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f,F)
#endif
"""
cppmacros['F_MODFUNC'] = """\
#if defined(F90MOD2CCONV1) /*E.g. Compaq Fortran */
#if defined(NO_APPEND_FORTRAN)
#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f
#else
#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f ## _
#endif
#endif
#if defined(F90MOD2CCONV2) /*E.g. IBM XL Fortran, not tested though */
#if defined(NO_APPEND_FORTRAN)
#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f
#else
#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f ## _
#endif
#endif
#if defined(F90MOD2CCONV3) /*E.g. MIPSPro Compilers */
#if defined(NO_APPEND_FORTRAN)
#define F_MODFUNCNAME(m,f) f ## .in. ## m
#else
#define F_MODFUNCNAME(m,f) f ## .in. ## m ## _
#endif
#endif
/*
#if defined(UPPERCASE_FORTRAN)
#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(M,F)
#else
#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(m,f)
#endif
*/
#define F_MODFUNC(m,f) (*(f2pymodstruct##m##.##f))
"""
cppmacros['SWAPUNSAFE'] = """\
#define SWAP(a,b) (size_t)(a) = ((size_t)(a) ^ (size_t)(b));\\
(size_t)(b) = ((size_t)(a) ^ (size_t)(b));\\
(size_t)(a) = ((size_t)(a) ^ (size_t)(b))
"""
cppmacros['SWAP'] = """\
#define SWAP(a,b,t) {\\
t *c;\\
c = a;\\
a = b;\\
b = c;}
"""
# cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) &
# NPY_ARRAY_C_CONTIGUOUS)'
cppmacros['PRINTPYOBJERR'] = """\
#define PRINTPYOBJERR(obj)\\
fprintf(stderr,\"#modulename#.error is related to \");\\
PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
fprintf(stderr,\"\\n\");
"""
cppmacros['MINMAX'] = """\
#ifndef max
#define max(a,b) ((a > b) ? (a) : (b))
#endif
#ifndef min
#define min(a,b) ((a < b) ? (a) : (b))
#endif
#ifndef MAX
#define MAX(a,b) ((a > b) ? (a) : (b))
#endif
#ifndef MIN
#define MIN(a,b) ((a < b) ? (a) : (b))
#endif
"""
# The ``len..`` macro family provides Fortran-style size queries over the
# wrapped array arguments (rank/shape/len of the underlying PyArrayObject).
needs['len..'] = ['f2py_size']
cppmacros['len..'] = """\
#define rank(var) var ## _Rank
#define shape(var,dim) var ## _Dims[dim]
#define old_rank(var) (PyArray_NDIM((PyArrayObject *)(capi_ ## var ## _tmp)))
#define old_shape(var,dim) PyArray_DIM(((PyArrayObject *)(capi_ ## var ## _tmp)),dim)
#define fshape(var,dim) shape(var,rank(var)-dim-1)
#define len(var) shape(var,0)
#define flen(var) fshape(var,0)
#define old_size(var) PyArray_SIZE((PyArrayObject *)(capi_ ## var ## _tmp))
/* #define index(i) capi_i ## i */
#define slen(var) capi_ ## var ## _len
#define size(var, ...) f2py_size((PyArrayObject *)(capi_ ## var ## _tmp), ## __VA_ARGS__, -1)
"""
# f2py_size is variadic: the size(...) macro above appends a trailing -1
# sentinel, so the C function reads the optional dimension argument and
# stops at -1. dim==-1 means "total size"; otherwise dim is 1-based.
needs['f2py_size'] = ['stdarg.h']
cfuncs['f2py_size'] = """\
static int f2py_size(PyArrayObject* var, ...)
{
npy_int sz = 0;
npy_int dim;
npy_int rank;
va_list argp;
va_start(argp, var);
dim = va_arg(argp, npy_int);
if (dim==-1)
{
sz = PyArray_SIZE(var);
}
else
{
rank = PyArray_NDIM(var);
if (dim>=1 && dim<=rank)
sz = PyArray_DIM(var, dim-1);
else
fprintf(stderr, \"f2py_size: 2nd argument value=%d fails to satisfy 1<=value<=%d. Result will be 0.\\n\", dim, rank);
}
va_end(argp);
return sz;
}
"""
cppmacros[
'pyobj_from_char1'] = '#define pyobj_from_char1(v) (PyLong_FromLong(v))'
cppmacros[
'pyobj_from_short1'] = '#define pyobj_from_short1(v) (PyLong_FromLong(v))'
needs['pyobj_from_int1'] = ['signed_char']
cppmacros['pyobj_from_int1'] = '#define pyobj_from_int1(v) (PyLong_FromLong(v))'
cppm |
mlosch/pytorch-stats | stats/estimation/mm.py | Python | mit | 4,418 | 0.003395 | import numpy as np
import torch
from torch.autograd import Variable
from stats.tensor import tensor
def fit(pdfs, parameters, observations, iter, lr):
    """Estimates the parameters of a mixture model via maximum likelihood maximization.

    Optimizes by plain gradient *ascent* on the expected log-likelihood
    (parameters are moved along the gradient, see the update step below).

    Parameters
    ----------
    pdfs : List of callable pdfs
        Callable probability density functions (likelihood function)
        expecting an array of observations as the only argument.
    parameters : List of list
        List of list of parameters that are subject to optimization.
        e.g. for a bimodal gaussian mixture: [[mu_1, sigma_1], [mu_2, sigma_2]]
        Side effect: a mixing-coefficient Variable is appended to each inner
        list, so after the call ``parameters[k][-1]`` holds p(z_k = 1).
    observations : ndarray
        Observations from an unknown pdf which parameters are subject to be estimated
    iter : float
        Maximum number of iterations
    lr : float
        Learning rate for the gradient step

    Returns
    -------
    float
        Final value of the (constrained) expected log-likelihood.
        NOTE(review): if ``iter`` <= 0 the loop never runs and the final
        ``return`` raises NameError because ``expectation`` is unbound.

    Raises
    ------
    RuntimeError
        If the expectation becomes NaN (singular state).
    """
    # number of models/classes in mixture
    K = len(parameters)
    # initialize mixing coefficients with random values (normalized to sum 1)
    mixcoeffs = np.random.rand(K)
    mixcoeffs /= np.sum(mixcoeffs)
    # make the coefficients visible to the update step
    for k in range(K):
        mixcoeff = Variable(tensor(mixcoeffs[k]), requires_grad=True)
        parameters[k].append(mixcoeff)
    for i in range(iter):
        likelihood = 0
        for k in range(K):
            # multiply the likelihood with the mixing coefficients
            # mixing coefficient: p(z_k = 1)
            p_z = parameters[k][-1].expand(observations.size())
            likelihood += pdfs[k](observations) * p_z
        expectation = torch.mean(torch.log(likelihood))
        # add constraint sum(mixcoeffs) = 1 via lagrange multiplier
        # (multiplier fixed at 1.0; the +1.0 is the constant c of the constraint)
        for k in range(K):
            expectation -= 1.0 * parameters[k][-1]
        expectation += 1.0  # c = 1
        if np.isnan(expectation.data[0]):
            raise RuntimeError('Singular state. Try different initial parameters')
        # Determine gradients
        expectation.backward()
        # Update parameters with a gradient-ascent step (add the gradient:
        # we are maximizing the expectation), then clear the gradients.
        for k in range(K):
            for param in parameters[k]:
                param.data.add_(lr * param.grad.data)
                param.grad.data.zero_()
    return expectation.data[0]
if __name__ == '__main__':
    from stats.distributions import Normal
    """
    Estimate mean and std of a gaussian mixture model via MixtureModel-MLE on Kx10000 observations
    """
    # Fixed seed for reproducible sampling and initialization.
    np.random.seed(0)

    # number of gaussian models in mixture
    K = 2

    pdfs = []
    params = []
    true_params = []
    xs = []
    for k in range(K):
        # Sample observations from a bimodal normal distribution function with different parameter
        true_mean = np.random.uniform(-10, 10)
        true_std = np.random.uniform(0.5, 3.0)
        xs.append(true_mean + np.random.randn(np.random.randint(500, 2000)) * true_std)

        # Define likelihood function of model; the initial mean estimate is the
        # true mean perturbed by N(0, 5^2) noise, the initial std is 1.0.
        mean_estimate = Variable(tensor(true_mean+5.*np.random.randn()), requires_grad=True)
        std_estimate = Variable(tensor(1.0), requires_grad=True)
        pdfs.append(Normal(mean_estimate, std_estimate))

        params.append([mean_estimate, std_estimate])
        true_params.append([true_mean, true_std])

    # Pool all component samples into one observation set.
    x = np.concatenate(xs, axis=0)
    observations = Variable(tensor(x))

    # fit() appends the learned mixing coefficient as params[k][2].
    log_likelihood = fit(pdfs, params, observations, iter=500, lr=0.1)

    print('Log likelihood: %7.5f' % log_likelihood)
    for k in range(K):
        print('k=%d mean=% 7.5f std=% 7.5f coeff=% 7.5f' % (k, params[k][0].data[0], params[k][1].data[0], params[k][2].data[0]))

    """
    Plot true and estimated distributions
    """
    import matplotlib.pyplot as plt
    n, _, _ = plt.hist(x, 100, normed=True)

    # plot distributions: closed-form normal pdf for both true and estimated params
    np_pdf = lambda x, mean, std: 1./np.sqrt(2.0*np.pi*std*std) * np.exp(- ((x-mean)**2) / (2.0 * std*std))
    xx = np.linspace(np.min(x), np.max(x), 1000)
    for k in range(K):
        true_y = np_pdf(xx, true_params[k][0], true_params[k][1])
        estimated_y = np_pdf(xx, params[k][0].data[0], params[k][1].data[0])
        plt.plot(xx, true_y, '-.', label='Target pdf k=%d'%(k+1))
        plt.plot(xx, estimated_y, '-', label='Estimated pdf %d' % (k+1))
    plt.legend()
    plt.show()
|
Naeka/vosae-app | www/timeline/models/base.py | Python | agpl-3.0 | 1,805 | 0.002216 | # -*- coding:Utf-8 -*-
from django.utils.timezone import now as datetime_now
from mongoengine import DynamicDocument, fields
from django.conf import settings
from realtime.utils import emit_to_channel
__all__ = (
'TimelineEntry',
)
class TimelineEntry(DynamicDocument):
    """A single event on a tenant's activity timeline.

    Saved entries are pushed to every eligible connected user in realtime
    by the ``post_save`` signal handler below.
    """

    # Tenant owning the entry; recipient queries below are scoped to it.
    tenant = fields.ReferenceField("Tenant", required=True)
    # User who triggered the event (optional).
    issuer = fields.ReferenceField("VosaeUser")
    module = fields.StringField(choices=settings.VOSAE_MODULES, required=True)
    datetime = fields.DateTimeField(required=True, default=datetime_now)
    # Permission names a user must hold (both, when both are set) to
    # receive this entry.
    access_permission = fields.StringField()
    see_permission = fields.StringField()

    meta = {
        "indexes": ["tenant", "access_permission", "see_permission"],
        "ordering": ["-id"],
        "allow_inheritance": True
    }

    @classmethod
    def post_save(cls, sender, document, **kwargs):
        """
        Post save hook handler

        Emits timeline entry through the realtime service
        """
        # Imported locally to avoid circular imports at module load time.
        from core.models import VosaeUser
        from timeline.api.resources import TimelineEntryResource

        ter = TimelineEntryResource()
        perms = []
        if document.access_permission:
            perms.append(document.access_permission)
        if document.see_permission:
            perms.append(document.see_permission)

        # The resource type depends only on the document class, so resolve it
        # once instead of recomputing it for every recipient (previously done
        # inside the loop). Resolution stays best-effort: unknown classes
        # fall back to None.
        try:
            resource_type = ter._get_type_from_class(ter._meta.polymorphic, document.__class__)
        except Exception:  # narrowed from a bare except
            resource_type = None

        for user_id in VosaeUser.objects.filter(tenant=document.tenant, permissions__acquired__all=perms).values_list('id'):
            emit_to_channel(u'private-user-{0}'.format(unicode(user_id)), u'new-timeline-entry', {
                u'id': unicode(document.id),
                u'type': resource_type
            })
|
google-research/google-research | rrlfd/adroit_ext/hammer_v0.py | Python | apache-2.0 | 1,602 | 0.001873 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adroit hammer environment variant with RGB and proprioceptive observations.
"""
from d4rl.hand_manipulation_suite import HammerEnvV0
import numpy as np
class VisualHammerEnvV0(HammerEnvV0):
    """Hammer environment with visual and proprioceptive observations."""

    def __init__(self, camera_id, im_size, **kwargs):
        # Set rendering attributes *before* calling the base constructor:
        # base-class initialization may already call get_obs(), which reads
        # self._camera_id and self.im_size — do not reorder.
        self._camera_id = camera_id
        self.im_size = im_size
        super().__init__(**kwargs)

    def get_obs(self):
        """Return a dict of visual, proprioceptive and tactile observations,
        plus the unmodified base-environment observation vector."""
        # Square RGB rendering from the configured camera.
        rgb = self.physics.render(
            self.im_size, self.im_size, camera_id=self._camera_id)
        qp = self.physics.data.qpos.ravel().copy()
        # Joint velocities clipped to [-1, 1].
        qv = np.clip(self.physics.data.qvel.ravel(), -1.0, 1.0).copy()
        palm_pos = self.physics.data.site_xpos[self.S_grasp_sid].ravel().copy()
        # First 41 sensor readings, clipped to [-5, 5] — presumably the
        # touch sensors; TODO confirm against the hammer model definition.
        sensordata = np.clip(
            self.physics.data.sensordata.ravel().copy()[:41], -5.0, 5.0)
        original_obs = super().get_obs()
        # qpos/qvel drop the last 6 entries — presumably the free object
        # (hammer/nail) dofs, keeping only the hand — TODO confirm.
        return {'rgb': rgb, 'qpos': qp[:-6], 'qvel': qv[:-6], 'palm_pos': palm_pos,
                'tactile': sensordata, 'original_obs': original_obs}
|
azumimuo/family-xbmc-addon | zips/plugin.video.youtube/resources/lib/kodion/impl/mock/mock_context.py | Python | gpl-2.0 | 2,846 | 0.001054 | import time
__author__ = 'bromix'
import tempfile
from ..abstract_context import AbstractContext
from .mock_settings import MockSettings
from .mock_context_ui import MockContextUI
from .mock_system_version import MockSystemVersion
from ...logging import log
class MockContext(AbstractContext):
    """In-memory AbstractContext implementation for exercising plugin code
    in tests: canned localization, temp-dir data path, mock settings/UI."""

    def __init__(self, path=u'/', params=None, plugin_name='MOCK Plugin', plugin_id='mock.plugin'):
        AbstractContext.__init__(self, path, params, plugin_name, plugin_id)
        self._data_path = tempfile.gettempdir()
        self._settings = MockSettings()
        self._dict_localization = {5000: u'Hello World',
                                   5001: u'MOCK Plugin'}
        self._ui = None
        self._system_version = MockSystemVersion(1, 3, 'Kodion Test System')
        self._language = 'en-US'
        self._version = '1.9'

    def set_localization(self, text_id, value):
        """Register or override the localized string for *text_id*."""
        self._dict_localization[text_id] = value

    def set_language(self, language):
        self._language = language

    def get_language(self):
        return self._language

    def get_system_version(self):
        return self._system_version

    def get_ui(self):
        """Create the mock UI facade lazily and reuse it afterwards."""
        if not self._ui:
            self._ui = MockContextUI()
        return self._ui

    def get_handle(self):
        # Arbitrary fixed handle; real contexts get one from the runtime.
        return 666

    def get_data_path(self):
        return self._data_path

    def get_native_path(self):
        return 'virtual_path'

    def get_settings(self):
        return self._settings

    def localize(self, text_id, default_text=u''):
        return self._dict_localization.get(text_id, default_text)

    def set_content_type(self, content_type):
        # Only logged; the mock has no real container to configure.
        log("Set ContentType to '%s'" % content_type)

    def add_sort_method(self, *sort_methods):
        for sort_method in sort_methods:
            log("add SortMethod '%s'" % (str(sort_method)))

    def clone(self, new_path=None, new_params=None):
        """Return a copy of this context, optionally rebased on a new
        path and/or params; mutable collaborators are shared."""
        cloned = MockContext(path=new_path or self.get_path(),
                             params=new_params or self.get_params(),
                             plugin_name=self._plugin_name,
                             plugin_id=self._plugin_id)
        # Share state so clone and original stay consistent.
        cloned._function_cache = self._function_cache
        cloned._search_history = self._search_history
        cloned._favorite_list = self._favorite_list
        cloned._watch_later_list = self._watch_later_list
        cloned._access_manager = self._access_manager
        return cloned

    def sleep(self, milli_seconds):
        time.sleep(milli_seconds / 1000.0)
mmanhertz/elopic | sample_code/pixelator.py | Python | bsd-2-clause | 9,517 | 0.00063 | #!/usr/bin/env python
############################################################################
#
# Copyright (C) 2004-2005 Trolltech AS. All rights reserved.
#
# This file is part of the example classes of the Qt Toolkit.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file LICENSE.GPL included in the packaging of
# self file. Please review the following information to ensure GNU
# General Public Licensing requirements will be met:
# http://www.trolltech.com/products/qt/opensource.html
#
# If you are unsure which license is appropriate for your use, please
# review the following information:
# http://www.trolltech.com/products/qt/licensing.html or contact the
# sales department at sales@trolltech.com.
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
############################################################################
import sys
from PySide import QtCore, QtGui
import pixelator_rc
ItemSize = 256
class PixelDelegate(QtGui.QAbstractItemDelegate):
    """Delegate that paints each cell as a black disc whose radius encodes
    the model's gray value: darker pixels produce larger discs."""

    def __init__(self, parent=None):
        super(PixelDelegate, self).__init__(parent)
        # Edge length (in screen pixels) of each rendered cell;
        # adjustable via setPixelSize().
        self.pixelSize = 12

    def paint(self, painter, option, index):
        if option.state & QtGui.QStyle.State_Selected:
            painter.fillRect(option.rect, option.palette.highlight())

        size = min(option.rect.width(), option.rect.height())
        # DisplayRole carries a gray level; map it inversely to a radius so
        # a value of 255 (white) gives radius 0 — nothing to draw.
        brightness = index.model().data(index, QtCore.Qt.DisplayRole)
        radius = (size / 2.0) - (brightness / 255.0 * size / 2.0)
        if radius == 0.0:
            return

        painter.save()
        painter.setRenderHint(QtGui.QPainter.Antialiasing)
        painter.setPen(QtCore.Qt.NoPen)

        # Selected cells invert the disc color for contrast with the
        # highlight background painted above.
        if option.state & QtGui.QStyle.State_Selected:
            painter.setBrush(option.palette.highlightedText())
        else:
            painter.setBrush(QtGui.QBrush(QtCore.Qt.black))

        # Disc centered in the cell rectangle.
        painter.drawEllipse(QtCore.QRectF(
            option.rect.x() + option.rect.width() / 2 - radius,
            option.rect.y() + option.rect.height() / 2 - radius,
            2 * radius, 2 * radius))

        painter.restore()

    def sizeHint(self, option, index):
        # Cells are square with the configured pixel size.
        return QtCore.QSize(self.pixelSize, self.pixelSize)

    def setPixelSize(self, size):
        self.pixelSize = size
class ImageModel(QtCore.QAbstractTableModel):
    """Table model exposing a QImage as a grid of gray values,
    one cell per pixel (rows = image height, columns = width)."""

    def __init__(self, parent=None):
        super(ImageModel, self).__init__(parent)
        self.modelImage = QtGui.QImage()

    def setImage(self, image):
        # Copy the image and tell attached views the whole model changed.
        self.modelImage = QtGui.QImage(image)
        # NOTE(review): QAbstractItemModel.reset() is the Qt4-era API;
        # later Qt versions require beginResetModel()/endResetModel().
        self.reset()

    def rowCount(self, parent):
        return self.modelImage.height()

    def columnCount(self, parent):
        return self.modelImage.width()

    def data(self, index, role):
        if not index.isValid() or role != QtCore.Qt.DisplayRole:
            return None
        # Gray level (0-255) of the pixel at (column, row).
        return QtGui.qGray(self.modelImage.pixel(index.column(), index.row()))

    def headerData(self, section, orientation, role):
        if role == QtCore.Qt.SizeHintRole:
            # Shrink headers to effectively nothing (the view hides them).
            return QtCore.QSize(1, 1)
        return None
class MainWindow(QtGui.QMainWindow):
def __init__(self):
    """Build the main window: a headerless table view rendered by
    PixelDelegate, a pixel-size control, and the File/Help menus."""
    super(MainWindow, self).__init__()

    self.currentPath = QtCore.QDir.homePath()
    self.model = ImageModel(self)

    centralWidget = QtGui.QWidget()

    # Table view showing one cell per image pixel; grid and headers are
    # stripped so the delegate's discs render edge to edge.
    self.view = QtGui.QTableView()
    self.view.setShowGrid(False)
    self.view.horizontalHeader().hide()
    self.view.verticalHeader().hide()
    self.view.horizontalHeader().setMinimumSectionSize(1)
    self.view.verticalHeader().setMinimumSectionSize(1)
    self.view.setModel(self.model)

    delegate = PixelDelegate(self)
    self.view.setItemDelegate(delegate)

    # Pixel-size control (range 4-32, default 12).
    pixelSizeLabel = QtGui.QLabel("Pixel size:")
    pixelSizeSpinBox = QtGui.QSpinBox()
    pixelSizeSpinBox.setMinimum(4)
    pixelSizeSpinBox.setMaximum(32)
    pixelSizeSpinBox.setValue(12)

    fileMenu = QtGui.QMenu("&File", self)
    openAction = fileMenu.addAction("&Open...")
    openAction.setShortcut("Ctrl+O")

    # Printing stays disabled until an image has been opened
    # (see openImage()).
    self.printAction = fileMenu.addAction("&Print...")
    self.printAction.setEnabled(False)
    self.printAction.setShortcut("Ctrl+P")

    quitAction = fileMenu.addAction("E&xit")
    quitAction.setShortcut("Ctrl+Q")

    helpMenu = QtGui.QMenu("&Help", self)
    aboutAction = helpMenu.addAction("&About")

    self.menuBar().addMenu(fileMenu)
    self.menuBar().addSeparator()
    self.menuBar().addMenu(helpMenu)

    # Signal wiring.
    openAction.triggered.connect(self.chooseImage)
    self.printAction.triggered.connect(self.printImage)
    quitAction.triggered.connect(QtGui.qApp.quit)
    aboutAction.triggered.connect(self.showAboutBox)
    # The spin box both resizes the delegate's cells and refreshes the view.
    pixelSizeSpinBox.valueChanged[int].connect(delegate.setPixelSize)
    pixelSizeSpinBox.valueChanged[int].connect(self.updateView)

    controlsLayout = QtGui.QHBoxLayout()
    controlsLayout.addWidget(pixelSizeLabel)
    controlsLayout.addWidget(pixelSizeSpinBox)
    controlsLayout.addStretch(1)

    mainLayout = QtGui.QVBoxLayout()
    mainLayout.addWidget(self.view)
    mainLayout.addLayout(controlsLayout)
    centralWidget.setLayout(mainLayout)

    self.setCentralWidget(centralWidget)

    self.setWindowTitle("Pixelator")
    self.resize(640, 480)
def chooseImage(self):
    """Prompt the user for an image file and load it if one was chosen."""
    selected, _selected_filter = QtGui.QFileDialog.getOpenFileName(
        self, "Choose an Image", self.currentPath, '*')
    if not selected:
        return
    self.openImage(selected)
def openImage(self, fileName):
    """Load *fileName* into the model; on success enable printing,
    refresh the view and (for real files) update title and current path."""
    image = QtGui.QImage()
    if not image.load(fileName):
        return

    self.model.setImage(image)
    # Qt resource paths (':/...') are not user files: keep the previous
    # current path and window title for those.
    if not fileName.startswith(':/'):
        self.currentPath = fileName
        self.setWindowTitle("%s - Pixelator" % self.currentPath)

    self.printAction.setEnabled(True)
    self.updateView()
def printImage(self):
if self.model.rowCount(QtCore.QModelIndex()) * self.model.columnCount(
QtCore.QModelIndex()) > 90000:
answer = QtGui.QMessageBox.question(self, "Large Image Size",
"The printed image may be very large. Are you sure that "
"you want to print it?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if answer == QtGui.QMessageBox.No:
return
printer = QtGui.QPrinter(QtGui.QPrinter.HighResolution)
dlg = QtGui.QPrintDialog(printer, self)
dlg.setWindowTitle("Print Image")
if dlg.exec_() != QtGui.QDialog.Accepted:
return
painter = QtGui.QPainter()
painter.begin(printer)
rows = self.model.rowCount(QtCore.QModelIndex())
columns = self.model.columnCount(QtCore.QModelIndex())
sourceWidth = (columns + 1) * ItemSize
sourceHeight = (rows + 1) * ItemSize
painter.save()
xscale = printer.pageRect().width() / float(sourceWidth)
yscale = printer.pageRect().height() / float(sourceHeight)
scale = min(xscale, yscale)
painter.translate(
printer.pageRect().x() + printer.pageRect().width() / 2,
printer.pageRect().y() + printer.pageRect().height() / 2)
painter.scale(scale, scale)
painter.translate(-sourceWidt / 2, -sourceHeight / 2)
option = QtGui.QStyleOptionViewItem()
parent = QtCore.QModelIndex()
progress = QtGui.QProgressDialog("Printing...", "Cancel", 0, rows,
self)
y = ItemSize / 2.0
for row in range(rows):
progress.setValue(row)
QtGui.qApp.processEvents()
if progress.wasCanceled():
break
x = ItemSize / 2.0
for col in range(columns):
|
procangroup/edx-platform | pavelib/assets.py | Python | agpl-3.0 | 34,154 | 0.002225 | """
Asset compilation and collection.
"""
from __future__ import print_function
import argparse
import glob
import os
import traceback
from datetime import datetime
from functools import wraps
from threading import Timer
from paver import tasks
from paver.easy import call_task, cmdopts, consume_args, needs, no_help, path, sh, task
from watchdog.events import PatternMatchingEventHandler
from watchdog.observers.api import DEFAULT_OBSERVER_TIMEOUT
from watchdog.observers import Observer
from openedx.core.djangoapps.theming.paver_helpers import get_theme_paths
from .utils.cmd import cmd, django_cmd
from .utils.envs import Env
from .utils.process import run_background_process
from .utils.timer import | timed
# setup baseline paths
ALL_SYSTEMS = ['lms', 'studio']
COFFEE_DIRS = ['lms', 'cms', 'common']
LMS = 'lms'
CMS = 'cms'
SYSTEMS = {
'lms': LMS,
'cms': CMS,
'studio': CMS
}
# Common lookup paths that are added to the lookup paths for all sass compilations
COMMON_LOOKUP_PATHS = [
path("common/static"),
path("common/static/sass"),
path('node_modules/@edx'),
path('node_modu | les'),
path('node_modules/edx-pattern-library/node_modules'),
]
# A list of NPM installed libraries that should be copied into the common
# static directory.
# If string ends with '/' then all file in the directory will be copied.
NPM_INSTALLED_LIBRARIES = [
'backbone.paginator/lib/backbone.paginator.js',
'backbone/backbone.js',
'bootstrap/dist/js/bootstrap.bundle.js',
'hls.js/dist/hls.js',
'jquery-migrate/dist/jquery-migrate.js',
'jquery.scrollto/jquery.scrollTo.js',
'jquery/dist/jquery.js',
'moment-timezone/builds/moment-timezone-with-data.js',
'moment/min/moment-with-locales.js',
'picturefill/dist/picturefill.js',
'requirejs/require.js',
'underscore.string/dist/underscore.string.js',
'underscore/underscore.js',
'@edx/studio-frontend/dist/',
'which-country/index.js'
]
# A list of NPM installed developer libraries that should be copied into the common
# static directory only in development mode.
NPM_INSTALLED_DEVELOPER_LIBRARIES = [
'sinon/pkg/sinon.js',
'squirejs/src/Squire.js',
]
# Directory to install static vendor files
NPM_JS_VENDOR_DIRECTORY = path('common/static/common/js/vendor')
NPM_CSS_VENDOR_DIRECTORY = path("common/static/common/css/vendor")
NPM_CSS_DIRECTORY = path("common/static/common/css")
# system specific lookup path additions, add sass dirs if one system depends on the sass files for other systems
SASS_LOOKUP_DEPENDENCIES = {
'cms': [path('lms') / 'static' / 'sass' / 'partials', ],
}
# Collectstatic log directory setting
COLLECTSTATIC_LOG_DIR_ARG = 'collect_log_dir'
# Webpack command
WEBPACK_COMMAND = 'STATIC_ROOT_LMS={static_root_lms} STATIC_ROOT_CMS={static_root_cms} $(npm bin)/webpack {options}'
def get_sass_directories(system, theme_dir=None):
    """
    Build the list of SASS directories to compile for the given system and
    (optionally) theme.

    Each entry in the returned list is a dict with the keys:

        "sass_source_dir": directory holding the source sass files
        "css_destination_dir": directory the compiled css is written to
        "lookup_paths": directories passed as lookup paths for @import resolution

    When theme_dir is empty or None, only the directories for the given
    system (lms or cms) are returned.

    :param system: name of the system to compile sass for, e.g. 'lms', 'cms'
    :param theme_dir: absolute path of the theme to compile sass files for.
    """
    if system not in SYSTEMS:
        allowed = ', '.join(SYSTEMS.keys())
        raise ValueError("'system' must be one of ({allowed_values})".format(allowed_values=allowed))

    # Normalize aliases ('studio' -> 'cms') before dispatching.
    system = SYSTEMS[system]

    if theme_dir:
        return list(get_theme_sass_dirs(system, theme_dir))
    return list(get_system_sass_dirs(system))
def get_common_sass_directories():
    """
    Return the common SASS directories compiled for every system and theme.

    Each entry in the returned list is a dict with the keys:

        "sass_source_dir": directory holding the source sass files
        "css_destination_dir": directory the compiled css is written to
        "lookup_paths": directories passed as lookup paths for @import resolution
    """
    # Only one shared compilation unit exists: common/static/sass -> common/static/css.
    return [{
        "sass_source_dir": path("common/static/sass"),
        "css_destination_dir": path("common/static/css"),
        "lookup_paths": COMMON_LOOKUP_PATHS,
    }]
def get_theme_sass_dirs(system, theme_dir):
    """
    Return the list of sass dirs that need to be compiled for the given theme.

    :param system: name of the system to compile sass for, e.g. 'lms', 'cms'
    :param theme_dir: absolute path of the theme to compile sass files for.
    """
    if system not in ('lms', 'cms'):
        raise ValueError('"system" must either be "lms" or "cms"')

    system_sass_dir = path(system) / "static" / "sass"
    theme_sass_dir = theme_dir / system / "static" / "sass"
    theme_css_dir = theme_dir / system / "static" / "css"

    # A theme without sass for this system contributes nothing.
    if not theme_sass_dir.isdir():
        return []

    theme_css_dir.mkdir_p()

    # Both compilation passes resolve @imports against the same directories.
    shared_lookups = SASS_LOOKUP_DEPENDENCIES.get(system, []) + [
        theme_sass_dir / "partials",
        system_sass_dir / "partials",
        system_sass_dir,
    ]

    # First compile the system sass files with the css landing in the theme
    # dir, then compile the theme sass files so they override that css.
    return [
        {
            "sass_source_dir": source_dir,
            "css_destination_dir": theme_css_dir,
            "lookup_paths": list(shared_lookups),
        }
        for source_dir in (system_sass_dir, theme_sass_dir)
    ]
def get_system_sass_dirs(system):
    """
    Return the list of sass dirs that need to be compiled for the given system.

    :param system: name of the system to compile sass for, e.g. 'lms', 'cms'
    """
    if system not in ('lms', 'cms'):
        raise ValueError('"system" must either be "lms" or "cms"')

    sass_dir = path(system) / "static" / "sass"
    css_dir = path(system) / "static" / "css"

    dirs = [{
        "sass_source_dir": sass_dir,
        "css_destination_dir": css_dir,
        "lookup_paths": SASS_LOOKUP_DEPENDENCIES.get(system, []) + [
            sass_dir / "partials",
            sass_dir,
        ],
    }]

    # The LMS additionally ships a standalone certificates stylesheet.
    if system == 'lms':
        cert_root = path(system) / "static" / "certificates"
        dirs.append({
            "sass_source_dir": cert_root / "sass",
            "css_destination_dir": cert_root / "css",
            "lookup_paths": [
                sass_dir / "partials",
                sass_dir
            ],
        })
    return dirs
def get_watcher_dirs(theme_dirs=None, themes=None):
"""
Return sass directories that need to be added to sass watcher.
Example:
>> get_watcher_dirs('/edx/app/edx-platform/themes', ['red-theme'])
[
'common/static',
'common/static/sass',
'lms/static/sass',
'lms/static/sass/partials',
'/edx/app/edxapp/edx-platform/themes/red-theme/lms/static/sass',
'/edx/app/edxapp/edx-platform/themes/red-theme/lms/static/sass/partials',
'cms/static/sass',
'cms/static/sass/partials',
'/edx/ |
cryptoprojects/ultimateonlinecash | test/functional/signrawtransactions.py | Python | mit | 7,187 | 0.002644 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test transaction signing using the signrawtransaction RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class SignRawTransactionsTest(BitcoinTestFramework):
    """Exercises the ``signrawtransaction`` RPC: a fully-signable transaction,
    one with invalid/missing input scripts, and a segwit transaction whose
    inputs cannot be signed by this node."""

    def set_test_params(self):
        # Single node on a fresh chain; no mining or funding is needed since
        # the raw transactions below are never broadcast.
        self.setup_clean_chain = True
        self.num_nodes = 1

    def successful_signing_test(self):
        """Create and sign a valid raw transaction with one input.

        Expected results:

        1) The transaction has a complete set of signatures
        2) No script verification error occurred"""
        # WIF private keys matching the scriptPubKeys of the two inputs below.
        privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N', 'cVKpPfVKSJxKqVpE9awvXNWuLHCa5j5tiE7K6zbUSptFpTEtiFrA']

        inputs = [
            # Valid pay-to-pubkey scripts
            {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
             'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
            {'txid': '83a4f6a6b73660e13ee6cb3c6063fa3759c50c9b7521d0536022961898f4fb02', 'vout': 0,
             'scriptPubKey': '76a914669b857c03a5ed269d5d85a1ffac9ed5d663072788ac'},
        ]

        outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}

        rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
        rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys)

        # 1) The transaction has a complete set of signatures
        assert 'complete' in rawTxSigned
        assert_equal(rawTxSigned['complete'], True)

        # 2) No script verification error occurred
        assert 'errors' not in rawTxSigned

    def script_verification_error_test(self):
        """Create and sign a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.

        Expected results:

        3) The transaction has no complete set of signatures
        4) Two script verification errors occurred
        5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
        6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)"""
        privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']

        inputs = [
            # Valid pay-to-pubkey script
            {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
            # Invalid script
            {'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
            # Missing scriptPubKey
            {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
        ]

        # scriptPubKeys supplied separately to signrawtransaction; the third
        # input intentionally has no entry here.
        scripts = [
            # Valid pay-to-pubkey script
            {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
             'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
            # Invalid script
            {'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
             'scriptPubKey': 'badbadbadbad'}
        ]

        outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}

        rawTx = self.nodes[0].createrawtransaction(inputs, outputs)

        # Make sure decoderawtransaction is at least marginally sane
        decodedRawTx = self.nodes[0].decoderawtransaction(rawTx)
        for i, inp in enumerate(inputs):
            assert_equal(decodedRawTx["vin"][i]["txid"], inp["txid"])
            assert_equal(decodedRawTx["vin"][i]["vout"], inp["vout"])

        # Make sure decoderawtransaction throws if there is extra data
        assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, rawTx + "00")

        rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys)

        # 3) The transaction has no complete set of signatures
        assert 'complete' in rawTxSigned
        assert_equal(rawTxSigned['complete'], False)

        # 4) Two script verification errors occurred
        assert 'errors' in rawTxSigned
        assert_equal(len(rawTxSigned['errors']), 2)

        # 5) Script verification errors have certain properties
        assert 'txid' in rawTxSigned['errors'][0]
        assert 'vout' in rawTxSigned['errors'][0]
        assert 'witness' in rawTxSigned['errors'][0]
        assert 'scriptSig' in rawTxSigned['errors'][0]
        assert 'sequence' in rawTxSigned['errors'][0]
        assert 'error' in rawTxSigned['errors'][0]

        # 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
        assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
        assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
        assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
        assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
        assert not rawTxSigned['errors'][0]['witness']

        # Now test signing failure for transaction with input witnesses
        p2wpkh_raw_tx = "01000000000102fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f00000000494830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac000247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000"

        rawTxSigned = self.nodes[0].signrawtransaction(p2wpkh_raw_tx)

        # 7) The transaction has no complete set of signatures
        assert 'complete' in rawTxSigned
        assert_equal(rawTxSigned['complete'], False)

        # 8) Two script verification errors occurred
        assert 'errors' in rawTxSigned
        assert_equal(len(rawTxSigned['errors']), 2)

        # 9) Script verification errors have certain properties
        assert 'txid' in rawTxSigned['errors'][0]
        assert 'vout' in rawTxSigned['errors'][0]
        assert 'witness' in rawTxSigned['errors'][0]
        assert 'scriptSig' in rawTxSigned['errors'][0]
        assert 'sequence' in rawTxSigned['errors'][0]
        assert 'error' in rawTxSigned['errors'][0]

        # Non-empty witness checked here
        assert_equal(rawTxSigned['errors'][1]['witness'], ["304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee01", "025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee6357"])
        assert not rawTxSigned['errors'][0]['witness']

    def run_test(self):
        self.successful_signing_test()
        self.script_verification_error_test()
if __name__ == '__main__':
    # Entry point: run this test case through the shared test-framework driver.
    SignRawTransactionsTest().main()
|
azogue/esiosdata | tests/test_perfiles_consumo_pvpc.py | Python | mit | 2,973 | 0.003712 | # -*- coding: utf-8 -*-
"""
Test Cases para el cálculo de los perfiles de consumo de referencia para la estimación del reparto horario
del consumo eléctrico, con el fin de calcular el PVPC a aplicar al periodo considerado.
"""
from unittest import TestCase
import pandas as pd
class TestsPerfilesConsumo(TestCase):
    """Tests for the computation of the consumption profiles."""

    def test_0_perfiles_estimados_2017(self):
        """Extraction of the estimated profiles for 2017."""
        from esiosdata.perfilesconsumopvpc import get_data_perfiles_estimados_2017, get_data_perfiles_finales_mes

        perfiles_2017 = get_data_perfiles_estimados_2017(force_download=False)
        print(perfiles_2017)
        self.assertIs(perfiles_2017.empty, False)
        self.assertEqual(round(perfiles_2017.sum().sum(), 3), 4.)  # 4 cols, each one must sum to 1 over the whole year.

        perfiles_2017_02 = get_data_perfiles_finales_mes(2017, 2)
        print(perfiles_2017_02.head())
        self.assertIs(perfiles_2017_02.empty, False)

        # read from disk:
        perfiles_2017_bis = get_data_perfiles_estimados_2017(force_download=True)
        perfiles_2017_bis2 = get_data_perfiles_estimados_2017(force_download=False)
        assert pd.DataFrame(perfiles_2017 == perfiles_2017_bis).all().all()
        assert pd.DataFrame(perfiles_2017_bis2 == perfiles_2017_bis).all().all()

    def test_perfiles_finales(self):
        """Extraction of the final profiles."""
        from esiosdata.perfilesconsumopvpc import get_data_perfiles_finales_mes

        perfiles_finales_2016_11 = get_data_perfiles_finales_mes(2016, 11)
        print(perfiles_finales_2016_11)
        self.assertIs(perfiles_finales_2016_11.empty, False)

    def test_perfiles_estimacion_consumo_horario(self):
        """
        Estimation of hourly consumption from the total consumption over an interval.

        Example of generating hourly consumption values from a total
        consumption figure and the usage profiles.
        """
        from esiosdata.perfilesconsumopvpc import perfiles_consumo_en_intervalo

        ts_0, ts_f = '2016-10-29', '2017-01-24'
        consumo_total_interv_kwh = 836.916
        print('Consumo horario estimado para el intervalo {} -> {}, con E={:.3f} kWh'
              .format(ts_0, ts_f, consumo_total_interv_kwh))

        # final profiles:
        perfs_interv = perfiles_consumo_en_intervalo(ts_0, ts_f)
        print(perfs_interv.head())
        print(perfs_interv.tail())

        # final profiles for a single month:
        perfs_interv = perfiles_consumo_en_intervalo(ts_0, '2016-10-30')
        print(perfs_interv.head())
        print(perfs_interv.tail())

        # Estimate using profile A:
        suma_perfiles_interv = perfs_interv['COEF. PERFIL A'].sum()
        consumo_estimado = pd.Series(perfs_interv['COEF. PERFIL A'] * consumo_total_interv_kwh / suma_perfiles_interv)
        print(consumo_estimado)
        self.assertIs(consumo_estimado.empty, False)
|
samn/spectral-workbench | webserver/public/lib/bespin-0.9a2/lib/dryice/tests/test_combiner.py | Python | gpl-3.0 | 2,130 | 0.000939 | # ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is Bespin.
#
# The Initial Developer of the Original Code is
# Mozilla.
# Portions created by the Initial Developer are Copyright (C) 2009
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#
# Alternatively, the contents of this file may be used under the terms of
# eithe | r the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decisio | n by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
from cStringIO import StringIO
from path import path
from dryice.combiner import Package, combine_files
from dryice.plugins import Plugin
def test_package_index_generation():
    # Verify that combine_files synthesizes an "index" module for a plugin
    # that ships without one, and that no tiki.main entry point is emitted.
    # NOTE: this file is Python 2 (bare ``print`` statement below).
    p = path(__file__).dirname() / "noindexapp"
    output = StringIO()
    plugin = Plugin("noindexapp", p, dict(name="testing"))
    combine_files(output, StringIO(), plugin, p)
    combined = output.getvalue()
    print combined
    assert 'tiki.module("noindexapp:index"' in combined
    assert 'tiki.main' not in combined
|
migueldvb/piernik | bin/qa.py | Python | gpl-3.0 | 15,798 | 0.003735 | #!/usr/bin/env python
import re
import sys
import hashlib
import subprocess as sp
import numpy as np
debug = False
typ1 = np.dtype([('name', 'a50'), ('beg', 'i'), ('end', 'i'), ('type', 'a4')])
# starts with spaces or spaces and one of { 'end', 'pure', ... }
# if function it can have a type next goes subroutine or function or type
test_for_routines = re.compile('''
^\s{0,12}(|end|pure|elemental|recursive|real|logical|integer)\s
(|pure|elemental|recursive|real|logical|integer)(|\s)
(subroutine|function|type(,|\s))
''', re.VERBOSE)
# starts with spaces or spaces and one of { 'end', 'pure', ... }
# next goes subroutine or function or type
test_for_interfaces = re.compile('''
^\s{0,12}(|end|abstract)\s
interface
''', re.VERBOSE)
# test_for_routines = re.compile('''
# ^(?!\s{0,9}!).*(subroutine|function|type(,|\s::))
# ''',re.VERBOSE)
module_body = re.compile(
'''^(module|contains|program)''', re.VERBOSE)
just_end = re.compile('''^\s{0,9}end''', re.IGNORECASE)
have_implicit = re.compile('''implicit\snone''', re.IGNORECASE)
have_privpub = re.compile('''^\s{0,9}(public|private)''', re.VERBOSE)
have_pub = re.compile('''^\s{0,9}public''', re.VERBOSE)
have_priv = re.compile('''^\s{0,9}private\s::''', re.VERBOSE)
remove_warn = re.compile('''(?!.*QA_WARN .+)''', re.VERBOSE)
have_global_public = re.compile('''^\s{0,9}public(?!.*::)''', re.VERBOSE)
depr_syntax_1 = re.compile('''
^\s{1,12}(?i)(?:real(?:\s|,)|integer(?:\s|,)|logical
(?:\s|,|\()|character(?:\s|,))(?!.*::)
''', re.IGNORECASE)
depr_syntax_2 = re.compile(
'''^\s{1,12}(?i)use[\s](?!.*only)''', re.IGNORECASE)
depr_syntax_3 = re.compile(
'''^\s{1,12}(?i)character(?![(])''', re.IGNORECASE)
is_function = re.compile('''(?i)\sfunction\s''', re.IGNORECASE)
not_function = re.compile('''(?!.*function)''', re.IGNORECASE)
tab_char = re.compile('\t')
has_use = re.compile("^\s{1,12}(?i)use\s")
have_label = re.compile('^[0-9]', re.VERBOSE)
crude_write = re.compile("write *\( *\*", re.IGNORECASE)
magic_integer = re.compile("\(len=[1-9]", re.IGNORECASE)
continuation = re.compile('&$', re.VERBOSE)
implicit_save = re.compile('''
(?i)(?:real(?:\s|,)|integer(?:\s|,)|logical(?:\s|,|\()|character(?:\s|,)).*
::.*=(|\s)[0-9]
''', re.IGNORECASE)
not_param_nor_save = re.compile("(?!.*(parameter|save))", re.IGNORECASE)
nasty_spaces = [
re.compile("end\s{1,}do", re.IGNORECASE), "enddo",
re.compile("end\s{1,}if", re.IGNORECASE), "endif",
re.compile("end\s{1,}while", re.IGNORECASE), "endwhile",
re.compile("end\s{1,}where", re.IGNORECASE), "endwhere",
re.compile("only\s{1,}:", re.IGNORECASE), "only:",
re.compile("if(|\s{2,})\(", re.IGNORECASE), "if (",
re.compile("where(|\s{2,})\(", re.IGNORECASE), "where (",
re.compile("while(|\s{2,})\(", re.IGNORECASE), "while (",
re.compile("forall(|\s{2,})\(", re.IGNORECASE), "forall (",
re.compile("\scase(|\s{2,})\(", re.IGNORECASE), " case ("
]
class bcolors | :
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def disable(self):
self.HEADER = ''
self.OKBLUE = ''
self.OKGREEN = ''
self.WARNING = ''
self.FAIL = ''
| self.ENDC = ''
b = bcolors()
def remove_binaries(files):
    # Filter *files* down to text files, warning about (and dropping) the rest.
    # Uses ``file -bi`` and keeps entries whose mime type starts with 'text'.
    # NOTE: this file is Python 2 (bare ``print`` below); 'list' and 'file'
    # shadow builtins, kept as-is to stay byte-identical.
    list = []
    for file in files:
        # shell=True with string concatenation: file names are assumed to be
        # trusted, locally generated paths.
        checkFile = sp.Popen('file -bi ' + file, stdout=sp.PIPE, shell=True,
                             executable="/bin/bash")
        if not checkFile.communicate()[0].startswith('text'):
            print b.WARNING + "QA: " + b.ENDC + file + \
                " is not a text file. I will not test it."
        else:
            list.append(file)
    return list
def select_sources(files):
    """Return only the Fortran sources (names ending in .f90/.F90)."""
    f90_suffix = re.compile("F90$", re.IGNORECASE)
    return [fname for fname in files if f90_suffix.search(fname)]
def wtf(lines, line, rname, fname):
    """Format a QA diagnostic pointing at *line* in *fname*.

    :param lines: either the numpy array of all source lines (the line number
        is then looked up via line_num) or the line number itself.
    :param line: the offending source line; a trailing Fortran '!' comment is
        stripped before formatting.
    :param rname: enclosing routine name, or '' when not inside a routine.
    :param fname: file name reported in the message.
    :returns: a string like " [file:routine]@L12 => code".
    """
    # isinstance instead of `type(...) ==` -- idiomatic and subclass-safe.
    if isinstance(lines, np.ndarray):
        linenum = line_num(lines, line)
    else:
        linenum = lines
    line = line.split("!")[0]  # Strip comments
    if rname == '':
        return " [%s]@L%i => %s" % (fname, linenum, line.strip())
    return " [%s:%s]@L%i => %s" % (fname, rname, linenum, line.strip())
def line_num(lines, line):
    """Return the index of the first occurrence of *line* in the array *lines*."""
    matches = np.flatnonzero(lines == line)
    return matches[0]
def give_warn(s):
    """Wrap *s* in the terminal warning colour codes."""
    return "%s%s%s" % (b.WARNING, s, b.ENDC)
def give_err(s):
    """Wrap *s* in the terminal error colour codes."""
    return "%s%s%s" % (b.FAIL, s, b.ENDC)
def parse_f90file(lines, fname, store):
    """Heuristically split an F90 source into routines and its module body.

    Returns a numpy structured array (dtype ``typ1``) with one record
    (name, begin line, end line, kind) per subroutine/function/type found,
    plus a final record for the module/program body.

    :param lines: numpy array of the file's source lines.
    :param fname: file name, used in warnings appended to *store*.
    :param store: list collecting warning messages.
    """
    if (debug):
        print "[parse_f90file] fname = ", fname
    subs_array = np.zeros((0,), dtype=typ1)
    # All lines that look like routine/type headers or their matching "end"s.
    subs = filter(test_for_routines.search, lines)
    subs_names = []
    subs_types = []
    for f in subs:
        # Only "end <kind> <name>" lines carry both the kind and the name.
        if (just_end.match(f)):
            word = f.strip().split(' ')
            subs_types.insert(0, word[1])
            subs_names.append(word[2])
    for f in subs_names:
        # Lines mentioning this routine name; normally one header + one end.
        cur_sub = filter(re.compile(f).search, subs)
        if (len(cur_sub) > 2):
            if (debug):
                print "[parse_f90file] f, cur_sub = ", f, cur_sub
            # Ambiguous match (the name is a substring of another routine's
            # name): scan for the exact "end <kind> ... <name>" line.
            for index in range(0, len(cur_sub)):
                if just_end.match(cur_sub[index]):
                    if cur_sub[index].split()[1] == subs_types[-1] and \
                            cur_sub[index][-len(f):] == f:
                        break
        else:
            index = 1
        obj = (f, line_num(lines, cur_sub[index - 1]), line_num(
            lines, cur_sub[index]), subs_types.pop())
        subs_array = np.append(subs_array, np.array([obj], dtype=typ1))
    if (debug):
        print "[parse_f90file] subs = ", subs
        print "[parse_f90file] subs_names = ", subs_names
    # Locate the module/program body markers ('module', 'contains', 'program').
    mod = filter(module_body.match, lines)
    if (len(mod) > 1):
        obj = (mod[0].strip().split(" ")[1],
               line_num(lines, mod[0]),
               line_num(lines, mod[1]),
               mod[0].strip().split(" ")[0][0:3]
               )
        subs_array = np.append(subs_array, np.array([obj], dtype=typ1))
    elif (len(mod) == 1):
        # Single marker: the body is taken to extend to the end of the file.
        obj = (mod[0].strip(
        ).split(" ")[1], line_num(lines, mod[0]), len(lines), 'mod')
        subs_array = np.append(subs_array, np.array([obj], dtype=typ1))
    else:
        store.append(
            give_warn("QA: ") + "[%s] => module body not found!" % fname)
    return subs_array
def qa_check_id(store, fname):
    """Append an error to *store* when *fname* lacks the svn:keywords Id property.

    NOTE(review): ``pysvn`` is never imported in this file, and the only call
    site (inside qa_checks) is commented out. If re-enabled, the
    ``pysvn.Client()`` call below would raise NameError *outside* the try
    block -- confirm and add the import before re-enabling.
    """
    client = pysvn.Client()
    entry = client.info('.')  # NOTE(review): result unused; looks like a leftover check
    try:
        for f in client.proplist(fname):
            pname, props = f
            fail = False
            if 'svn:keywords' in props:
                if 'Id' not in props['svn:keywords']:
                    fail = True
            else:
                fail = True
            if fail:
                store.append(
                    give_err("QA: ") + "%s lacks svn:keywords Id" % (pname))
    except:
        # Bare except: any svn failure (e.g. not a working copy) is ignored.
        return 0
def qa_checks(files, options):
print b.OKBLUE + \
'"I am the purifier, the light that clears all shadows."' + \
' - seal of cleansing inscription' + b.ENDC
runpath = sys.argv[0].split("qa.py")[0]
files = remove_binaries(files)
# ToDo: check files other than F90
f90files = select_sources(files)
warns = []
errors = []
for f in f90files:
# qa_check_id(errors, f)
pfile = []
lines = open(f, 'r').readlines()
for line in lines:
# things done in "in-place"
line = line.rstrip() # that removes trailing spaces
for i in range(0, len(nasty_spaces), 2):
line = re.sub(nasty_spaces[i], nasty_spaces[
i + 1], line) # remove nasty spaces
pfile.append(line)
if lines != [line + '\n' for line in pfile]:
fp = open(f, 'w')
for line in pfile:
fp.write(line + '\n')
fp.close()
# f = f.split('/')[-1]
# checks for f90 file as whole
qa_nonconforming_tabs(np.array(pfile), '', errors, f)
qa_labels(np.array(pfile), '', errors, f)
qa_crude_write(np.array(pfile), '', warns, f)
qa_magic_integers(np.array(pfile), '', warns, f)
# checks that require parsing f90 files
clean_ |
Yelp/paasta | paasta_tools/mesos/cluster.py | Python | apache-2.0 | 2,143 | 0.000467 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import itertools
from . import exceptions
async def get_files_for_tasks(task_list, file_list, max_workers):
    """Asynchronously yield file objects for every (task, filename) pair that exists.

    For each task in *task_list* and each name in *file_list*, fetch the file
    from the task's slave and yield it if it exists. Tasks whose slave has
    disappeared are reported to stdout and skipped.

    :param task_list: tasks to inspect.
    :param file_list: file names to look up on each task.
    :param max_workers: accepted for interface compatibility; concurrency is
        currently governed by asyncio, not a worker pool.
    :raises exceptions.FileNotFoundForTaskException: when no (task, file)
        pair yielded an existing file.
    """
    no_files_found = True

    async def process(task_fname):
        task, fname = task_fname
        try:
            fobj = await task.file(fname)
        except exceptions.SlaveDoesNotExist:
            if task is None:
                print(f"(Unknown Task):{fname} (Slave no longer exists)")
            else:
                # Bug fix: previously interpolated the whole (task, fname)
                # tuple here instead of just the file name.
                print(f"{task['id']}:{fname} (Slave no longer exists)")
            raise exceptions.SkipResult
        if await fobj.exists():
            return fobj

    # Cartesian product of tasks x file names.
    elements = itertools.chain(
        *[[(task, fname) for fname in file_list] for task in task_list]
    )
    futures = [asyncio.ensure_future(process(element)) for element in elements]
    if futures:
        for future in asyncio.as_completed(futures):
            try:
                result = await future
                if result:
                    no_files_found = False
                    yield result
            except exceptions.SkipResult:
                pass
    if no_files_found:
        raise exceptions.FileNotFoundForTaskException(
            "None of the tasks in {} contain the files in list {}".format(
                ",".join([task["id"] for task in task_list]), ",".join(file_list)
            )
        )
|
smidm/camera.py | docs/conf.py | Python | mit | 7,772 | 0.007334 | # -*- coding: utf-8 -*-
#
# camera.py documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 8 12:43:10 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# The project root (one level up from docs/) is prepended so autodoc can
# import the camera.py module without installing it.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'camera.py'
copyright = u'2014, Matej Smid'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'camerapydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'camerapy.tex', u'camera.py Documentation',
u'Matej Smid', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'camerapy', u'camera.py Documentation',
[u'Matej Smid'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'camerapy', u'camera.py Documentation',
u'Matej Smid', 'camerapy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
chelberserker/mipt_cs_on_python | lab 7/ex7.py | Python | gpl-2.0 | 275 | 0.054545 | import math
import numpy as np
import matplotlib.pyplot as plt
def f(x):
s = 0
b = 0.5
a = 3
for n in range(1, 50):
s += (b**n)*(math.cos((a**n)*(math.pi)*x))
| y = s
return y
x=np.arange(-2,2,0.01)
y = [f(_x) for _x in x]
plt.grid(True)
plt.plot(x, y)
plt.show()
| |
general-language-syntax/GLS | test/integration/ForEachKeyStart/for each key start.py | Python | mit | 36 | 0.027778 | #
for k | ey in container:
| # ...
#
|
jdber1/opendrop | opendrop/app/common/image_acquisition/image_acquisition.py | Python | gpl-3.0 | 3,443 | 0.001744 | # Copyright © 2020, Joseph Berry, Rico Tabor (opendrop.dev@gmail.com)
# OpenDrop is released under the GNU GPL License. You are free to
# modify and distribute the code, but always under the same license
#
# If you use this software in your research, please cite the following
# journal articles:
#
# J. D. Berry, M. J. Neeson, R. R. Dagastine, D. Y. C. Chan and
# R. F. Tabor, Measurement of surface and interfacial tension using
# pendant drop tensiometry. Journal of Colloid and Interface Science 454
# (2015) 226–237. https://doi.org/10.1016/j.jcis.2015.05.012
#
# E. Huang, T. Denning, A. Skoufis, J. Qi, R. R. Dagastine, R. F. Tabor
# and J. D. Berry, OpenDrop: Open-source software for pendant drop
# tensiometry & contact angle measurements, submitted to the Journal of
# Open Source Software
#
# These citations help us not only to understand who is using and
# developing OpenDrop, and for what purpose, but also to justify
# continued development of this code and other open source resources.
#
# OpenDrop is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this software. If not, see <https://www.gn | u.org/licenses/>.
from typing import Optional
from gi.repository import Gtk, GObject
from injector import inject
from opendrop.app.common.services.acquisition import AcquirerType, ImageAcquisitionService, ImageAcquirer
from opendrop.appfw import Presenter, TemplateChild, component
@component(
template_path='./image_acquisition.ui',
)
class ImageAcquisitionPresenter(Presenter):
combo_box = TemplateChild('combo_box') # type: Templat | eChild[Gtk.ComboBoxText]
@inject
def __init__(self, acquisition_service: ImageAcquisitionService) -> None:
self.acquisition_service = acquisition_service
self.event_connections = ()
def after_view_init(self) -> None:
self.populate_combobox()
self.event_connections = (
self.acquisition_service.bn_acquirer.on_changed.connect(self.acquisition_service_acquirer_changed),
)
self.combo_box.connect('notify::active-id', self.combo_box_active_id_changed)
self.acquisition_service_acquirer_changed()
def combo_box_active_id_changed(self, *_) -> None:
active_id = self.combo_box.props.active_id
if active_id is not None:
self.acquisition_service.use_acquirer_type(AcquirerType[active_id])
else:
self.acquisition_service.use_acquirer_type(None)
def acquisition_service_acquirer_changed(self, *_) -> None:
acquirer_type = self.acquisition_service.get_acquirer_type()
if acquirer_type is not None:
self.combo_box.props.active_id = acquirer_type.name
else:
self.combo_box.props.active_id = None
self.notify('acquirer')
@GObject.Property(flags=GObject.ParamFlags.READABLE|GObject.ParamFlags.EXPLICIT_NOTIFY)
def acquirer(self) -> Optional[ImageAcquirer]:
return self.acquisition_service.bn_acquirer.get()
def populate_combobox(self) -> None:
for typ in AcquirerType:
self.combo_box.append(id=typ.name, text=typ.display_name)
def destroy(self, *_) -> None:
for conn in self.event_connections:
conn.disconnect()
|
VulcanoAhab/delphi | workers/serializers.py | Python | mit | 2,526 | 0.016627 | from rest_framework import serializers
from workers.models import (TaskConfig,
Task,
Job,
TaskProducer)
from grabbers.serializers import (MapperSerializer,
SequenceSerializer)
from grabbers.models import Sequence
from drivers.serializers import DriverSerializer
from drivers.models import Driver
from django.core.exceptions import ObjectDoesNotExist
# == helpers ==
from delphi.utils.lizers import _required_fields, _get_or_instance
class TaskConfigDetailSerializer(serializers.ModelSerializer):
'''
'''
driver=DriverSerializer()
sequence=SequenceSerializer()
class Meta:
model=TaskConfig
#no | proxy by api yet - missing fields::proxy,network_cap
fields=('name','driver','sequence','mapper','round_limit')
def create(self, validated_data):
'''
'''
name=validated_data['name']
try:
task_config=TaskConfig.objects.get(name=name)
print("[-] We already this guy i | n db")
return task_config
except TaskConfig.DoesNotExist:
task_config=TaskConfig(name=name)
driver=_get_or_instance(Driver,'name',
validated_data['driver'],DriverSerializer)
sequence=_get_or_instance(Sequence,'name',
validated_data['sequence'],
SequenceSerializer)
task_config.driver=driver
task_config.sequence=sequence
return task_config
class TaskConfigListSerializer(serializers.HyperlinkedModelSerializer):
'''
'''
class Meta:
model=TaskConfig
fields=('url', 'name', 'sequence', 'driver', 'mapper','round_limit')
extra_kwargs = {
'url': {'view_name': 'api:task_config-detail', 'lookup_field':'name'},
'driver': {'view_name': 'api:driver-detail', 'lookup_field':'name'},
'sequence':{'view_name': 'api:sequence-detail', 'lookup_field':'name'},
'mapper':{'view_name':'api:mapper-detail', 'lookup_field':'name'},
}
class JobSerializer(serializers.ModelSerializer):
'''
'''
class Meta:
model=Job
fields=('status','name')
class TaskSerializer(serializers.ModelSerializer):
'''
'''
config=TaskConfigDetailSerializer()
job=JobSerializer()
class Meta:
model=Task
fields=('target_url', 'config', 'status', 'job')
|
flysmoke/ijizhang | ijizhang_prj/jizhang/tests.py | Python | mit | 74 | 0.013514 | #cod | ing=utf-8
from django.test import TestCase
# Create your tests here.
| |
rullmann/bundlewrap-collectd | items.py | Python | mit | 5,646 | 0.002657 | pkg_dnf = {
'collectd': {},
'collectd-chrony': {},
'collectd-curl': {},
'collectd-curl_json': {},
'collectd-curl_xml': {},
'collectd-netlink': {},
'rrdtool': {},
}
if node.os == 'fedora' and node.os_version >= (26):
pkg_dnf['collectd-disk'] = {}
svc_systemd = {
'collectd': {
'needs': ['pkg_dnf:collectd'],
},
}
files = {
'/etc/collectd.conf': {
'source': 'collectd.conf',
'mode': '0600',
'content_type': 'mako',
'context': {
'collectd': node.metadata.get('collectd', {}),
},
'needs': ['pkg_dnf:collectd', 'pkg_dnf:rrdtool'],
'triggers': ['svc_systemd:collectd:restart'],
},
'/etc/collectd.d/nut.conf': {
'delete': True,
'needs': ['pkg_dnf:collectd'],
},
}
actions = {}
directories = {
'/etc/collectd.d/plugins': {
'mode': '0755',
'needs': ['pkg_dnf:collectd'],
},
'/etc/collectd.d/types': {
'mode': '0755',
'needs': ['pkg_dnf:collectd'],
},
}
git_deploy = {}
if node.metadata.get('collectd', {}).get('write_rrd', True):
pkg_dnf['collectd-rrdtool'] = {
'triggers': ['svc_systemd:collectd:restart'],
}
if node.metadata.get('collectd', {}).get('client'):
files['/etc/collectd.d/client.conf'] = {
'source': 'client.conf',
'mode': '0600',
'content_type': 'mako',
'context': {
'client': node.metadata.get('collectd', {}).get('client', {}),
},
'needs': ['pkg_dnf:collectd'],
'triggers': ['svc_systemd:collectd:restart'],
}
if node.metadata.get('collectd', {}).get('server'):
files['/etc/collectd.d/server.conf'] = {
'source': 'server.conf',
'mode': '0600',
'content_type': 'mako',
'context': {
'server': node.metadata.get('collectd', {}).get('server', {}),
},
'needs': ['pkg_dnf:collectd'],
'triggers': ['svc_systemd:collectd:restart'],
}
files['/etc/collectd.d/collectd.auth'] = {
'source': 'server_auth/{}.auth'.format(node.name),
'mode': '0600',
'needs': ['pkg_dnf:collectd'],
'triggers': ['svc_systemd:collectd:restart'],
}
if node.has_bundle('firewalld'):
port = node.metadata.get('collectd', {}).get('server', {}).get('port', '25826')
if node.metadata.get('collectd', {}).get('server', {}).get('firewalld_permitted_zone'):
zone = | node.metadata.get('collectd', {}).get('server', {}).get('firewalld_permitted_zone')
actions['firewalld_add_collectd_zone_{}'.format(zone)] = {
'command': 'firewall-cmd --permanent --zone={} --add-port={}/udp'.format(zone, port | ),
'unless': 'firewall-cmd --zone={} --list-ports | grep {}/udp'.format(zone, port),
'cascade_skip': False,
'needs': ['pkg_dnf:firewalld'],
'triggers': ['action:firewalld_reload'],
}
elif node.metadata.get('firewalld', {}).get('default_zone'):
default_zone = node.metadata.get('firewalld', {}).get('default_zone')
actions['firewalld_add_collectd_zone_{}'.format(default_zone)] = {
'command': 'firewall-cmd --permanent --zone={} --add-port={}/udp'.format(default_zone, port),
'unless': 'firewall-cmd --zone={} --list-ports | grep {}/udp'.format(default_zone, port),
'cascade_skip': False,
'needs': ['pkg_dnf:firewalld'],
'triggers': ['action:firewalld_reload'],
}
elif node.metadata.get('firewalld', {}).get('custom_zones', False):
for interface in node.metadata['interfaces']:
custom_zone = node.metadata.get('interfaces', {}).get(interface).get('firewalld_zone')
actions['firewalld_add_collectd_zone_{}'.format(custom_zone)] = {
'command': 'firewall-cmd --permanent --zone={} --add-port={}/udp'.format(custom_zone, port),
'unless': 'firewall-cmd --zone={} --list-ports | grep {}/udp'.format(custom_zone, port),
'cascade_skip': False,
'needs': ['pkg_dnf:firewalld'],
'triggers': ['action:firewalld_reload'],
}
else:
actions['firewalld_add_https'] = {
'command': 'firewall-cmd --permanent --add-port={}/udp'.format(port),
'unless': 'firewall-cmd --list-ports | grep {}/udp'.format(port),
'cascade_skip': False,
'needs': ['pkg_dnf:firewalld'],
'triggers': ['action:firewalld_reload'],
}
if node.metadata.get('collectd', {}).get('cgp', {}):
cgp_install_path = node.metadata.get('collectd', {}).get('cgp', {}).get('install_path')
directories['{}'.format(cgp_install_path)] = {
'mode': '0755',
}
git_deploy['{}'.format(cgp_install_path)] = {
'needs': [
'directory:{}'.format(cgp_install_path)
],
'repo': 'https://github.com/pommi/CGP.git',
'rev': 'master',
}
files['{}/conf/config.local.php'.format(cgp_install_path)] = {
'source': 'cgp_config',
'mode': '0644',
'needs': ['git_deploy:{}'.format(cgp_install_path)],
}
if node.has_bundle('monit'):
files['/etc/monit.d/collectd'] = {
'source': 'monit',
'mode': '0600',
'content_type': 'mako',
'context': {
'server': node.metadata.get('collectd', {}).get('server', {}),
},
'triggers': ['svc_systemd:monit:restart'],
}
|
erral/eitbapi | eitbapi/cors.py | Python | gpl-2.0 | 1,856 | 0.001078 | from pyramid.security impo | rt NO_PERMISSION_REQUIRED
def includeme(config):
config.add_directive("add_cors_preflight_handler", add_cors_preflight_handler)
config.add_route_predicate("cors_preflight", CorsPreflightPredicate)
confi | g.add_subscriber(add_cors_to_response, "pyramid.events.NewResponse")
class CorsPreflightPredicate(object):
def __init__(self, val, config):
self.val = val
def text(self):
return "cors_preflight = %s" % bool(self.val)
phash = text
def __call__(self, context, request):
if not self.val:
return False
return (
request.method == "OPTIONS"
and "Origin" in request.headers
and "Access-Control-Request-Method" in request.headers
)
def add_cors_preflight_handler(config):
config.add_route(
"cors-options-preflight", "/{catch_all:.*}", cors_preflight=True,
)
config.add_view(
cors_options_view,
route_name="cors-options-preflight",
permission=NO_PERMISSION_REQUIRED,
)
def add_cors_to_response(event):
request = event.request
response = event.response
if "Origin" in request.headers:
response.headers[
"Access-Control-Expose-Headers"
] = "Content-Type,Date,Content-Length,Authorization,X-Request-ID"
response.headers["Access-Control-Allow-Origin"] = request.headers["Origin"]
response.headers["Access-Control-Allow-Credentials"] = "true"
def cors_options_view(context, request):
response = request.response
if "Access-Control-Request-Headers" in request.headers:
response.headers["Access-Control-Allow-Methods"] = "OPTIONS,HEAD,GET"
response.headers[
"Access-Control-Allow-Headers"
] = "Content-Type,Accept,Accept-Language,Authorization,X-Request-ID"
return response
|
patrickwolf/python-tutorial | pyintro_a_basics/i_string_operators.py | Python | mit | 338 | 0.017751 | '''
@summary: Python Intro - Operators
'''
# --------------------- | -
# operator + - concatenation
# ----------------------
print 5 + 10.2 # 15.2
print "abc" + "def" # abcdef
# ---------------------------
# operator * - repeat
# ---------------------------
print "abc,"*5 # abc,abc,abc,abc,abc,
print ( | 5,3,2)*2 # (5, 3, 2, 5, 3, 2)
|
lablup/sorna-manager | src/ai/backend/gateway/types.py | Python | lgpl-3.0 | 720 | 0 | from | __future__ import annotations
from typing import (
Any,
Awaitable, Callable, Iterable,
AsyncIterator,
Tuple,
Mapping,
)
from aiohttp import web
import aiohttp_cors
WebReq | uestHandler = Callable[
[web.Request],
Awaitable[web.StreamResponse]
]
WebMiddleware = Callable[
[web.Request, WebRequestHandler],
Awaitable[web.StreamResponse]
]
CORSOptions = Mapping[str, aiohttp_cors.ResourceOptions]
AppCreator = Callable[
[CORSOptions],
Tuple[web.Application, Iterable[WebMiddleware]]
]
PluginAppCreator = Callable[
[Mapping[str, Any], CORSOptions],
Tuple[web.Application, Iterable[WebMiddleware]]
]
CleanupContext = Callable[[web.Application], AsyncIterator[None]]
|
DraXus/andaluciapeople | blogapp/views.py | Python | agpl-3.0 | 5,160 | 0.014341 | from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext as _
from django.core.paginator import Paginator, InvalidPage
from django.core.exceptions import ObjectDoesNotExist
from blogapp.models import *
from blogapp.utilities import *
from blogapp.forms import CommentForm
from andaluciapeople.sitios.models import DatosUsuario
BLOG_TPL = 'blog.html'
#P_LIMIT = int(options('posts_per_page'))
P_LIMIT = 10
def homepage(request):
pg = Paginator(Post.objects.all(), P_LIMIT)
try:
page_q = int(request.GET | .get('page', 1))
except ValueError:
page_q = 1
try:
p = pg.page(page_q)
posts = p.object_list
except InvalidPage:
return not_found(request, message=_("Sorry, the page does not exist."))
context = {'posts': posts,
'page': p,
'options': options(),
}
return render_to_response | (BLOG_TPL, context, context_instance=RequestContext(request))
def post_by_name(request, post_name):
try:
post = Post.objects.get(name=post_name)
except ObjectDoesNotExist:
return not_found(request, message=_("Sorry, the requested post does not exist."))
#check if comments are enabled
if not post.disable_comments:
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
#result will either be a http redirect or an error string
result = process_comment(request, post, form)
if isinstance(result, unicode):
form = CommentForm(request.POST, auto_id=False)
form.errors['generic'] = result
else:
#redirect
return result
else:
form.errors['generic'] = _("Check that the required fields are filled in correctly.")
else:
#values are pickled to enable unicode strings to be stored
#unpickle_cookies(request)
#form = CommentForm(request.COOKIES, auto_id=False)
if request.user.is_authenticated():
form = CommentForm(initial = {
"author_name": request.user.username,
"author_email": request.user.email,
"author_website": DatosUsuario.objects.get(user=request.user).web
})
else:
form = CommentForm()
else:
form = None
#takes comments where comment_type is 'comment' or 'linkback'
comments = post.comment_set.filter(comment_type='comment') | post.comment_set.filter(comment_type='linkback')
context = {
'posts': [post],
'comments': comments,
'title': post.title,
'comment_form': form,
'options': options(),
}
return render_to_response(BLOG_TPL, context, context_instance=RequestContext(request))
def posts_by_tag(request, tag_name):
try:
tag = Tag.objects.get(name=tag_name)
except ObjectDoesNotExist:
return not_found(request, message=_("Sorry, the tag you are searching for does not exist."))
pg = Paginator(tag.post_set.all(), P_LIMIT)
try:
page_q = int(request.GET.get('page', 1))
except ValueError:
page_q = 1
try:
p = pg.page(page_q)
posts = p.object_list
except InvalidPage:
return not_found(request, message=_("Sorry, the page does not exist."))
context = {'posts': posts,
'page': p,
'options': options(),
}
return render_to_response(BLOG_TPL, context, context_instance=RequestContext(request))
def posts_by_date(request, year, month):
posts = Post.objects.filter(date__year=year, date__month=month)
if posts:
pg = Paginator(posts, P_LIMIT)
try:
page_q = int(request.GET.get('page', 1))
except ValueError:
page_q = 1
try:
p = pg.page(page_q)
posts = p.object_list
except InvalidPage:
return not_found(request, message=_("Sorry, the page does not exist."))
context = {
'posts': posts,
'page': p,
'options': options(),
}
return render_to_response(BLOG_TPL, context, context_instance=RequestContext(request))
else:
return not_found(request, message=_("Sorry, there are no posts written that month."))
def page_by_name(request, page_name):
try:
page = Page.objects.get(name=page_name)
except ObjectDoesNotExist:
return not_found(request, message=_("Sorry, the requested page does not exist."))
context = {
'page': page,
'title': page.title,
'options': options(),
}
return render_to_response(BLOG_TPL, context, context_instance=RequestContext(request))
def feed(request, feed_type):
posts = Post.objects.all()[:10]
template = "feeds/%s.xml" % feed_type
m_type = "application/xml"#"application/%s+xml" % feed_type
updated = posts[0].date #used by atom
context = {
'posts': posts,
'updated': updated,
'options': options(),
}
return render_to_response(template, context, mimetype=m_type)
|
rossella/neutron | quantum/plugins/nec/agent/nec_quantum_agent.py | Python | apache-2.0 | 4,286 | 0 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 NEC Corporation.
# Based on ryu/openvswitch agents.
#
# Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>
# Copyright | 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or ag | reed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
import socket
import sys
import time
from quantum.agent.linux import ovs_lib
from quantum.common import config as logging_config
from quantum.common import topics
from quantum import context
from quantum.openstack.common import log as logging
from quantum.openstack.common import rpc
from quantum.plugins.nec.common import config
LOG = logging.getLogger(__name__)
class NECQuantumAgent(object):
def __init__(self, integ_br, root_helper, polling_interval):
'''Constructor.
:param integ_br: name of the integration bridge.
:param root_helper: utility to use when running shell cmds.
:param polling_interval: interval (secs) to check the bridge.
'''
self.int_br = ovs_lib.OVSBridge(integ_br, root_helper)
self.polling_interval = polling_interval
self.host = socket.gethostname()
self.agent_id = 'nec-q-agent.%s' % self.host
self.datapath_id = "0x%s" % self.int_br.get_datapath_id()
# RPC network init
self.context = context.get_admin_context_without_session()
self.conn = rpc.create_connection(new=True)
def update_ports(self, port_added=[], port_removed=[]):
"""RPC to update information of ports on Quantum Server"""
LOG.info(_("Update ports: added=%(port_added)s, "
"removed=%(port_removed)s"),
locals())
try:
rpc.call(self.context,
topics.PLUGIN,
{'method': 'update_ports',
'args': {'topic': topics.AGENT,
'agent_id': self.agent_id,
'datapath_id': self.datapath_id,
'port_added': port_added,
'port_removed': port_removed}})
except Exception as e:
LOG.warn(_("update_ports() failed."))
return
def _vif_port_to_port_info(self, vif_port):
return dict(id=vif_port.vif_id, port_no=vif_port.ofport,
mac=vif_port.vif_mac)
def daemon_loop(self):
"""Main processing loop for NEC Plugin Agent."""
old_ports = []
while True:
new_ports = []
port_added = []
for vif_port in self.int_br.get_vif_ports():
port_id = vif_port.vif_id
new_ports.append(port_id)
if port_id not in old_ports:
port_info = self._vif_port_to_port_info(vif_port)
port_added.append(port_info)
port_removed = []
for port_id in old_ports:
if port_id not in new_ports:
port_removed.append(port_id)
if port_added or port_removed:
self.update_ports(port_added, port_removed)
else:
LOG.debug(_("No port changed."))
old_ports = new_ports
time.sleep(self.polling_interval)
def main():
config.CONF(project='quantum')
logging_config.setup_logging(config.CONF)
# Determine which agent type to use.
integ_br = config.OVS.integration_bridge
root_helper = config.AGENT.root_helper
polling_interval = config.AGENT.polling_interval
agent = NECQuantumAgent(integ_br, root_helper, polling_interval)
# Start everything.
agent.daemon_loop()
sys.exit(0)
if __name__ == "__main__":
main()
|
Mitali-Sodhi/CodeLingo | Dataset/python/pincode.py | Python | mit | 3,279 | 0.028667 | import tornado.ioloop
import tornado.web
import json
import MySQLdb
class MainHandler(tornado.web.RequestHandler):
# def fetch_states(self):
# pincode_file = open('pincode.json')
# pincode_json = json.load(pincode_file)
# state_list = {}
# state_list['states'] = []
# state_list['state_count'] = 0
# for line in pincode_json:
# if line['statename'] not in state_list['states'] and line['statename'] not in "NULL":
# state_list['states'].append(line['statename'])
# state_list['state_count'] += 1
# state_list_json = json.dumps(state_list)
# pincode_file.close()
# return state_list_json
# def fetch_districts(self, statename):
# statename = statename.replace('+', ' ')
# pincode_file = open('pincode.json')
# pincode_json = json.load(pincode_file)
# district_list = {}
# district_list['state_name'] = statename
# district_list['districts'] = []
# district_list['districts_count'] = 0
# for line in pincode_json:
# if line['statename'] in statename and line['Districtname'] not in district_list['districts']:
# district_list['districts'].append(line['Districtname'])
# district_list['districts_count'] += 1
# state_list_json = json.dumps(district_list)
# pincode_file.close()
# return state_list_json
# def fetch_places_by_pin(self, pincode):
# pincode_file = open('pincode.json')
# pincode_json = json.load(pincode_file)
# pin_list = {}
# pin_list['pincode'] = pincode
# pin_list['places'] = []
# pin_list['places_count'] = 0
# for line in pincode_json:
# if str(line['pincode']) in pincode:
# pin_list['places'].append(line)
# pin_list['places_count'] += 1
# state_list | _json = json.dumps(pin_list)
# pincode_file.close()
# return state_list_json
def fetch_pincode(self, pincode):
place_list = {}
place_list['status'] = 200
place_list['status_text'] = "Success"
place_list['count'] = 0
place_list['data'] = []
db = MySQLdb.connect(host="localhost", user="root", passwd="password", db="pincode")
cursor = db.cursor()
curso | r.execute('SELECT * from pincode where pincode=%s', (pincode));
for row in cursor.fetchall():
s_list = {}
s_list['Office_Name'] = row[0]
s_list['Pincode'] = int(row[1])
s_list['Office_Type'] = row[2]
s_list['Delivery_Status'] = row[3]
s_list['Division_Name'] = row[4]
s_list['Region_Name'] = row[5]
s_list['Circle_Name'] = row[6]
s_list['Taluk'] = row[7]
s_list['District_Name'] = row[8]
s_list['State_Name'] = row[9]
place_list['data'].append(s_list)
place_list['count'] += 1
self.write(json.dumps(place_list))
def get(self):
#states = self.get_argument('states', None)
#districts = self.get_argument('districts', None)
pin = self.get_argument('pin', None)
if pin:
self.fetch_pincode(pin)
#if not districts and not pin:
# self.write(self.fetch_states())
#if districts and not pin and not states:
# self.write(self.fetch_districts(self.get_argument('districts')))
#if pin and not districts and not states:
# self.write(self.fetch_places_by_pin(self.get_argument('pincode')))
application = tornado.web.Application([
(r"/pincode/", MainHandler),
])
if __name__ == "__main__":
application.listen(8888)
tornado.ioloop.IOLoop.instance().start() |
johngumb/gb3wx | dtmf-generator/src/gb3wxsetup.py | Python | gpl-3.0 | 596 | 0.015101 | import sys
import | os
import subprocess
import time
import string
import datetime
import serial
if __name__=="__main__":
serport = '/dev/ttyUSB0'
ser = serial.Serial(serport)
if len(sys.argv) > 1:
val = True
else:
val = False
start=2730
for i in range(start, 100000):
ser.setRTS(True)
hstr=repr(i)
#hstr="0421"
#p = subprocess.Popen(['./dtmfgen','-dplughw:1','-k%s' % hstr, '-t50'])
#p.wait | ()
time.sleep(0.7)
ser.setRTS(False)
time.sleep(0.4)
|
mercycorps/TolaTables | tola/forms.py | Python | gpl-2.0 | 3,291 | 0.003646 | from crispy_forms.helper import FormHelper
from crispy_forms.layout import *
from crispy_forms.bootstrap import *
from crispy_forms.layout import Layout, Submit, Reset, Div
from django import forms
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from silo.models import TolaUser
from django.contrib.auth.models import User
class RegistrationForm(UserChangeForm):
"""
Form for registering a new account.
"""
def __init__(self, *args, **kwargs):
user = kwargs.pop('initial')
super(RegistrationForm, self).__init__(*args, **kwargs)
del self.fields['password']
print user['username'].is_superuser
# allow country access change for now until we know how we will use this GWL 012617
# if they aren't a super user or User Admin don't let them change countries form field
# if 'User Admin' not in user['username'].groups.values_list('name', flat=True) and not user['username'].is_superuser:
# self.fields['country'].widget.attrs['disabled'] = "disabled"
self.fields['created'].widget.attrs['disabled'] = "disabled"
class Meta:
model = TolaUser
fie | lds = '__all__'
helper = For | mHelper()
helper.form_method = 'post'
helper.form_class = 'form-horizontal'
helper.label_class = 'col-sm-2'
helper.field_class = 'col-sm-6'
helper.form_error_title = 'Form Errors'
helper.error_text_inline = True
helper.help_text_inline = True
helper.html5_required = True
helper.layout = Layout(Fieldset('','title', 'name',
'country'),
Submit('submit', 'Submit', css_class='btn-default'),
Reset('reset', 'Reset', css_class='btn-warning'))
class NewUserRegistrationForm(UserCreationForm):
"""
Form for registering a new account.
"""
class Meta:
model = User
fields = ['first_name', 'last_name','email','username']
def __init__(self, *args, **kwargs):
super(NewUserRegistrationForm, self).__init__(*args, **kwargs)
helper = FormHelper()
helper.form_method = 'post'
helper.form_class = 'form-horizontal'
helper.label_class = 'col-sm-2'
helper.field_class = 'col-sm-6'
helper.form_error_title = 'Form Errors'
helper.error_text_inline = True
helper.help_text_inline = True
helper.html5_required = True
helper.form_tag = False
class NewTolaUserRegistrationForm(forms.ModelForm):
"""
Form for registering a new account.
"""
class Meta:
model = TolaUser
fields = ['title', 'country', 'privacy_disclaimer_accepted']
def __init__(self, *args, **kwargs):
super(NewTolaUserRegistrationForm, self).__init__(*args, **kwargs)
helper = FormHelper()
helper.form_method = 'post'
helper.form_class = 'form-horizontal'
helper.label_class = 'col-sm-2'
helper.field_class = 'col-sm-6'
helper.form_error_title = 'Form Errors'
helper.error_text_inline = True
helper.help_text_inline = True
helper.html5_required = True
helper.form_tag = False
helper.layout = Layout(
Fieldset('Information','title', 'country'),
Fieldset('Privacy Statement','privacy_disclaimer_accepted',),
)
|
uclouvain/osis | ddd/logic/encodage_des_notes/soumission/domain/service/encoder_notes_en_lot.py | Python | agpl-3.0 | 6,960 | 0.003592 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from typing import List, Dict, Set
from base.ddd.utils.business_validator import MultipleBusinessExceptions
from ddd.logic.encodage_des_notes.shared_kernel.domain.model.encoder_notes_rapport import EncoderNotesRapport
from ddd.logic.encodage_des_notes.shared_kernel.domain.service.i_inscription_examen import IInscriptionExamenTranslator
from ddd.logic.encodage_des_notes.shared_kernel.dtos import PeriodeEncodageNotesDTO
from ddd.logic.encodage_des_notes.shared_kernel.repository.i_encoder_notes_rapport import IEncoderNotesRapportRepository
from ddd.logic.encodage_des_notes.soumission.builder.note_etudiant_builder import NoteEtudiantBuilder
from ddd.logic.encodage_des_notes.soumission.builder.note_etudiant_identity_builder import NoteEtudiantIdentityBuilder
from ddd.logic.encodage_des_notes.soumission.commands import EncoderNotesEtudiantCommand
from ddd.logic.encodage_des_notes.soumission.domain.model.note_etudiant import IdentiteNoteEtudiant, NoteEtudiant
from ddd.logic.encodage_des_notes.soumission.domain.service.i_historiser_notes import IHistoriserNotesService
from ddd.logic.encodage_des_notes.soumission.domain.validator.exceptions import \
EncoderNotesEtudiantEnLotLigneBusinessExceptions, EtudiantNonInscritAExamenException
from ddd.logic.encodage_des_notes.soumission.dtos import DesinscriptionExamenDTO
from ddd.logic.encodage_des_notes.soumission.repository.i_note_etudiant import INoteEtudiantRepository
from osis_common.ddd import interface
class EncoderNotesEtudiantEnLot(interface.DomainService):
    """Domain service that encodes a batch of student scores ("notes") in one
    submission, recording both successes and per-line failures in an
    EncoderNotesRapport."""

    @classmethod
    def execute(
            cls,
            cmd: 'EncoderNotesEtudiantCommand',
            note_etudiant_repo: 'INoteEtudiantRepository',
            periode_soumission: 'PeriodeEncodageNotesDTO',
            historiser_note_service: 'IHistoriserNotesService',
            inscription_examen_translator: 'IInscriptionExamenTranslator',
            rapport: 'EncoderNotesRapport',
            rapport_repository: 'IEncoderNotesRapportRepository'
    ) -> List['IdentiteNoteEtudiant']:
        """Encode every note in ``cmd.notes``.

        Returns the identities of the notes actually persisted (only those
        whose value changed).  Rejected lines are accumulated and raised all
        at once as a MultipleBusinessExceptions *after* the report is saved,
        so one bad line does not abort the rest of the batch.
        """
        identite_builder = NoteEtudiantIdentityBuilder()
        identites_notes_a_encoder = [
            identite_builder.build_from_encoder_note_command(cmd, cmd_note)
            for cmd_note in cmd.notes
        ]
        notes_a_encoder = note_etudiant_repo.search(entity_ids=identites_notes_a_encoder)
        notes_par_identite = {n.entity_id: n for n in notes_a_encoder}  # type: Dict[IdentiteNoteEtudiant, NoteEtudiant]
        # Exam withdrawals for every course touched by this batch, fetched in
        # a single call instead of one lookup per note.
        desinscriptions = inscription_examen_translator.search_desinscrits_pour_plusieurs_unites_enseignement(
            codes_unites_enseignement={n.code_unite_enseignement for n in notes_a_encoder},
            numero_session=periode_soumission.session_concernee,
            annee=periode_soumission.annee_concernee
        )
        exceptions = []
        notes_a_persister = []
        for note_encodee_cmd in cmd.notes:
            identite = identite_builder.build_from_encoder_note_command(cmd, note_encodee_cmd)
            note_a_modifier = notes_par_identite.get(identite)
            if note_a_modifier:
                try:
                    _verifier_etudiant_est_desinscrit(identite, desinscriptions)
                    nouvelle_note = NoteEtudiantBuilder().build_from_ancienne_note(
                        ancienne_note=note_a_modifier,
                        email_encode=note_encodee_cmd.email_etudiant,
                        nouvelle_note=note_encodee_cmd.note,
                    )
                    # Only persist notes whose value actually changed.
                    if note_a_modifier.note != nouvelle_note.note:
                        notes_a_persister.append(nouvelle_note)
                except MultipleBusinessExceptions as e:
                    # Record each failure in the report and keep processing
                    # the remaining lines of the batch.
                    for business_exception in e.exceptions:
                        rapport.add_note_non_enregistree(
                            noma=note_a_modifier.noma,
                            numero_session=note_a_modifier.numero_session,
                            code_unite_enseignement=note_a_modifier.code_unite_enseignement,
                            annee_academique=note_a_modifier.annee,
                            cause=str(business_exception.message)
                        )
                        exceptions.append(
                            EncoderNotesEtudiantEnLotLigneBusinessExceptions(
                                note_id=identite,
                                exception=business_exception,
                            )
                        )
        for note in notes_a_persister:
            rapport.add_note_enregistree(
                noma=note.noma,
                numero_session=note.numero_session,
                code_unite_enseignement=note.code_unite_enseignement,
                annee_academique=note.annee,
            )
            note_etudiant_repo.save(note)
        if notes_a_persister:
            historiser_note_service.historiser_encodage(cmd.matricule_fgs_enseignant, notes_a_persister)
        # The report is saved unconditionally, even when every line failed.
        rapport_repository.save(rapport)
        if exceptions:
            raise MultipleBusinessExceptions(exceptions=exceptions)
        return [n.entity_id for n in notes_a_persister]
def _verifier_etudiant_est_desinscrit(
identite_note_etudiant: IdentiteNoteEtudiant,
desinscriptions: Set[DesinscriptionExamenDTO]
) -> None:
if any(desinscription for desinscription in desinscriptions if
desinscription.noma == identite_note_etudiant.noma and
desinscription.code_unite_enseignement == identite_note_etudiant.code_unite_enseignement and
desinscription.annee == identite_note_etudiant.annee_academique):
raise MultipleBusinessExceptions(exceptions=[EtudiantNonInscritAExamenException()])
|
christianurich/VIBe2UrbanSim | 3rdparty/opus/src/urbansim/gridcell/average_land_value_per_acre_from_buildings.py | Python | gpl-2.0 | 1,881 | 0.011164 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable import Variable
from variable_functions import my_attribute_label
from opus_core.misc import safe_array_divide
class average_land_value_per_acre_from_buildings(Variable):
    """Average land value per acre, computed by dividing the gridcell's land value by its
    total number of acres (computed using buildings dataset)"""
    # Names of the gridcell attributes this variable is derived from.
    land_value = "total_land_value_from_buildings"
    acres = "acres_of_land"
    def dependencies(self):
        # Declare upstream variables so the framework computes them first.
        return [my_attribute_label(self.land_value),
                my_attribute_label(self.acres)]
    def compute(self, dataset_pool):
        # safe_array_divide presumably guards against zero acres — see
        # opus_core.misc; TODO confirm its zero-denominator behavior.
        acres = self.get_dataset().get_attribute(self.acres)
        return safe_array_divide(self.get_dataset().get_attribute(self.land_value),acres)
    def post_check(self, values, dataset_pool):
        # An average land value can never be negative.
        self.do_check("x >= 0", values)
from opus_core.tests import opus_unittest
from opus_core.tests.utils.variable_tester import VariableTester
from numpy import array
class Tests(opus_unittest.OpusTestCase):
    """Unit test for average_land_value_per_acre_from_buildings."""
    def test_my_inputs(self):
        """compute() must equal total land value / acres per gridcell."""
        total_land_value = array([0, 20050, 20050])
        acres_of_land = array([1995, 2005, 33])
        tester = VariableTester(
            __file__,
            package_order=['urbansim'],
            test_data={
                "gridcell":{
                    "grid_id": array([1,2,3]),
                    "total_land_value_from_buildings":total_land_value,
                    "acres_of_land":acres_of_land
                }
            }
        )
        # 0/1995 = 0.0, 20050/2005 = 10.0, 20050/33 ~= 607.5757576
        should_be = array([0.0, 10.0, 607.5757576])
        tester.test_is_close_for_variable_defined_by_this_module(self, should_be)
if __name__=='__main__':
opus_unittest.main() |
roverdotcom/django-fuelsdk | django_fuelsdk/constants.py | Python | mit | 221 | 0 | from django.conf import settings
# ExactTarget (Salesforce Marketing Cloud) credentials, read from Django
# settings with empty-string fallbacks so imports never fail at startup.
CLIENT_ID = getattr(settings, 'EXACT_TARGET_CLIENT_ID', '')
CLIENT_SECRET = getattr(settings, 'EXACT_TARGET_CLIENT_SECRET', '')
WSDL_URL = getattr(settings, 'EXACT_TARGET_WSDL_URL', '')
|
TalShor/SciLMM | scilmm/Matrices/SparseMatrixFunctions.py | Python | gpl-3.0 | 397 | 0 | import numpy as np
from scipy.sparse import csr_matrix
def save_sparse_csr(filename, array):
    """Persist a scipy CSR matrix to an uncompressed ``.npz`` archive.

    The four CSR components (data, indices, indptr, shape) are written
    under those names so ``load_sparse_csr`` can rebuild the matrix.
    """
    components = {
        "data": array.data,
        "indices": array.indices,
        "indptr": array.indptr,
        "shape": array.shape,
    }
    np.savez(filename, **components)
def load_sparse_csr(filename):
    """Rebuild a CSR matrix previously written by ``save_sparse_csr``.

    ``filename`` must include the ``.npz`` suffix that numpy appended
    when the archive was saved.
    """
    archive = np.load(filename)
    parts = (archive["data"], archive["indices"], archive["indptr"])
    return csr_matrix(parts, shape=archive["shape"])
| |
ctalbert/mozharness | test/test_base_python.py | Python | mpl-2.0 | 1,129 | 0.000886 | import os
import unittest
import mozha | rness.base.python as python
here = os.path.dirname(os.path.abspath(__file__))
class TestVirtualenvMixin(unittest.TestCase):
    """Unit tests for mozharness.base.python.VirtualenvMixin."""

    def test_package_versions(self):
        """package_versions() parses `pip freeze` output into a
        package-name -> version dict."""
        example = os.path.join(here, 'pip-freeze.example.txt')
        # `file()` is a Python-2-only builtin and leaked the handle;
        # use open() with a context manager instead.
        with open(example) as f:
            output = f.read()
        mixin = python.VirtualenvMixin()
        packages = mixin.package_versions(output)
        # expected values taken from pip-freeze.example.txt
        expected = {'MakeItSo': '0.2.6',
                    'PyYAML': '3.10',
                    'Tempita': '0.5.1',
                    'WebOb': '1.2b3',
                    'coverage': '3.5.1',
                    'logilab-astng': '0.23.1',
                    'logilab-common': '0.57.1',
                    'mozdevice': '0.2',
                    'mozhttpd': '0.3',
                    'mozinfo': '0.3.3',
                    'nose': '1.1.2',
                    'pyflakes': '0.5.0',
                    'pylint': '0.25.1',
                    'virtualenv': '1.7.1.2',
                    'wsgiref': '0.1.2'}
        self.assertEqual(packages, expected)
if __name__ == '__main__':
unittest.main()
|
imxana/Flask_init | apps/models.py | Python | mit | 3,510 | 0.004274 | # 'microblog' is an example, change the model for your own.
from flask import g
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
def register(app):
    """Bind the example microblog's SQLAlchemy models, CLI commands,
    Flask-Admin views and request hooks onto the given Flask ``app``."""
    db = SQLAlchemy(app)

    # flask CLI commands
    @app.cli.command('initdb')
    def initdb_command():
        """Initializes the database."""
        db.create_all()
        print('Initialized the database.')

    @app.cli.command('testdb')
    def datatest_command():
        """Insert some data for testing."""
        admin = User('admin', 'admin@example.com', '123')
        guest = User('guest', 'guest@example.com', '456')
        db.session.add(admin)
        db.session.add(guest)
        py = Category('Python')
        p = Post('Hello Python!', 'Python is pretty cool', py)
        db.session.add(py)
        db.session.add(p)
        db.session.commit()
        print('OK')

    # Models defined here (inside register() so they bind to this db instance)
    class User(db.Model):
        id = db.Column(db.Integer, primary_key=True)
        username = db.Column(db.String(80), unique=True)
        password = db.Column(db.String(20))
        email = db.Column(db.String(120), unique=True)

        def __init__(self, username, email, psw):
            self.username = username
            self.email = email
            self.password = psw

        def __repr__(self):
            return '<User %r>' % self.username

    class Post(db.Model):
        id = db.Column(db.Integer, primary_key=True)
        title = db.Column(db.String(80))
        body = db.Column(db.Text)
        pub_date = db.Column(db.DateTime)
        category_id = db.Column(db.Integer, db.ForeignKey('category.id'))
        category = db.relationship('Category',
                                   backref=db.backref('posts', lazy='dynamic'))

        def __init__(self, title, body, category, pub_date=None):
            self.title = title
            self.body = body
            # Default the publication date to "now" when not supplied.
            if pub_date is None:
                pub_date = datetime.utcnow()
            self.pub_date = pub_date
            self.category = category

        def __repr__(self):
            return '<Post %r>' % self.title

    class Category(db.Model):
        id = db.Column(db.Integer, primary_key=True)
        name = db.Column(db.String(50))

        def __init__(self, name):
            self.name = name

        def __repr__(self):
            return '<Category %r>' % self.name

    # Flask-admin initialization here
    admin = Admin(app, name='Admin', template_mode='bootstrap3')

    class GlobalView(ModelView):
        can_create = False
        can_edit = True
        can_delete = True
        can_view_details = True

    class UserView(GlobalView):
        # can_delete = False  # disable model deletion
        pass

    class PostView(GlobalView):
        page_size = 50  # the number of entries to display on the list view

    admin.add_view(UserView(User, db.session))
    admin.add_view(PostView(Post, db.session))
    admin.add_view(GlobalView(Category, db.session))

    # Request lifecycle: expose the db handle and model classes on flask.g
    # so views can reach them without importing this module.
    @app.before_request
    def before_request():
        g.db = db
        g.User = User
        g.Post = Post
        g.Category = Category

    # @app.teardown_request
    # def teardown_request():
    #     db = getattr(g, 'db', None)
    #     if db is not None:
    #         db.close()
    #         db = None
rs2/pandas | pandas/tests/reshape/test_qcut.py | Python | bsd-3-clause | 8,222 | 0.000486 | import os
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DatetimeIndex,
Interval,
IntervalIndex,
NaT,
Series,
TimedeltaIndex,
Timestamp,
cut,
date_range,
isna,
qcut,
timedelta_range,
)
import pandas._testing as tm
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.algorithms import quantile
from pandas.tseries.offsets import (
Day,
Nano,
)
def test_qcut():
    """Quartile bin edges produced by qcut match the empirical quantiles."""
    arr = np.random.randn(1000)
    # The bins are stored as an Index whose edges have been rounded,
    # so exact comparisons are tricky; use a loose tolerance instead.
    labels, bins = qcut(arr, 4, retbins=True)
    ex_bins = quantile(arr, [0, 0.25, 0.5, 0.75, 1.0])
    result = labels.categories.left.values
    assert np.allclose(result, ex_bins[:-1], atol=1e-2)
    result = labels.categories.right.values
    assert np.allclose(result, ex_bins[1:], atol=1e-2)
    # The categories must agree with an explicit cut() on the same edges.
    ex_levels = cut(arr, ex_bins, include_lowest=True)
    tm.assert_categorical_equal(labels, ex_levels)
def test_qcut_bounds():
    """Decile binning of 1000 random draws yields exactly ten distinct codes."""
    samples = np.random.randn(1000)
    codes = qcut(samples, 10, labels=False)
    distinct = np.unique(codes)
    assert distinct.size == 10
def test_qcut_specify_quantiles():
    """Passing explicit quartile boundaries is equivalent to qcut(arr, 4)."""
    arr = np.random.randn(100)
    factor = qcut(arr, [0, 0.25, 0.5, 0.75, 1.0])
    expected = qcut(arr, 4)
    tm.assert_categorical_equal(factor, expected)
def test_qcut_all_bins_same():
    """All-identical data collapses every quantile edge, so qcut must raise."""
    with pytest.raises(ValueError, match="edges.*unique"):
        qcut([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 3)
def test_qcut_include_lowest():
    """The first bin's left edge is nudged below the minimum (here -0.001)
    so the lowest value is included."""
    values = np.arange(10)
    ii = qcut(values, 4)
    ex_levels = IntervalIndex(
        [
            Interval(-0.001, 2.25),
            Interval(2.25, 4.5),
            Interval(4.5, 6.75),
            Interval(6.75, 9),
        ]
    )
    tm.assert_index_equal(ii.categories, ex_levels)
def test_qcut_nas():
    """NaN inputs propagate: their positions come back as missing categories."""
    arr = np.random.randn(100)
    arr[:20] = np.nan
    result = qcut(arr, 4)
    assert isna(result[:20]).all()
def test_qcut_index():
    """Two equal-frequency bins over [0, 2]: left edge nudged to -0.001."""
    expected = Categorical(
        [Interval(-0.001, 1), Interval(1, 2)], ordered=True
    )
    tm.assert_categorical_equal(qcut([0, 2], 2), expected)
def test_qcut_binning_issues(datapath):
    """Bins on the gh-1978/gh-1979 regression data must be non-degenerate,
    strictly increasing and non-overlapping."""
    # see gh-1978, gh-1979
    cut_file = datapath(os.path.join("reshape", "data", "cut_data.csv"))
    arr = np.loadtxt(cut_file)
    result = qcut(arr, 20)
    starts = []
    ends = []
    for lev in np.unique(result):
        s = lev.left
        e = lev.right
        # No bin may be degenerate (zero width).
        assert s != e
        starts.append(float(s))
        ends.append(float(e))
    # Consecutive bins: starts increase, ends increase, and each bin ends
    # no later than the next one starts (no overlap).
    for (sp, sn), (ep, en) in zip(
        zip(starts[:-1], starts[1:]), zip(ends[:-1], ends[1:])
    ):
        assert sp < sn
        assert ep < en
        assert ep <= sn
def test_qcut_return_intervals():
    """Default labels are Interval objects; thirds of 0..8 map 3/3/3."""
    ser = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])
    res = qcut(ser, [0, 0.333, 0.666, 1])
    exp_levels = np.array(
        [Interval(-0.001, 2.664), Interval(2.664, 5.328), Interval(5.328, 8)]
    )
    exp = Series(exp_levels.take([0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(CDT(ordered=True))
    tm.assert_series_equal(res, exp)
@pytest.mark.parametrize("labels", ["foo", 1, True])
def test_qcut_incorrect_labels(labels):
    """Non-list-like, non-False/None `labels` values are rejected."""
    # GH 13318
    values = range(5)
    msg = "Bin labels must either be False, None or passed in as a list-like argument"
    with pytest.raises(ValueError, match=msg):
        qcut(values, 4, labels=labels)
@pytest.mark.parametrize("labels", [["a", "b", "c"], list(range(3))])
def test_qcut_wrong_length_labels(labels):
    """`labels` must have exactly one entry per bin (edges minus one)."""
    # GH 13318
    values = range(10)
    msg = "Bin labels must be one fewer than the number of bin edges"
    with pytest.raises(ValueError, match=msg):
        qcut(values, 4, labels=labels)
@pytest.mark.parametrize(
    "labels, expected",
    [
        (["a", "b", "c"], Categorical(["a", "b", "c"], ordered=True)),
        (list(range(3)), Categorical([0, 1, 2], ordered=True)),
    ],
)
def test_qcut_list_like_labels(labels, expected):
    """List-like `labels` of the right length become the ordered categories."""
    # GH 13318
    values = range(3)
    result = qcut(values, 3, labels=labels)
    tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize(
    "kwargs,msg",
    [
        # msg=None means qcut is expected to succeed; otherwise it must
        # raise a ValueError matching msg.
        ({"duplicates": "drop"}, None),
        ({}, "Bin edges must be unique"),
        ({"duplicates": "raise"}, "Bin edges must be unique"),
        ({"duplicates": "foo"}, "invalid value for 'duplicates' parameter"),
    ],
)
def test_qcut_duplicates_bin(kwargs, msg):
    """Duplicate quantile edges: 'drop' collapses them, 'raise' errors,
    and any other value of `duplicates` is rejected."""
    # see gh-7751
    values = [0, 0, 0, 0, 1, 2, 3]
    if msg is not None:
        with pytest.raises(ValueError, match=msg):
            qcut(values, 3, **kwargs)
    else:
        result = qcut(values, 3, **kwargs)
        expected = IntervalIndex([Interval(-0.001, 1), Interval(1, 3)])
        tm.assert_index_equal(result.categories, expected)
@pytest.mark.parametrize(
    "data,start,end", [(9.0, 8.999, 9.0), (0.0, -0.001, 0.0), (-9.0, -9.001, -9.0)]
)
@pytest.mark.parametrize("length", [1, 2])
@pytest.mark.parametrize("labels", [None, False])
def test_single_quantile(data, start, end, length, labels):
    """qcut with q=1 puts everything in one bin whose left edge is nudged
    just below the (constant) data value."""
    # see gh-15431
    ser = Series([data] * length)
    result = qcut(ser, 1, labels=labels)
    if labels is None:
        intervals = IntervalIndex([Interval(start, end)] * length, closed="right")
        expected = Series(intervals).astype(CDT(ordered=True))
    else:
        # labels=False yields integer bin codes instead of intervals.
        expected = Series([0] * length, dtype=np.intp)
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
    "ser",
    [
        Series(DatetimeIndex(["20180101", NaT, "20180103"])),
        Series(TimedeltaIndex(["0 days", NaT, "2 days"])),
    ],
    ids=lambda x: str(x.dtype),
)
def test_qcut_nat(ser):
    """NaT entries stay missing when qcut-ing datetime/timedelta series."""
    # see gh-19768
    intervals = IntervalIndex.from_tuples(
        [(ser[0] - Nano(), ser[2] - Day()), np.nan, (ser[2] - Day(), ser[2])]
    )
    expected = Series(Categorical(intervals, ordered=True))
    result = qcut(ser, 2)
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("bins", [3, np.linspace(0, 1, 4)])
def test_datetime_tz_qcut(bins):
    """Timezone-aware datetimes bin correctly, whether q is an int or an
    explicit array of quantiles."""
    # see gh-19872
    tz = "US/Eastern"
    ser = Series(date_range("20130101", periods=3, tz=tz))
    result = qcut(ser, bins)
    expected = Series(
        IntervalIndex(
            [
                Interval(
                    Timestamp("2012-12-31 23:59:59.999999999", tz=tz),
                    Timestamp("2013-01-01 16:00:00", tz=tz),
                ),
                Interval(
                    Timestamp("2013-01-01 16:00:00", tz=tz),
                    Timestamp("2013-01-02 08:00:00", tz=tz),
                ),
                Interval(
                    Timestamp("2013-01-02 08:00:00", tz=tz),
                    Timestamp("2013-01-03 00:00:00", tz=tz),
                ),
            ]
        )
    ).astype(CDT(ordered=True))
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
    "arg,expected_bins",
    [
        [
            timedelta_range("1day", periods=3),
            TimedeltaIndex(["1 days", "2 days", "3 days"]),
        ],
        [
            date_range("20180101", periods=3),
            DatetimeIndex(["2018-01-01", "2018-01-02", "2018-01-03"]),
        ],
    ],
)
def test_date_like_qcut_bins(arg, expected_bins):
    """retbins=True returns date-like bin edges for date-like input."""
    # see gh-19891
    ser = Series(arg)
    result, result_bins = qcut(ser, 2, retbins=True)
    tm.assert_index_equal(result_bins, expected_bins)
@pytest.mark.parametrize("bins", [6, 7])
@pytest.mark.parametrize(
    "box, compare",
    [
        (Series, tm.assert_series_equal),
        (np.array, tm.assert_categorical_equal),
        (list, tm.assert_equal),
    ],
)
def test_qcut_bool_coercion_to_int(bins, box, compare):
    """Booleans are coerced to ints: qcut on bools equals qcut on 0/1 ints,
    across Series, ndarray and list containers (issue 20303)."""
    data_expected = box([0, 1, 1, 0, 1] * 10)
    data_result = box([False, True, True, False, True] * 10)
    expected = qcut(data_expected, bins, duplicates="drop")
    result = qcut(data_result, bins, duplicates="drop")
    compare(result, expected)
@pytest.mark.parametrize("q", [2, 5, 10])
def test_qcut_nullable_integer(q, any_numeric_ea_dtype):
arr | = pd.array(np.arange(100), dtype=any_numeric_ea_dtype)
arr[::2] = pd.NA
result = qcut(arr, q)
expected = qcut(arr.astype(float), q)
tm.a |
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v2_9_0/basic_services_broker.py | Python | apache-2.0 | 18,594 | 0.002797 | from ..broker import Broker
class BasicServicesBroker(Broker):
controller = "basic_services"
def authenticate(self, **kwargs):
"""Authenticates the user with NetMRI.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param username: The username of the user as whom to login.
:type username: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param password: The password of the user as whom to login.
:type password: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` %Y-%m-%d %H:%M:%S
:param datetime_format: The format to use for date/time input and output.
:type datetime_format: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timezone: Date/time input and output will be performed in the specified timezone. Should be specified as HH:MM offset from GMT. For example, -05:00 specified US Eastern Time, whereas +09:00 specifies Tokyo time. Alternatively, a timezone name may be used. See the API Data Structures page for details. If omitted, the server's configured timezone will be used.
:type timezone: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("authenticate"), kwargs)
def base_uri(self, **kwargs):
"""Returns the base URI for the specified version.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param version: The API version for which the base_uri is needed.
:type version: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("base_uri"), kwargs)
def license_info(self, **kwargs):
"""Returns license information for this NetMRI server.
**Inputs**
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return serial_number: NetMRI serial number.
:rtype serial_number: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return license_id: NetMRI License identifier.
:rtype license_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return license_expiration: NetMRI License expiration.
:rtype license_expiration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return license_type: NetMRI License type
:rtype license_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return mode: NetMRI operation mode. One of 'standalone', 'master' or 'collector'.
:rtype mode: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return maintenance_expiration: Maintenance expiration for appliance.
:rtype maintenance_expiration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return device_limit: Licensed limit of devices.
:rtype device_limit: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return interface_limit: Licensed limit of interfaces.
:rtype interface_limit: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return spm_limit: Licensed limit of number of ports controlled by SPM.
:rtype spm_limit: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return modules_short_name: Short symbolic names of licensed features.
:rtype modules_short_name: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return modules_support: Support statuses for corresponding modules in modules_short_names.
:rtype modules_support: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return modules_expiration: Expiration times for corresponding modules in modules_short_names.
:rtype modules_expiration: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return modules_name: Long names for corresponding modules in modules_short_names.
:rtype modules_name: Array of String
"""
return self.api_request(self._get_method_fullname("license_info"), kwargs)
def server_info(self, **kwargs):
"""Returns basic information regarding this NetMRI server.
**Inputs**
| ``api version min:`` 2.6
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param api_ | versions_only_ind: Only include API version information in the output.
:type api_versions_only_ind: Boolean
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return netmri_version: The NetMRI version number running on thi | s appliance or virtual machine.
:rtype netmri_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return latest_api_version: The most recent API version supported by this NetMRI.
:rtype latest_api_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return requested_api_version: The API version that executed this call.
:rtype requested_api_version: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return host_name: The configured host name of the NetMRI appliance.
:rtype host_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return operating_mode: Indicates if the NetMRI is running in standalone, collector, or operations center mode.
:rtype operating_mode: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return mgmt_ip: T |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.