| repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (stringlengths 0–8.16k) | middle (stringlengths 3–512) | suffix (stringlengths 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
relekang/python-thumbnails | thumbnails/__init__.py | Python | mit | 2,899 | 0.005174 |
# -*- coding: utf-8 -*-
from thumbnails.conf import settings
from thumbnails.engines import DummyEngine
from thumbnails.helpers import get_engine, generate_filename, get_cache_backend
from thumbnails.images import SourceFile, Thumbnail
__version__ = '0.5.1'
def get_thumbnail(original, size, **options):
"""
Creates or gets an already created thumbnail for the given image with the given size and
options.
    :param original: File path, URL or base64-encoded string of the image that you want a
                     thumbnail of.
:param size: String with the wanted thumbnail size. On the form: ``200x200``, ``200`` or
``x200``.
:param crop: Crop settings, should be ``center``, ``top``, ``right``, ``bottom``, ``left``.
:param force: If set to ``True`` the thumbnail will be created even if it exists before.
:param quality: Overrides ``THUMBNAIL_QUALITY``, will set the quality used by the backend while
saving the thumbnail.
:param scale_up: Overrides ``THUMBNAIL_SCALE_UP``, if set to ``True`` the image will be scaled
up if necessary.
    :param colormode: Overrides ``THUMBNAIL_COLORMODE``, the default colormode for thumbnails.
Supports all values supported by pillow. In other engines there is a best
effort translation from pillow modes to the modes supported by the current
engine.
:param format: Overrides the format the thumbnail will be saved in. This will override both the
detected file type as well as the one specified in ``THUMBNAIL_FALLBACK_FORMAT``.
:return: A Thumbnail object
"""
engine = get_engine()
cache = get_cache_backend()
original = SourceFile(original)
crop = options.get('crop', None)
options = engine.evaluate_options(options)
thumbnail_name = generate_filename(original, size, crop)
if settings.THUMBNAIL_DUMMY:
engine = DummyEngine()
return engine.get_thumbnail(thumbnail_name, engine.parse_size(size), crop, options)
cached = cache.get(thumbnail_name)
force = options is not None and 'force' in options and options['force']
if not force and cached:
return cached
thumbnail = Thumbnail(thumbnail_name, engine.get_format(original, options))
if force or not thumbnail.exists:
size = engine.parse_size(size)
thumbnail.image = engine.get_thumbnail(original, size, crop, options)
thumbnail.save(options)
for resolution in settings.THUMBNAIL_ALTERNATIVE_RESOLUTIONS:
resolution_size = engine.calculate_alternative_resolution_size(resolution, size)
image = engine.get_thumbnail(original, resolution_size, crop, options)
thumbnail.save_alternative_resolution(resolution, image, options)
cache.set(thumbnail)
return thumbnail
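# A hedged usage sketch (the path, size and options below are hypothetical examples,
# not taken from the source repository):
#   thumb = get_thumbnail('/tmp/photo.jpg', '200x200', crop='center', quality=80)
#   print(thumb)  # a Thumbnail object, newly created or fetched from the cache backend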
|
adsabs/ADS_records_merger | pipeline_log_functions.py | Python | gpl-3.0 | 1,639 | 0.003661 |
# Copyright (C) 2011, The SAO/NASA Astrophysics Data System
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
File containing global functions
'''
import sys
import os
import time
from pipeline_settings import VERBOSE
def msg(message, verbose=VERBOSE):
"""
Prints a debug message.
"""
#raise "YOU MUST USE THE LOGGING MODULE"
if verbose:
print time.strftime("%Y-%m-%d %H:%M:%S"), '---', message
def manage_check_error(msg_str, type_check, logger):
"""function that prints a warning or
raises an exception according to the type of check"""
from merger.merger_errors import GenericError
if type_check == 'warnings':
logger.warning(' CHECK WARNING: %s' % msg_str)
elif type_check == 'errors':
logger.critical(msg_str)
        raise GenericError(msg_str)
else:
error_string = 'Type of check "%s" cannot be handled by the "manage_check_error" function.' % type_check
        logger.critical(error_string)
raise GenericError(error_string)
return None
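# A hedged usage sketch (the logger setup is illustrative, not from the source;
# with type_check='errors' the call logs critically and raises GenericError):
#   import logging
#   manage_check_error('missing bibcode', 'warnings', logging.getLogger(__name__))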
|
iotile/coretools | iotileship/iotile/ship/recipe_manager.py | Python | gpl-3.0 | 4,144 | 0.002896 |
import os
from iotile.core.dev import ComponentRegistry
from iotile.ship.recipe import RecipeObject
from iotile.ship.exceptions import RecipeNotFoundError
class RecipeManager:
"""A class that maintains a list of installed recipes and recipe actions.
    It allows fetching recipes by name and automatically building RecipeObjects
from textual descriptions.
The RecipeManager maintains a dictionary of RecipeAction objects that it
compiles from all installed iotile packages. It passes this dictionary to
any Recipe that is created from it so the recipe can find any recipe
actions that it needs.
The RecipeManager finds RecipeActions by looking for plugins that
are registered with pkg_resources.
"""
def __init__(self):
self._recipe_actions = {}
self._recipe_resources = {}
self._recipes = {}
reg = ComponentRegistry()
for name, action in reg.load_extensions('iotile.recipe_action', product_name='build_step'):
self._recipe_actions[name] = action
for name, resource in reg.load_extensions('iotile.recipe_resource', product_name='build_resource'):
            self._recipe_resources[name] = resource
def is_valid_action(self, name):
"""Check if a name describes a valid action.
Args:
name (str): The name of the action to check
Returns:
bool: Whether the action is known and valid.
"""
        return self._recipe_actions.get(name, None) is not None
def is_valid_recipe(self, recipe_name):
"""Check if a recipe is known and valid.
Args:
            recipe_name (str): The name of the recipe to check
Returns:
bool: Whether the recipe is known and valid.
"""
return self._recipes.get(recipe_name, None) is not None
def add_recipe_folder(self, recipe_folder, whitelist=None):
"""Add all recipes inside a folder to this RecipeManager with an optional whitelist.
Args:
recipe_folder (str): The path to the folder of recipes to add.
whitelist (list): Only include files whose os.basename() matches something
on the whitelist
"""
if whitelist is not None:
whitelist = set(whitelist)
if recipe_folder == '':
recipe_folder = '.'
for yaml_file in [x for x in os.listdir(recipe_folder) if x.endswith('.yaml')]:
if whitelist is not None and yaml_file not in whitelist:
continue
recipe = RecipeObject.FromFile(os.path.join(recipe_folder, yaml_file), self._recipe_actions, self._recipe_resources)
self._recipes[recipe.name] = recipe
for ship_file in [x for x in os.listdir(recipe_folder) if x.endswith('.ship')]:
if whitelist is not None and ship_file not in whitelist:
continue
recipe = RecipeObject.FromArchive(os.path.join(recipe_folder, ship_file), self._recipe_actions, self._recipe_resources)
self._recipes[recipe.name] = recipe
def add_recipe_actions(self, recipe_actions):
"""Add additional valid recipe actions to RecipeManager
        Args:
            recipe_actions (list): List of tuples. The first value of each tuple is the
                class name; the second value is the RecipeAction object.
"""
for action_name, action in recipe_actions:
self._recipe_actions[action_name] = action
def get_recipe(self, recipe_name):
"""Get a recipe by name.
Args:
recipe_name (str): The name of the recipe to fetch. Can be either the
yaml file name or the name of the recipe.
"""
if recipe_name.endswith('.yaml'):
recipe = self._recipes.get(RecipeObject.FromFile(recipe_name, self._recipe_actions, self._recipe_resources).name)
else:
recipe = self._recipes.get(recipe_name)
if recipe is None:
raise RecipeNotFoundError("Could not find recipe", recipe_name=recipe_name, known_recipes=[x for x in self._recipes.keys()])
return recipe
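# A hedged usage sketch (the folder and recipe names are hypothetical examples):
if __name__ == '__main__':
    manager = RecipeManager()
    manager.add_recipe_folder('recipes')  # loads *.yaml recipes and *.ship archives
    if manager.is_valid_recipe('example_recipe'):
        print(manager.get_recipe('example_recipe'))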
|
ondoheer/GOT-Platform | app/frontend/views.py | Python | gpl-2.0 | 197 | 0.005076 |
from flask import Blueprint, render_template
frontend = Blueprint('frontend', __name__)
@frontend.route('/')
@frontend.route('/index')
def index():
return render_template('index.html')
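# A hedged sketch of mounting this blueprint on an application (the app below is
# hypothetical, not part of the source file):
if __name__ == '__main__':
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(frontend)
    app.run(debug=True)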
|
nttcom/eclcli | eclcli/orchestration/heatclient/v1/events.py | Python | apache-2.0 | 2,990 | 0 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import encodeutils
import six
from six.moves.urllib import parse
from eclcli.orchestration.heatclient.common import utils
from eclcli.orchestration.heatclient.openstack.common.apiclient import base
from eclcli.orchestration.heatclient.v1 import stacks
DEFAULT_PAGE_SIZE = 20
class Event(base.Resource):
def __repr__(self):
return "<Event %s>" % self._info
def update(self, **fields):
self.manager.update(self, **fields)
def delete(self):
return self.manager.delete(self)
def data(self, **kwargs):
return self.manager.data(self, **kwargs)
class EventManager(stacks.StackChildManager):
resource_class = Event
def list(self, stack_id, resource_name=None, **kwargs):
"""Get a list of events.
:param stack_id: ID of stack the events belong to
:param resource_name: Optional name of resources to filter events by
:rtype: list of :class:`Event`
"""
params = {}
if 'filters' in kwargs:
filters = kwargs.pop('filters')
params.update(filters)
for key, value in six.iteritems(kwargs):
if value:
params[key] = value
if resource_name is None:
url = '/stacks/%s/events' % stack_id
else:
stack_id = self._resolve_stack_id(stack_id)
url = '/stacks/%s/resources/%s/events' % (
parse.quote(stack_id, ''),
parse.quote(encodeutils.safe_encode(resource_name), ''))
        if params:
url += '?%s' % parse.urlencode(params, True)
return self._list(url, 'events')
def get(self, stack_id, resource_name, event_id):
"""Get the details for a specific event.
:param stack_id: ID of stack containing the event
        :param resource_name: ID of resource the event belongs to
:param event_id: ID of event to get the details for
"""
stack_id = self._resolve_stack_id(stack_id)
url_str = '/stacks/%s/resources/%s/events/%s' % (
parse.quote(stack_id, ''),
parse.quote(encodeutils.safe_encode(resource_name), ''),
parse.quote(event_id, ''))
resp = self.client.get(url_str)
body = utils.get_response_body(resp)
return Event(self, body.get('event'))
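# A hedged usage sketch (the client object, stack and resource names, and event id
# are hypothetical; an EventManager is normally reached through the heat client):
#   events = client.events.list('my-stack-id', resource_name='my_server')
#   detail = client.events.get('my-stack-id', 'my_server', 'an-event-uuid')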
|
lyso/scrape_realestate | parser_mysql.py | Python | gpl-3.0 | 10,544 | 0.000759 |
import time
import zlib
from bs4 import BeautifulSoup
# from geopy.geocoders import Nominatim as Geo
from scraper import BaseScraper
from price_parser import parse_price_text
from MySQL_connector import db_connector
db = 'realestate_db'
class Parser(object):
scr_db = 'scraper_dumps'
tgt_db = 'realestate_db'
def __init__(self):
self.html = ""
self.address = ""
self.hash_id = 0
self.property_type = ""
self.sub_type = ""
self.ad_id = ""
self.ad_url = ""
self.postcode = ""
self.state = ""
self.price_text = ""
self.open_date = ""
self.room_bed = None
self.room_bath = None
self.room_car = None
self.create_date = ""
self.last_seen_date = ""
self.raw_ad_text = ""
self.price = None
self.agent_name = ""
self.agent_company = ""
self._tgt_db_conn = db_connector(self.tgt_db)
self.cur = self._tgt_db_conn.cursor()
self.write_queue_len = 0
pass
@staticmethod
def _fetchonedict(cur):
data = cur.fetchone()
if data:
rs = {}
for i in range(len(data)):
col = cur.description[i][0]
d = data[i]
rs[col] = d
return rs
else:
return None
def extract_html_text(self, line_num=1000):
"""
query html from source database
call parse function to parse html to structured data
call insert function to insert to target database
:return:
"""
tic = time.time()
# get the parsed list of hash id
conn = db_connector(self.tgt_db)
cur = conn.cursor()
cur.execute("SELECT hash_id FROM tbl_property_ad")
parsed_hash_id = set()
while True:
res = cur.fetchone()
if res:
parsed_hash_id.add(res[0])
else:
break
pass
conn = db_connector(self.scr_db)
cur = conn.cursor()
cur.execute("SELECT * FROM tbl_html_text LIMIT %s", (line_num,))
i = 0
try:
while True:
# each row of data
i += 1
if not(i % 1000):
print "processing %d lines of data. (%f sec)\r" % (i, time.time()-tic)
tic = time.time()
rs = self._fetchonedict(cur)
if isinstance(rs, dict):
# get address only for the first version
# if rs['hash_id'] in parsed_hash_id:
# continue
self.html = zlib.decompress(str(rs["html_text"])).decode("utf-8")
self.hash_id = rs['hash_id']
self.create_date = rs["create_date"]
self.last_seen_date = rs["last_seen_date"]
self.raw_ad_text = rs["ad_text"]
else:
break
# call parse
self.parse_html_text()
self.insert_data()
finally:
self._tgt_db_conn.commit()
self._tgt_db_conn.close()
print "Saving and closing connection."
def parse_html_text(self):
soup = BeautifulSoup(self.html, "html.parser")
# get type
article = soup.article
try:
self.property_type = article["data-content-type"]
except (AttributeError, KeyError):
self.property_type = ""
# get ad id
self.ad_id = ""
try:
self.ad_id = article["id"]
except (AttributeError, KeyError):
self.ad_id = ""
# get url
self.ad_url = ""
if self.ad_id:
url = article.find("a")['href']
assert isinstance(url, basestring)
while url:
if url[0] == "/" and url.find(self.ad_id[1:]):
break
url = article.find("a")['href']
self.ad_url = "www.realestate.com.au"+url
# get subtype
self.sub_type = ""
if self.ad_url:
url_component = url.split("-")
self.sub_type = url_component[1]
# get address
photoviewer = soup.find("div", class_="photoviewer")
if photoviewer:
img = photoviewer.find("img")
try:
self.address = img['title']
except (KeyError, AttributeError):
self.address = ""
print "Could not found address, hash id:", self.hash_id
pass
# what if could not find address in the phtoviewer
# get postcode
self.postcode = ""
if self.address:
postcode = self.address[-4:].strip()
if postcode.isdigit():
self.postcode = postcode
# get state
self.state = ""
if self.postcode:
t = self.address.split(",")
t = t[-1]
state = t.strip().split(" ")[0]
self.state = state.upper()
# get price text
self.price_text = ""
self.price = None
price_text = article.find("p", class_="priceText")
if not price_text:
price_text = article.find("p", class_="contactAgent")
if not price_text:
price_text = article.find("span", class_="price rui-truncate")
if price_text:
self.price_text = price_text.get_text()
self.price = parse_price_text(self.price_text)
if not isinstance(self.price, float):
self.price = None
# todo li, class='badge openTime'
# s = article.find("li", class_="badge openTime")
# if s:
# print s.get_text(), len(article.find_all("li", class_="badge openTime"))
# get rooms
self.room_bed = None
self.room_bath = None
self.room_car = None
rooms = article.find("dl", class_="rui-property-features rui-clearfix")
if rooms:
room_text = rooms.get_text()
# print room_text, "===>", self._parse_rooms(room_text)
self.room_bed, self.room_bath, self.room_car = self._parse_rooms(room_text)
def _parse_rooms(self, room_text):
"""
:return: [1,2,3] for [bed,bath,car]
"""
assert isinstance(room_text, basestring)
rooms = [None, None, None]
s = room_text.split(" ")
while s:
text = s.pop(0)
if text == "Bedrooms":
num = s[0]
if num.isdigit():
s.pop(0)
rooms[0] = num
elif text == "Bathrooms":
num = s[0]
if num.isdigit():
s.pop(0)
rooms[1] = num
elif text == "Car":
if s[0] == "Spaces":
s.pop(0)
num = s[0]
if num.isdigit():
s.pop(0)
rooms[2] = num
        return rooms
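    # A hedged worked example of _parse_rooms (the input string is hypothetical;
    # note the counts come back as strings, not ints):
    #   self._parse_rooms(u"Bedrooms 2 Bathrooms 1 Car Spaces 1") -> ['2', '1', '1']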
def test_db(self):
conn = db_connector(db)
cur = conn.cursor()
cur.execute(
""" CREATE TABLE IF NOT EXISTS`tbl_pr
|
operty_ad` (
`id` INT NOT NULL,
`hash_id` INT NOT NULL,
`address` VARCHAR(100) NULL,
`price` INT NULL,
`price_text` VARCHAR(100) NULL,
`agent_name` VARCHAR(45) NULL,
`agent_company` VARCHAR(45) NULL,
`raw_list_text` VARCHAR(255) NULL,
`room.bed` INT NULL,
`room.bath` INT NULL,
`room.car` INT NULL,
`type` VARCHAR(45) NULL,
`subtype` VARCHAR(45) NULL,
`lat` DECIMAL NULL,
`long` DECIMAL NULL,
`address_normalized` VARCHAR(100) NULL,
`state` VARCHAR(10) NULL,
`postcode` VARCHAR
|
Mendelone/forex_trading | Algorithm.Python/PythonPackageTestAlgorithm.py | Python | apache-2.0 | 6,403 | 0.016872 |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
# Libraries included with basic python install
from bisect import bisect
import cmath
import collections
import copy
import functools
import heapq
import itertools
import math
import operator
import pytz
import Queue
import re
import time
import zlib
# Third party libraries added with pip
from sklearn.ensemble import RandomForestClassifier
import blaze # includes sqlalchemy, odo
import numpy
import scipy
import cvxopt
import cvxpy
from pykalman import KalmanFilter
import statsmodels.api as sm
import talib
from copulalib.copulalib import Copula
import theano
import xgboost
from arch import arch_model
from keras.models import Sequential
from keras.layers import Dense, Activation
import tensorflow as tf
class PythonPackageTestAlgorithm(QCAlgorithm):
'''Algorithm to test third party libraries'''
def Initialize(self):
self.SetStartDate(2013, 10, 7) #Set Start Date
        self.SetEndDate(2013, 10, 7)   #Set End Date
self.AddEquity("SPY", Resolution.Daily)
# numpy test
print "numpy test >>> print numpy.pi: " , numpy.pi
# scipy test:
print "scipy test >>> print mean of 1 2 3 4 5:", scipy.mean(numpy.array([1, 2, 3, 4, 5]))
#sklearn test
print "sklearn test >>> default RandomForestClassifier:", RandomForestClassifier()
# cvxopt matrix test
print "cvxopt >>>", cvxopt.matrix([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], (2,3))
# talib test
print "talib test >>>", talib.SMA(numpy.random.random(100))
# blaze test
blaze_test()
# cvxpy test
cvxpy_test()
# statsmodels test
statsmodels_test()
# pykalman test
pykalman_test()
# copulalib test
copulalib_test()
# theano test
theano_test()
# xgboost test
xgboost_test()
# arch test
arch_test()
# keras test
keras_test()
# tensorflow test
tensorflow_test()
def OnData(self, data): pass
def blaze_test():
accounts = blaze.symbol('accounts', 'var * {id: int, name: string, amount: int}')
deadbeats = accounts[accounts.amount < 0].name
L = [[1, 'Alice', 100],
[2, 'Bob', -200],
[3, 'Charlie', 300],
[4, 'Denis', 400],
[5, 'Edith', -500]]
print "blaze test >>>", list(blaze.compute(deadbeats, L))
def grade(score, breakpoints=[60, 70, 80, 90], grades='FDCBA'):
i = bisect(breakpoints, score)
return grades[i]
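# A worked check of the bisect lookup above: grade(85) evaluates
# bisect([60, 70, 80, 90], 85) == 3, and grades[3] == 'B'.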
def cvxpy_test():
numpy.random.seed(1)
n = 10
mu = numpy.abs(numpy.random.randn(n, 1))
Sigma = numpy.random.randn(n, n)
Sigma = Sigma.T.dot(Sigma)
w = cvxpy.Variable(n)
gamma = cvxpy.Parameter(sign='positive')
ret = mu.T*w
risk = cvxpy.quad_form(w, Sigma)
print "csvpy test >>> ", cvxpy.Problem(cvxpy.Maximize(ret - gamma*risk),
[cvxpy.sum_entries(w) == 1,
w >= 0])
def statsmodels_test():
nsample = 100
x = numpy.linspace(0, 10, 100)
X = numpy.column_stack((x, x**2))
    beta = numpy.array([1, 0.1, 10])
e = numpy.random.normal(size=nsample)
X = sm.add_constant(X)
y = numpy.dot(X, beta) + e
model = sm.OLS(y, X)
results = model.fit()
print "statsmodels tests >>>", results.summary()
def pykalman_test():
kf = KalmanFilter(transition_matrices = [[1, 1], [0, 1]], observation_matrices = [[0.1, 0.5], [-0.3, 0.0]])
    measurements = numpy.asarray([[1,0], [0,0], [0,1]]) # 3 observations
kf = kf.em(measurements, n_iter=5)
print "pykalman test >>>", kf.filter(measurements)
def copulalib_test():
x = numpy.random.normal(size=100)
y = 2.5 * x + numpy.random.normal(size=100)
#Make the instance of Copula class with x, y and clayton family::
print "copulalib test >>>", Copula(x, y, family='clayton')
def theano_test():
a = theano.tensor.vector() # declare variable
out = a + a ** 10 # build symbolic expression
f = theano.function([a], out) # compile function
print "theano test >>>", f([0, 1, 2])
def xgboost_test():
data = numpy.random.rand(5,10) # 5 entities, each contains 10 features
label = numpy.random.randint(2, size=5) # binary target
print "xgboost test >>>", xgboost.DMatrix( data, label=label)
def arch_test():
r = numpy.array([0.945532630498276,
0.614772790142383,
0.834417758890680,
0.862344782601800,
0.555858715401929,
0.641058419842652,
0.720118656981704,
0.643948007732270,
0.138790608092353,
0.279264178231250,
0.993836948076485,
0.531967023876420,
0.964455754192395,
0.873171802181126,
0.937828816793698])
garch11 = arch_model(r, p=1, q=1)
res = garch11.fit(update_freq=10)
print "arch test >>>", res.summary()
def keras_test():
# Initialize the constructor
model = Sequential()
# Add an input layer
model.add(Dense(12, activation='relu', input_shape=(11,)))
# Add one hidden layer
model.add(Dense(8, activation='relu'))
# Add an output layer
model.add(Dense(1, activation='sigmoid'))
print "keras test >>>", model
def tensorflow_test():
node1 = tf.constant(3.0, tf.float32)
node2 = tf.constant(4.0) # also tf.float32 implicitly
sess = tf.Session()
node3 = tf.add(node1, node2)
print "tensorflow test >>>", "sess.run(node3): ", sess.run(node3)
|
moccu/django-omnibus | examples/mousemove/example_project/connection.py | Python | bsd-3-clause | 485 | 0.002062 |
from omnibus.factories import websocket_connection_factory
def mousemove_connection_factory(auth_class, pubsub):
class GeneratedConnection(websocket_connection_factory(auth_class, pubsub)):
def close_connection(self):
self.pubsub.publish(
'mousemoves', 'disconnect',
sender=self.authenticator.get_identifier()
)
return super(GeneratedConnection, self).close_connection()
    return GeneratedConnection
|
dumrauf/web_tools | image_converter/tests/test_views.py | Python | mit | 3,078 | 0.001949 |
from django.test import Client
import mock as mock
from image_converter.tests.base import ImageConversionBaseTestCase
from image_converter.utils.convert_image import convert_image_to_jpeg
__author__ = 'Dominic Dumrauf'
class ViewsTestCase(ImageConversionBaseTestCase):
"""
Tests the 'views'.
"""
def test_upload_get(self):
"""
Tests GETting the form initially.
"""
# Given
c = Client()
# When
response = c.get('/')
# Then
self.assertTemplateUsed(response, template_name='upload.html')
self.assertEqual(response.status_code, 200)
self.assertIn('form', response.context)
def test_upload_post_without_file(self):
"""
Tests POSTing a form which *lacks* a file.
"""
# Given
c = Client()
# When
response = c.post('/')
# Then
self.assertTemplateUsed(response, template_name='upload.html')
self.assertFormError(response, 'form', 'file', 'This field is required.')
self.assertEqual(response.status_code, 200)
        self.assertIn('form', response.context)
def test_upload_post_with_non_image_file(self):
"""
Tests POSTing a form which contains a file but the file is not an image.
"""
# Given
c = Client()
# When
with open(self.non_image_file_path) as fp:
response = c.post('/', {'file': fp})
# Then
        self.assertTemplateUsed(response, template_name='unsupported_image_file_error.html')
self.assertEqual(response.status_code, 200)
self.assertIn('file', response.context)
self.assertIn(self.non_image_file_name, response.content)
def test_upload_post_with_image_file(self):
"""
Tests POSTing a form which contains a file where the file is an image.
"""
# Given
c = Client()
# When
with open(self.image_file_path) as fp:
response = c.post('/', {'file': fp})
converted_image = convert_image_to_jpeg(fp)
# Then
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Disposition'], 'attachment; filename={0}.jpg'.format(self.image_file_name))
self.assertEqual(response.content, converted_image.getvalue())
@mock.patch('image_converter.views.convert_image_to_jpeg')
def test_unexpected_error_in_image_conversion_handling(self, convert_image_to_jpeg):
"""
Tests POSTing a form where converting the image raises an unexpected exception.
"""
# Given
convert_image_to_jpeg.side_effect = Exception()
c = Client()
# When
with open(self.non_image_file_path) as fp:
response = c.post('/', {'file': fp})
# Then
self.assertTemplateUsed(response, template_name='generic_error.html')
self.assertEqual(response.status_code, 200)
self.assertIn('file', response.context)
self.assertIn(self.non_image_file_name, response.content)
|
leafclick/intellij-community | python/helpers/pydev/_pydevd_frame_eval/pydevd_frame_eval_cython_wrapper.py | Python | apache-2.0 | 1,343 | 0.002234 |
try:
try:
from _pydevd_frame_eval_ext import pydevd_frame_evaluator as mod
except ImportError:
from _pydevd_frame_eval import pydevd_frame_evaluator as mod
except ImportError:
try:
import sys
try:
is_64bits = sys.maxsize > 2 ** 32
except:
            # In Jython this call fails, but this is Ok, we don't support Jython for speedups anyways.
raise ImportError
plat = '32'
if is_64bits:
plat = '64'
        # We also accept things as:
#
# _pydevd_frame_eval.pydevd_frame_evaluator_win32_27_32
# _pydevd_frame_eval.pydevd_frame_evaluator_win32_34_64
#
# to have multiple pre-compiled pyds distributed along the IDE
# (generated by build_tools/build_binaries_windows.py).
mod_name = 'pydevd_frame_evaluator_%s_%s%s_%s' % (sys.platform, sys.version_info[0], sys.version_info[1], plat)
check_name = '_pydevd_frame_eval.%s' % (mod_name,)
mod = __import__(check_name)
mod = getattr(mod, mod_name)
except ImportError:
raise
frame_eval_func = mod.frame_eval_func
stop_frame_eval = mod.stop_frame_eval
dummy_trace_dispatch = mod.dummy_trace_dispatch
get_thread_info_py = mod.get_thread_info_py
clear_thread_local_info = mod.clear_thread_local_info
|
atodorov/pykickstart | pykickstart/commands/module.py | Python | gpl-2.0 | 4,566 | 0.001314 |
#
# Martin Kolman <mkolman@redhat.com>
#
# Copyright 2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
from pykickstart.base import BaseData, KickstartCommand
from pykickstart.errors import KickstartParseError
from pykickstart.options import KSOptionParser
from pykickstart.version import F29
from pykickstart.i18n import _
class F29_ModuleData(BaseData):
removedKeywords = BaseData.removedKeywords
removedAttrs = BaseData.removedAttrs
def __init__(self, *args, **kwargs):
BaseData.__init__(self, *args, **kwargs)
self.name = kwargs.get("name", "")
self.stream = kwargs.get("stream", "")
def __eq__(self, y):
if not y:
return False
return (self.name == y.name and self.stream == y.stream)
def __ne__(self, y):
return not self == y
def __str__(self):
retval = BaseData.__str__(self)
retval += "module --name=%s --stream=%s" % (self.name, self.stream)
return retval.strip() + "\n"
class F29_Module(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.moduleList = kwargs.get("moduleList", [])
self.op = self._getParser()
def __str__(self):
retval = ""
for module in self.moduleList:
retval += module.__str__()
return retval
def _getParser(self):
op = KSOptionParser(prog="module", description="""
The module command makes it possible to manipulate
modules.
(In this case we mean modules as introduced by the
Fedora modularity initiative.)
A module is defined by a unique name and a stream id,
                            where a single module can have (and usually has) multiple
                            available streams.
                            Streams will in most cases correspond to stable
releases of the given software components
(such as Node.js, Django, etc.) but there could be
also other use cases, such as a raw upstream master
branch stream or streams corresponding to an upcoming
stable release.
For more information see the Fedora modularity
initiative documentation:
https://docs.pagure.org/modularity/""", version=F29)
op.add_argument("--name", metavar="<module_name>", version=F29, required=True,
help="""
Name of the module to enable.""")
op.add_argument("--stream", metavar="<module_stream_name>", version=F29, required=False,
help="""
Name of the module stream to enable.""")
return op
def parse(self, args):
(ns, extra) = self.op.parse_known_args(args=args, lineno=self.lineno)
if len(extra) > 0:
            msg = _("The enable module command does not take positional arguments!")
raise KickstartParseError(msg, lineno=self.lineno)
enable_module_data = self.dataClass() # pylint: disable=not-callable
self.set_to_obj(ns, enable_module_data)
enable_module_data.lineno = self.lineno
return enable_module_data
def dataList(self):
return self.moduleList
@property
def dataClass(self):
return self.handler.ModuleData
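# A hedged example of the kickstart line this command parses (the module and
# stream names are hypothetical):
#   module --name=nodejs --stream=8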
|
naturalmessage/natmsgshardbig | sql/ArchiveNM.py | Python | gpl-3.0 | 2,018 | 0.018335 |
# Python 3: ArchiveNM.py
# Function:
# This will collect the files in /home/postgres that
# need to be sent to a new Natural Message machine
# that is being initialized. This currently grabs
# directory server and shard server files.
# It can also be used as an archiver.
import datetime
import tarfile
import os
import sys
# For the version code, enter the format used
# in the naturalmsg_svr_#_#_#.py files
test_or_prod = 'prod'
version = '0_0_5'
DSTAMP = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
# (do not add a trailing slash on directory names)
pgm_dir = '/var/natmsg'
sql_dir = '/home/postgres/shard/sql/' + test_or_prod
function_dir = '/home/postgres/shard/sql/' + test_or_prod + '/functions'
pgm_files = ('naturalmsg-svr' + version + '.py',
'shardfunc_cp' + version + '.py')
sql_files = ( \
'0001create_db.sh',
'0002create_tables.sql',
'0005shardserver.sql',
'0007shardbig.sql',
'0020payment.sql',
'0500sysmon.sql',
'blog01.sql' \
)
function_files = ( \
'nm_blog_entry_newest.sql',
'read_inbasket_stage010.sql',
'read_inbasket_stage020.sql',
'read_inbasket_stage030.sql',
'scan_shard_delete.sql',
'shard_burn.sql',
'shard_delete_db_entries.sql',
'shard_delete.sql',
'shard_expire_big.sql',
'shard_expire.sql',
'shard_id_exists.sql',
'smd_create0010.sql',
'sysmon001.sql' \
)
tar_fname_base = 'NatMsgSQLArchive' + version
tar_fname = tar_fname_base + '.tar'
if os.path.isfile(tar_fname):
# The tar file already exists, rename it
try:
os.renames(tar_fname, tar_fname_base + '-' + DSTAMP + '.tar')
except:
print('Error renaming an existing tar file: ' + tar_fname)
print('Maybe you do not have permission.')
sys.exit(12)
t = tarfile.TarFile(tar_fname, mode='w')
for f in pgm_files:
# the full path is already specified in the file list.
t.add(os.path.normpath(pgm_dir + '/' + f))
for f in sql_files:
t.add(os.path.normpath(sql_dir + '/' + f))
for f in function_files:
t.add(os.path.normpath(function_dir + '/' + f))
t.close()
|
HubbeKing/Hubbot_Twisted | hubbot/user.py | Python | mit | 629 | 0 |
class IRCUser(object):
def __init__(self, user):
self.user = None
self.hostmask = None
self.last_active = None
if "!" in user:
user_array = user.split("!")
self.name = user_array[0]
if len(user_array) > 1:
user_array = user_array[1].split("@")
self.user = user_array[0]
self.hostmask = user_array[1]
else:
self.name = user
self.user = "anon"
self.hostmask = "unknown"
def __str__(self):
return "{}!{}@{}".format(self.name, self.user, self.hostmask)
|
jjdmol/LOFAR | CEP/GSM/src/ms3_script.py | Python | gpl-3.0 | 2,005 | 0.002494 |
#!/usr/bin/python
import sys, os, time
from itertools import count
import logging
import tkp.database.database as database
import tkp.database.dataset as ds
import tkp.database.dbregion as reg
import tkp.database.utils as dbu
import monetdb.sql
from tkp.sourcefinder import image
from tkp.config import config
from tkp.utility import accessors, containers
db_enabled = config['database']['enabled']
db_host = config['database']['host']
db_user = config['database']['user']
db_passwd = config['database']['password']
db_dbase = config['database']['name']
db_port = config['database']['port']
db_autocommit = config['database']['autocommit']
basedir = config['test']['datapath']
imagesdir = basedir + '/fits'
regionfilesdir = basedir + '/regions'
if db_enabled:
db = database.DataBase(host=db_host, name=db_dbase, user=db_user, password=db_passwd, port=db_port, autocommit=db_autocommit)
try:
iter_start = time.time()
if db_enabled:
description = 'TRAPPED: LOFAR flare stars'
        dataset = ds.DataSet(data={'dsinname': description}, database=db)
print "dataset.id:", dataset.id
i = 0
files = os.listdir(imagesdir)
files.sort()
for file in files:
my_fitsfile = accessors.FitsFile(imagesdir + '/' + file)
my_image = accessors.sourcefinder_image_from_accessor(my_fitsfile)
#print "type(my_image):",type(my_image)
print "\ni: ", i, "\nfile: ", file
if db_enabled:
dbimg = accessors.dbimage_from_accessor(dataset, my_fitsfile)
print "dbimg.id: ", dbimg.id
results = my_image.extract()
print results
if db_enabled:
dbu.insert_extracted_sources(db.connection, dbimg.id, results)
dbu.associate_extracted_sources(db.connection, dbimg.id)
dbu.associate_with_catalogedsources(db.connection, dbimg.id)
my_image.clearcache()
i += 1
db.close()
except db.Error, e:
print "Failed for reason: %s " % (e,)
raise
|
marmstr93ng/TimeManagementSystem | tms/breakrule.py | Python | mit | 2,292 | 0.004363 |
import logging
import configparser
import os
from utils import bool_query
class BreakRule(object):
def __init__(self, settings):
self.settings = settings
self.rules_record = configparser.ConfigParser()
self.rules_record.read("{}/tms/breakrules.ini".format(os.getcwd()))
self.rules = {}
for rule_id in self.rules_record.sections():
self.rules[rule_id] = self.rules_record.get(rule_id, "Description")
def _check_rule_exists(self, rule_id):
if self.rules.get(rule_id, None) is None:
logging.warning("Rule {} doesn't exist".format(rule_id))
return False
else:
logging.debug("Rule {} exists".format(rule_id))
return True
def _update_break_rule(self, rule_id):
self.settings.set("Settings", "BreakRule", rule_id)
with open("{}/tms/settings.ini".format(os.getcwd()), 'w') as configfile:
self.settings.write(configfile)
logging.info("Break rule changed to rule {}".format(self.settings.get("Settings", "BreakRule")))
def print_rules(self):
logging.info("Break Rules: ")
for rule_id in self.rules:
logging.info(' [{}] {}'.format(rule_id, self.rules[rule_id]))
def get_break_rule(self, desired_rule_id=None):
if not desired_rule_id: desired_rule_id = self.settings.get("Settings", "BreakRule")
if self._check_rule_exists(desired_rule_id):
for rule_id in self.rules:
if rule_id == desired_rule_id:
logging.info(' [{}] {}'.format(rule_id, self.rules[desired_rule_id]))
def cmd_update_break_rule(self):
self.print_rules()
selection_query = None
while selection_query is None:
            logging.info('Please enter the ID of the rule to be used...')
            selection = input()
try:
int(selection)
except ValueError:
logging.warning('WARNING: Please enter a numeric value corresponding to a rule ID.')
else:
if self._check_rule_exists(selection):
                    selection_query = bool_query('Select Rule "{}" for use?'.format(selection), default="y")
self._update_break_rule(selection)
|
pombredanne/osrc | osrc/manage.py | Python | mit | 536 | 0 |
# -*- coding: utf-8 -*-
__all__ = [
"CreateTablesCommand", "DropTablesCommand", "UpdateCommand",
]
from flask.ext.script import Command, Option
from .models import db
from .update import update
class CreateTablesCommand(Command):
def run(self):
db.create_all()
class DropTablesCommand(Command):
def run(self):
db.drop_all()
class UpdateCommand(Command):
option_list = (
Option("-s", "--since", dest="since", required=False),
)
def run(self, since):
update(since=since)
|
3Nigma/jale | main.py | Python | mit | 3,579 | 0.00475 |
import random
def printifyInstruction(instr, mcs):
"""
    Construct a pretty representation of the instruction in memory.
mcs -> 'maximum characters span'
"""
return "({0:{3}d}, {1:{3}d}, {2:{3}d})".format(instr['A'], instr['B'], instr['C'], mcs)
class Orbis:
def __init__(self, gSize):
self.pc = 0
self.instructions = []
self.gsize = gSize
for i in range(gSize * 3):
if (i % 3) != 2:
# We are either on operand A or operand B initialization branch.
self.instructions.append(random.randrange(0, gSize * 3))
else:
# We are on the address C initialization branch.
self.instructions.append(random.randrange(0, gSize) * 3)
    def shock(self):
self.pc = 0
for g in range(self.gsize):
print "Evaluating gene {0} ...".format(self.pc / 3)
ta = self.instructions[g * 3]
tb = self.instructions[g * 3 + 1]
tc = self.instructions[g * 3 + 2]
cstem = self.instructions[tb] - self.instructions[ta]
if (tb % 3) == 2:
# We will affect the jump part of a gene. Make sure it remains consistent with the rest of the genes
cvtor = cstem % 3
prevtc = self.instructions[tb]
if cvtor == 0:
# The current value is a valid gene address. It's Ok to use it
self.instructions[tb] = cstem % (self.gsize * 3)
elif cvtor == 1:
# The current value is closer to the lower side
self.instructions[tb] = (cstem - 1) % (self.gsize * 3)
else:
# The current value is closer to the upper side
self.instructions[tb] = (cstem + 1) % (self.gsize * 3)
else:
# We are in the data domain. Just ensure that the resulting numerals are bounded to the current information domain
self.instructions[tb] = cstem % (self.gsize * 3)
if self.instructions[tb] >= self.gsize * 3:
raise IndexError("Invalid C address generated! Previous C value was {0} while cvtor was {1}".format(prevtc, cvtor))
if self.instructions[tb] <= tc:
self.pc = tc
else:
self.pc = self.pc + 3
def getInstruction(self, addr):
if addr >= (self.gsize * 3) or (addr % 3) != 0:
raise Exception("The address supplied is not valid!")
return {'A': self.instructions[addr], 'B': self.instructions[addr + 1], 'C': self.instructions[addr + 2]}
def __str__(self):
orbisPrintString = ""
instrRealAddress = 0
maxGeneCharPrintCount = len(str(len(self.instructions)))
for i in range(self.gsize):
orbisPrintString = orbisPrintString + '{0:{3}d}. [{1:{3}d}] {2}\n'.format(i, i * 3, printifyInstruction(self.getInstruction(i * 3), maxGeneCharPrintCount), maxGeneCharPrintCount)
instrRealAddress += 3
return orbisPrintString
if __name__ == "__main__":
x = Orbis(256)
print 'Original orbis: \n', x
print 'Shocking the world...'
for i in range(100):
print "Shock nr. {0} ...".format(i)
try:
x.shock()
except IndexError as e:
print "IndexError message received! World evaluation halted."
print "Exception message: {0}".format(e.args)
print x
exit()
print x
|
fyookball/electrum | lib/daemon.py | Python | mit | 14,615 | 0.001779 |
#!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import ast
import os
import time
import sys
# from jsonrpc import JSONRPCResponseManager
import jsonrpclib
from .jsonrpc import VerifyingJSONRPCServer
from .version import PACKAGE_VERSION
from .network import Network
from .util import (json_decode, DaemonThread, print_error, to_string,
standardize_path)
from .wallet import Wallet
from .storage import WalletStorage
from .commands import known_commands, Commands
from .simple_config import SimpleConfig
from .exchange_rate import FxThread
def get_lockfile(config):
    return os.path.join(config.path, 'daemon')
def remove_lockfile(lockfile):
try:
os.unlink(lockfile)
print_error("Removed lockfile:", lockfile)
except OSError as e:
print_error("Could not remove lockfile:", lockfile, repr(e))
def get_fd_or_server(config):
'''Tries to create the lockfile, using O_EXCL to
prevent races. If it succeeds it returns the FD.
Otherwise try and connect to the server specified in the lockfile.
If this succeeds, the server is returned. Otherwise remove the
lockfile and try again.'''
lockfile = get_lockfile(config)
limit = 5 # prevent infinite looping here. Give up after 5 attempts.
latest_exc = None
for n in range(limit):
try:
return os.open(lockfile, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o644), None
except PermissionError as e:
sys.exit(f"Unable to create lockfile due to file system permission problems: {e}")
except NotADirectoryError as e:
lockdir = os.path.dirname(lockfile)
sys.exit(f"Electron Cash directory location at {lockdir} is not a directory. Error was: {e}")
except OSError as e:
''' Unable to create -- this is normal if there was a pre-existing lockfile '''
latest_exc = e
server = get_server(config)
if server is not None:
return None, server
# Couldn't connect; remove lockfile and try again.
remove_lockfile(lockfile)
sys.exit(f"Unable to open/create lockfile at {lockfile} after {limit} attempts. Please check your filesystem setup. Last error was: {repr(latest_exc)}")
def get_server(config, timeout=2.0):
assert timeout > 0.0
lockfile = get_lockfile(config)
while True:
create_time = None
try:
with open(lockfile) as f:
(host, port), tmp_create_time = ast.literal_eval(f.read())
create_time = float(tmp_create_time); del tmp_create_time # ensures create_time is float; raises if create_time is not-float-compatible
rpc_user, rpc_password = get_rpc_credentials(config)
if rpc_password == '':
# authentication disabled
server_url = 'http://%s:%d' % (host, port)
else:
server_url = 'http://%s:%s@%s:%d' % (
rpc_user, rpc_password, host, port)
server = jsonrpclib.Server(server_url)
# Test daemon is running
server.ping()
return server
except Exception as e:
print_error("[get_server]", e)
# Note that the create_time may be in the future if there was a clock
# adjustment by system ntp, etc. We guard against this, with some
# tolerance. The net effect here is in normal cases we wait for the
# daemon, giving up after timeout seconds (or at worst timeout*2 seconds
# in the pathological case of a clock adjustment happening
# at the precise time the daemon was starting up).
if not create_time or abs(time.time() - create_time) > timeout:
return None
# Sleep a bit and try again; it might have just been started
time.sleep(1.0)
def get_rpc_credentials(config):
rpc_user = config.get('rpcuser', None)
rpc_password = config.get('rpcpassword', None)
if rpc_user is None or rpc_password is None:
rpc_user = 'user'
import ecdsa, base64
bits = 128
nbytes = bits // 8 + (bits % 8 > 0)
pw_int = ecdsa.util.randrange(pow(2, bits))
pw_b64 = base64.b64encode(
pw_int.to_bytes(nbytes, 'big'), b'-_')
rpc_password = to_string(pw_b64, 'ascii')
config.set_key('rpcuser', rpc_user)
config.set_key('rpcpassword', rpc_password, save=True)
elif rpc_password == '':
from .util import print_stderr
print_stderr('WARNING: RPC authentication is disabled.')
return rpc_user, rpc_password
class Daemon(DaemonThread):
def __init__(self, config, fd, is_gui, plugins):
DaemonThread.__init__(self)
self.plugins = plugins
self.config = config
if config.get('offline'):
self.network = None
else:
self.network = Network(config)
self.network.start()
self.fx = FxThread(config, self.network)
if self.network:
self.network.add_jobs([self.fx])
self.gui = None
self.wallets = {}
# Setup JSONRPC server
self.init_server(config, fd, is_gui)
def init_server(self, config, fd, is_gui):
host = config.get('rpchost', '127.0.0.1')
port = config.get('rpcport', 0)
rpc_user, rpc_password = get_rpc_credentials(config)
try:
server = VerifyingJSONRPCServer((host, port), logRequests=False,
rpc_user=rpc_user, rpc_password=rpc_password)
except Exception as e:
self.print_error('Warning: cannot initialize RPC server on host', host, e)
self.server = None
os.close(fd)
return
os.write(fd, bytes(repr((server.socket.getsockname(), time.time())), 'utf8'))
os.close(fd)
self.server = server
server.timeout = 0.1
server.register_function(self.ping, 'ping')
server.register_function(self.run_gui, 'gui')
server.register_function(self.run_daemon, 'daemon')
self.cmd_runner = Commands(self.config, None, self.network)
for cmdname in known_commands:
server.register_function(getattr(self.cmd_runner, cmdname), cmdname)
server.register_function(self.run_cmdline, 'run_cmdline')
def ping(self):
return True
def run_daemon(self, config_options):
config = SimpleConfig(config_options)
sub = config.get('subcommand')
subargs = config.get('subargs')
plugin_cmd = self.plugins and self.plugins.daemon_commands.get(sub)
if subargs and sub in [None, 'start', 'stop', 'status']:
return "Unexpected arguments: {!r}. {!r} takes no options.".format(subargs, sub)
if subargs and sub in ['load_wallet', 'close_wallet']:
return "Unexpected arguments: {!r}. Provide options to {!r} using the -w and -wp options.".format(subargs, sub)
if sub in [None, 'start']:
response = "Daemon already r
|
RafaelSzefler/phi | tests/unit/request/test_form.py | Python | mit | 834 | 0 |
# -*- coding: utf-8 -*-
from io import BytesIO
import pytest
from phi.request.form import FormRequest
class TestFormRequest(object):
@pytest.fixture
def form_req(self):
fr = FormRequest()
fr.charset = "utf-8"
return fr
@pytest.mark.parametrize("body, content", [
(
"name=test&blah=asdfdasf+&check=on",
{"blah": "asdfdasf ", "name": "test", "check": "on"}
),
(
"name=test&blah=asdfdasf+&check=on",
{"blah": "asdfdasf ", "name": "test", "check": "on"}
        ),
])
def test_body(self, body, content, form_req):
stream = BytesIO(body.encode("utf-8"))
stream.seek(0)
form_req._content_stream = stream
        form_req.content_length = len(body)
assert form_req._get_body() == content
|
shumik/skencil-c | Sketch/UI/linedlg.py | Python | gpl-2.0 | 11,272 | 0.016856 |
# Sketch - A Python-based interactive drawing program
# Copyright (C) 1997, 1998, 1999, 2001, 2002 by Bernhard Herzog
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# A dialog for specifying line properties
#
import operator
from X import LineDoubleDash
from Sketch.const import JoinMiter, JoinBevel, JoinRound,\
CapButt, CapProjecting, CapRound
from Sketch.Lib import util
from Sketch import _, Trafo, SimpleGC, SolidPattern, EmptyPattern, \
StandardDashes, StandardArrows, StandardColors
from Tkinter import Frame, Label, IntVar, LEFT, X, E, W, GROOVE
from tkext import ColorButton, UpdatedCheckbutton, MyOptionMenu2
from sketchdlg import StylePropertyPanel
from lengthvar import create_length_entry
import skpixmaps
pixmaps = skpixmaps.PixmapTk
def create_bitmap_image(tk, name, bitmap):
data = util.xbm_string(bitmap)
tk.call(('image', 'create', 'bitmap', name, '-foreground', 'black',
'-data', data, '-maskdata', data))
return name
_thickness = 3
_width = 90
def draw_dash_bitmap(gc, dashes):
scale = float(_thickness)
if dashes:
dashes = map(operator.mul, dashes, [scale] * len(dashes))
dashes = map(int, map(round, dashes))
for idx in range(len(dashes)):
length = dashes[idx]
if length <= 0:
dashes[idx] = 1
elif length > 255:
dashes[idx] = 255
else:
dashes = [_width + 10, 1]
gc.SetDashes(dashes)
gc.DrawLine(0, _thickness / 2, _width, _thickness / 2)
def create_dash_images(tk, tkwin, dashes):
bitmap = tkwin.CreatePixmap(_width, _thickness, 1)
gc = bitmap.CreateGC(foreground = 1, background = 0,
line_style = LineDoubleDash, line_width = _thickness)
images = []
for dash in dashes:
draw_dash_bitmap(gc, dash)
image = create_bitmap_image(tk, 'dash_' + `len(images)`, bitmap)
images.append((image, dash))
return gc, bitmap, images
_arrow_width = 31
_arrow_height = 25
_mirror = Trafo(-1, 0, 0, 1, 0, 0)
def draw_arrow_bitmap(gc, arrow, which = 2):
gc.gc.foreground = 0
gc.gc.FillRectangle(0, 0, _arrow_width + 1, _arrow_height + 1)
gc.gc.foreground = 1
y = _arrow_height / 2
if which == 1:
gc.PushTrafo()
gc.Concat(_mirror)
gc.DrawLineXY(0, 0, -1000, 0)
if arrow is not None:
arrow.Draw(gc)
if which == 1:
gc.PopTrafo()
def create_arrow_images(tk, tkwin, arrows):
arrows = [None] + arrows
bitmap = tkwin.CreatePixmap(_arrow_width, _arrow_height, 1)
gc = SimpleGC()
gc.init_gc(bitmap, foreground = 1, background = 0, line_width = 3)
gc.Translate(_arrow_width / 2, _arrow_height / 2)
gc.Scale(2)
images1 = []
for arrow in arrows:
draw_arrow_bitmap(gc, arrow, 1)
image = create_bitmap_image(tk, 'arrow1_' + `len(images1)`, bitmap)
images1.append((image, arrow))
images2 = []
for arrow in arrows:
draw_arrow_bitmap(gc, arrow, 2)
image = create_bitmap_image(tk, 'arrow2_' + `len(images2)`, bitmap)
images2.append((image, arrow))
return gc, bitmap, images1, images2
class LinePanel(StylePropertyPanel):
title = _("Line Style")
def __init__(self, master, main_window, doc):
StylePropertyPanel.__init__(self, master, main_window, doc,
name = 'linedlg')
def build_dlg(self):
top = self.top
button_frame = self.create_std_buttons(top)
button_frame.grid(row = 5, columnspan = 2, sticky = 'ew')
color_frame = Frame(top, relief = GROOVE, bd = 2)
color_frame.grid(row = 0, columnspan = 2, sticky = 'ew')
label = Label(color_frame, text = _("Color"))
label.pack(side = LEFT, expand = 1, anchor = E)
self.color_but = ColorButton(color_frame, width = 3, height = 1,
command = self.set_line_color)
self.color_but.SetColor(StandardColors.black)
self.color_but.pack(side = LEFT, expand = 1, anchor = W)
self.var_color_none = IntVar(top)
check = UpdatedCheckbutton(color_frame, text = _("None"),
variable = self.var_color_none,
command = self.do_apply)
check.pack(side = LEFT, expand = 1)
width_frame = Frame(top, relief = GROOVE, bd = 2)
width_frame.grid(row = 1, columnspan = 2, sticky = 'ew')
label = Label(width_frame, text = _("Width"))
label.pack(side = LEFT, expand = 1, anchor = E)
self.var_width = create_length_entry(top, width_frame,
self.set_line_width,
scroll_pad = 0)
tkwin = self.main_window.canvas.tkwin
gc, bitmap, dashlist = create_dash_images(self.top.tk, tkwin,
StandardDashes())
self.opt_dash = MyOptionMenu2(top, dashlist, command = self.set_dash,
entry_type = 'image',
highlightthickness = 0)
self.opt_dash.grid(row = 2, columnspan = 2, sticky = 'ew', ipady = 2)
self.dash_gc = gc
self.dash_bitmap = bitmap
gc, bitmap, arrow1, arrow2 = create_arrow_images(self.top.tk, tkwin,
StandardArrows())
self.opt_arrow1 = MyOptionMenu2(top, arrow1, command = self.set_arrow,
args = 1, entry_type = 'image',
highlightthickness = 0)
self.opt_arrow1.grid(row = 3, column = 0, sticky = 'ew', ipady = 2)
self.opt_arrow2 = MyOptionMenu2(top, arrow2, command = self.set_arrow,
args = 2, entry_type = 'image',
highlightthickness = 0)
        self.opt_arrow2.grid(row = 3, column = 1, sticky = 'ew', ipady = 2)
self.arrow_gc = gc
self.arrow_bitmap = bitmap
self.opt_join = MyOptionMenu2(top, [(pixmaps.JoinMiter, JoinMiter),
(pixmaps.JoinRound, JoinRound),
(pixmaps.JoinBevel, JoinBevel)],
command = self.set_line_join,
entry_type = 'bitmap',
highlightthickness = 0)
self.opt_join.grid(row = 4, column = 0, sticky = 'ew')
self.opt_cap = MyOptionMenu2(top,
[(pixmaps.CapButt, CapButt),
(pixmaps.CapRound, CapRound),
(pixmaps.CapProjecting, CapProjecting)],
command = self.set_line_cap,
entry_type = 'bitmap',
highlightthickness = 0)
self.opt_cap.grid(row = 4, column = 1, sticky = 'ew')
self.opt_cap.SetValue(None)
def close_dlg(self):
StylePropertyPanel.close_dlg(self)
self.var_width = None
def init_from_style(self, style):
if style.HasLine():
self.var_color_none.set(0)
self.opt_join.SetValue(style.line_join)
self.opt_cap.SetValue(style.line_cap)
self.color_but.SetColor(style.line_pattern.Color())
|
laalaguer/pythonlearn | 01-basic/math_improved.py | Python | mit | 1,306 | 0.004594 |
# We try to improve the previous 'math_operation.py' by reducing the code.
# Here we introduce a concept named list comprehension.
# Knowledge points:
# 1. list comprehension, the [x for x in a-list] form
# 2. dir() function to get the current environment variable names in scope.
# 3. "in" check; we never use string.search() as in Java, but use "in" to check string existence.
# 4. "eval()" function to run the expression
# still, we expect user to input something
user_input = raw_input("Please input a sequence of numbers, like: 1 2 3.1 2.1 -3 9: \n")
# Split user-input numbers into individual numbers, store them in a list.
possible_numbers = user_input.strip().split(" ")
# We use a list comprehension to get rid of the "for" loop and reduce the amount of code.
float_numbers = [float(x) for x in possible_numbers]
# absolute numbers
absolute_numbers = [abs(x) for x in float_numbers]
# rounded numbers, in "int" style
int_numbers = [int(round(x)) for x in float_numbers]
import math
# floored numbers
floored_numbers = [math.floor(x) for x in float_numbers]
# ceilled numbers
ceil_numbers = [math.ceil(x) for x in float_numbers]
# alright, lets try to print smartly about all the numbers we have
# use the function "dir()"
env_variables = dir()
for var in env_variables:
if "_numbers" in var:
print var, ":", eval(var)
|
6desislava6/PyDay | pyday_alarms/urls.py | Python | mit | 336 | 0 |
from django.conf.urls import url
from django.conf.urls import patterns
from pyday_alarms import views
app_name = 'pyday_alarms'
urlpatterns = [
    url(r'^alarms/$', views.AlarmView.as_view(), name='alarms'),
]
'''urlpatterns += patterns('pyday_social_network.views',
url(r'^list/$', 'list', name='list'))
'''
|
etherkit/OpenBeacon2 | client/linux-arm/venv/lib/python3.6/site-packages/PyInstaller/hooks/hook-google.cloud.speech.py | Python | gpl-3.0 | 603 | 0.003317 |
#-----------------------------------------------------------------------------
# Copyright (c) 2017-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
from PyInstaller.utils.hooks import copy_metadata
datas = copy_metadata('google-cloud-speech')
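For reference, a hedged note on what this hook does: copy_metadata returns a list of (source, destination) tuples pointing at the package's dist-info/egg-info directory, which PyInstaller then bundles so the distribution's metadata is resolvable at runtime. An illustrative sketch (paths are hypothetical):

# datas = copy_metadata('google-cloud-speech')
# -> [('/.../site-packages/google_cloud_speech-1.3.2.dist-info',
#      'google_cloud_speech-1.3.2.dist-info')]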
|
geeag/kafka
|
tests/kafkatest/tests/client/message_format_change_test.py
|
Python
|
apache-2.0
| 4,644
| 0.004522
|
# Copyright 2015 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.mark import parametrize
from ducktape.utils.util import wait_until
from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.services.kafka import KafkaService
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.tests.produce_consume_validate import ProduceConsumeValidateTest
from kafkatest.utils import is_int
from kafkatest.version import LATEST_0_9, LATEST_0_10, TRUNK, KafkaVersion
class MessageFormatChangeTest(ProduceConsumeValidateTest):
def __init__(self, test_context):
super(MessageFormatChangeTest, self).__init__(test_context=test_context)
def setUp(self):
self.topic = "test_topic"
self.zk = ZookeeperService(self.test_context, num_nodes=1)
self.zk.start()
# Producer and consumer
self.producer_throughput = 10000
self.num_producers = 1
self.num_consumers = 1
self.messages_per_producer = 100
def produce_and_consume(self, producer_version, consumer_version, group):
self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
self.topic,
throughput=self.producer_throughput,
message_validator=is_int,
version=KafkaVersion(producer_version))
self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka,
self.topic, new_consumer=False, consumer_timeout_ms=30000,
message_validator=is_int, version=KafkaVersion(consumer_version))
self.consumer.group_id = group
self.run_produce_consume_validate(lambda: wait_until(
lambda: self.producer.each_produced_at_least(self.messages_per_producer) == True,
timeout_sec=120, backoff_sec=1,
err_msg="Producer did not produce all messages in reasonable amount of time"))
@parametrize(producer_version=str(TRUNK), consumer_version=str(TRUNK))
@parametrize(producer_version=str(LATEST_0_9), consumer_version=str(LATEST_0_9))
def test_compatibility(self, producer_version, consumer_version):
""" This tests performs the following checks:
The workload is a mix of 0.9.x and 0.10.x producers and consumers
that produce to and consume from a 0.10.x cluster
1. initially the topic is using message format 0.9.0
2. change the message format version for topic to 0.10.0 on the fly.
3. change the message format version for topic back to 0.9.0 on the fly.
- The producers and consumers should not have any issue.
- Note that for 0.9.x consumers/producers we only do steps 1 and 2
"""
self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, version=TRUNK, topics={self.topic: {
"partitions": 3,
"replication-factor": 3,
'configs': {"min.insync.replicas": 2}}})
self.kafka.start()
self.logger.info("First format change to 0.9.0")
self.kafka.alter_message_format(self.topic, str(LATEST_0_9))
self.produce_and_consume(producer_version, consumer_version, "group1")
self.logger.info("Second format change to 0.10.0")
self.kafka.alter_message_format(self.topic, str(LATEST_0_10))
self.produce_and_consume(producer_version, consumer_version, "group2
|
")
if producer_version == str(TRUNK) and consumer_version == str(TRUNK):
self.logger.info("Third format change back to 0.9.0")
self.kafka.alter_message_format(self.topic, str(LATEST_0_9))
self.produce_and_consume(producer_version, consumer_version, "group3")
|
alexgorban/models
|
official/vision/detection/modeling/retinanet_model.py
|
Python
|
apache-2.0
| 6,957
| 0.003881
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model defination for the RetinaNet Model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from absl import logging
import tensorflow.compat.v2 as tf
from tensorflow.python.keras import backend
from official.vision.detection.dataloader import mode_keys
from official.vision.detection.evaluation import factory as eval_factory
from official.vision.detection.modeling import base_model
from official.vision.detection.modeling import losses
from official.vision.detection.modeling.architecture import factory
from official.vision.detection.ops import postprocess_ops
class RetinanetModel(base_model.Model):
"""RetinaNet model function."""
def __init__(self, params):
super(RetinanetModel, self).__init__(params)
# For eval metrics.
self._params = params
# Architecture generators.
self._backbone_fn = factory.backbone_generator(params)
self._fpn_fn = factory.multilevel_features_generator(params)
self._head_fn = factory.retinanet_head_generator(params.retinanet_head)
# Loss function.
self._cls_loss_fn = losses.RetinanetClassLoss(params.retinanet_loss)
self._box_loss_fn = losses.RetinanetBoxLoss(params.retinanet_loss)
self._box_loss_weight = params.retinanet_loss.box_loss_weight
self._keras_model = None
# Predict function.
self._generate_detections_fn = postprocess_ops.MultilevelDetectionGenerator(
params.postprocess)
self._transpose_input = params.train.transpose_input
assert not self._transpose_input, 'Transpose input is not supported.'
# Input layer.
input_shape = (
params.retinanet_parser.output_size +
[params.retinanet_parser.num_channels])
self._input_layer = tf.keras.layers.Input(
shape=input_shape, name='',
dtype=tf.bfloat16 if self._use_bfloat16 else tf.float32)
def build_outputs(self, inputs, mode):
# If the input image is transposed (from NHWC to HWCN), we need to revert it
# back to the original shape before it's used in the computation.
if self._transpose_input:
inputs = tf.transpose(inputs, [3, 0, 1, 2])
backbone_features = self._backbone_fn(
inputs, is_training=(mode == mode_keys.TRAIN))
fpn_features = self._fpn_fn(
backbone_features, is_training=(mode == mode_keys.TRAIN))
cls_outputs, box_outputs = self._head_fn(
fpn_features, is_training=(mode == mode_keys.TRAIN))
if self._use_bfloat16:
levels = cls_outputs.keys()
for level in levels:
cls_outputs[level] = tf.cast(cls_outputs[level], tf.float32)
box_outputs[level] = tf.cast(box_outputs[level], tf.float32)
model_outputs = {
'cls_outputs': cls_outputs,
'box_outputs': box_outputs,
}
return model_outputs
def build_loss_fn(self):
if self._keras_model is None:
raise ValueError('build_loss_fn() must be called after build_model().')
filter_fn = self.make_filter_trainable_variables_fn()
trainable_variables = filter_fn(self._keras_model.trainable_variables)
def _total_loss_fn(labels, outputs):
cls_loss = self._cls_loss_fn(outputs['cls_outputs'],
labels['cls_targets'],
labels['num_positives'])
box_loss = self._box_loss_fn(outputs['box_outputs'],
labels['box_targets'],
labels['num_positives'])
model_loss = cls_loss + self._box_loss_weight * box_loss
l2_regularization_loss = self.weight_decay_loss(self._l2_weight_decay,
trainable_variables)
total_loss = model_loss + l2_regularization_loss
return {
'total_loss': total_loss,
'cls_loss': cls_loss,
'box_loss': box_loss,
'model_loss': model_loss,
'l2_regularization_loss': l2_regularization_loss,
}
return _total_loss_fn
def build_model(self, params, mode=None):
if self._keras_model is None:
with backend.get_graph().as_default():
outputs = self.model_outputs(self._input_layer, mode)
model = tf.keras.models.Model(
inputs=self._input_layer, outputs=outputs, name='retinanet')
assert model is not None, 'Fail to build tf.keras.Model.'
model.optimizer = self.build_optimizer()
self._keras_model = model
return self._keras_model
def post_processing(self, labels, outputs):
# TODO(yeqing): Moves the output related part into build_outputs.
required_output_fields = ['cls_outputs', 'box_outputs']
for field in required_output_fields:
if field not in outputs:
raise ValueError('"%s" is missing in outputs, required %s found %s',
field, required_output_fields, outputs.keys())
required_label_fields = ['image_info', 'groundtruths']
for field in required_label_fields:
if field not in labels:
raise ValueError('"%s" is missing in labels, required %s found %s',
                 field, required_label_fields, labels.keys())
boxes, scores, classes, valid_detections = self._generate_detections_fn(
outputs['box_outputs'], outputs['cls_outputs'],
labels['anchor_boxes'], labels['image_info'][:, 1:2, :])
# Discards the old output tensors to save memory. The `cls_outputs` and
# `box_outputs` are pretty big and could potentially lead to memory issues.
outputs = {
'source_id': labels['groundtruths']['source_id'],
'image_info': labels['image_info'],
'num_detections': valid_detections,
'detection_boxes': boxes,
'detection_classes': classes,
'detection_scores': scores,
}
if 'groundtruths' in labels:
labels['source_id'] = labels['groundtruths']['source_id']
labels['boxes'] = labels['groundtruths']['boxes']
labels['classes'] = labels['groundtruths']['classes']
labels['areas'] = labels['groundtruths']['areas']
labels['is_crowds'] = labels['groundtruths']['is_crowds']
return labels, outputs
def eval_metrics(self):
return eval_factory.evaluator_generator(self._params.eval)
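A tiny numeric sketch of how the loss terms compose in _total_loss_fn above (scalar values are hypothetical; names mirror the function):

# Hypothetical values, purely to illustrate the composition.
cls_loss, box_loss, l2_regularization_loss = 0.7, 0.2, 0.01
box_loss_weight = 50.0
model_loss = cls_loss + box_loss_weight * box_loss    # 10.7
total_loss = model_loss + l2_regularization_loss      # 10.71
print(total_loss)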
|
edent/Twitter-Networks
|
GenerateNetwork.py
|
Python
|
mit
| 1,614
| 0.005576
|
import glob
import os
import json
import sys
import argparse
from collections import defaultdict
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--screen-name", required=True, help="Screen name of twitter user")
args = vars(ap.parse_args())
SEED = args['screen_name']
users = defaultdict(lambda: { 'followers': 0 })
for f in glob.glob('twitter-users/*.json'):
print "loading " + str(f)
data = json.load(file(f))
screen_name = data['screen_name']
users[screen_name] = { 'followers': data['followers_count'], 'id':data['id'] }
def process_follower_list(screen_name, edges=[], depth=0, max_depth=5):
f = os.path.join('following', screen_name + '.csv')
print "processing " + str(f)
if not os.path.exists(f):
return edges
followers = [line.strip().split('\t') for line in file(f)]
for follower_data in followers:
if len(follower_data) < 2:
continue
screen_name_2 = follower_data[1]
# use the number of followers for screen_name as the weight
weight = users[screen_name]['followers']
edges.append([users[screen_name]['id'], follower_data[0], weight])
if depth+1 < max_depth:
process_follower_list(screen_name_2, edges, depth+1, max_depth)
return edges
edges = process_follower_list(SEED, max_depth=5)
with open('twitter_network.csv', 'w') as outf:
edge_exists = {}
for edge in edges:
key = ','.join([str(x) for x in edge])
if key not in edge_exists:
outf.write('%s,%s,%d\n' % (edge[0], edge[1], edge[2]))
edge_exists[key] = True
|
ntt-sic/python-glanceclient
|
glanceclient/v2/shell.py
|
Python
|
apache-2.0
| 10,082
| 0.000099
|
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glanceclient.common import progressbar
from glanceclient.common import utils
from glanceclient import exc
import json
import os
from os.path import expanduser
IMAGE_SCHEMA = None
def get_image_schema():
global IMAGE_SCHEMA
if IMAGE_SCHEMA is None:
schema_path = expanduser("~/.glanceclient/image_schema.json")
if os.path.exists(schema_path) and os.path.isfile(schema_path):
with file(schema_path, "r") as f:
schema_raw = f.read()
IMAGE_SCHEMA = json.loads(schema_raw)
return IMAGE_SCHEMA
@utils.schema_args(get_image_schema)
@utils.arg('--property', metavar="<key=value>", action='append',
default=[], help=('Arbitrary property to associate with image.'
' May be used multiple times.'))
def do_image_create(gc, args):
"""Create a new image."""
schema = gc.schemas.get("image")
_args = [(x[0].replace('-', '_'), x[1]) for x in vars(args).items()]
fields = dict(filter(lambda x: x[1] is not None and
(x[0] == 'property' or
schema.is_core_property(x[0])),
_args))
raw_properties = fields.pop('property', [])
for datum in raw_properties:
key, value = datum.split('=', 1)
fields[key] = value
image = gc.images.create(**fields)
ignore = ['self', 'access', 'file', 'schema']
image = dict([item for item in image.iteritems()
if item[0] not in ignore])
utils.print_dict(image)
@utils.arg('id', metavar='<IMAGE_ID>', help='ID of image to update.')
@utils.schema_args(get_image_schema, omit=['id'])
@utils.arg('--property', metavar="<key=value>", action='append',
default=[], help=('Arbitrary property to associate with image.'
' May be used multiple times.'))
@utils.arg('--remove-property', metavar="key", action='append', default=[],
help="Name of arbitrary property to remove from the image")
def do_image_update(gc, args):
"""Update an existing image."""
schema = gc.schemas.get("image")
_args = [(x[0].replace('-', '_'), x[1]) for x in vars(args).items()]
fields = dict(filter(lambda x: x[1] is not None and
(x[0] in ['property', 'remove_property'] or
schema.is_core_property(x[0])),
_args))
raw_properties = fields.pop('property', [])
for datum in raw_properties:
key, value = datum.split('=', 1)
fields[key] = value
remove_properties = fields.pop('remove_property', None)
image_id = fields.pop('id')
image = gc.images.update(image_id, remove_properties, **fields)
ignore = ['self', 'access', 'file', 'schema']
image = dict([item for item in image.iteritems()
if item[0] not in ignore])
utils.print_dict(image)
@utils.arg('--page-size', metavar='<SIZE>', default=None, type=int,
help='Number of images to request in each paginated request.')
@utils.arg('--visibility', metavar='<VISIBILITY>',
help='The visibility of the images to display.')
@utils.arg('--member-status', metavar='<MEMBER_STATUS>',
help='The status of images to display.')
@utils.arg('--owner', metavar='<OWNER>',
help='Display images owned by <OWNER>.')
@utils.arg('--checksum', metavar='<CHECKSUM>',
help='Display images matching the checksum')
@utils.arg('--tag', metavar='<TAG>', action='append',
help="Filter images by an user-defined tag.")
def do_image_list(gc, args):
"""List images you can access."""
filter_keys = ['visibility', 'member_status', 'owner', 'checksum', 'tag']
filter_items = [(key, getattr(args, key)) for key in filter_keys]
filters = dict([item for item in filter_items if item[1] is not None])
kwargs = {'filters': filters}
if args.page_size is not None:
kwargs['page_size'] = args.page_size
images = gc.images.list(**kwargs)
columns = ['ID', 'Name']
utils.print_list(images, columns)
@utils.arg('id', metavar='<IMAGE_ID>', help='ID of image to describe.')
def do_image_show(gc, args):
"""Describe a specific image."""
image = gc.images.get(args.id)
ignore = ['self', 'access', 'file', 'schema']
image = dict([item for item in image.iteritems() if item[0] not in ignore])
utils.print_dict(image)
@utils.arg('--image-id', metavar='<IMAGE_ID>', required=True,
help='Image to display members of.')
def do_member_list(gc, args):
"""Describe sharing permissions by image."""
members = gc.image_members.list(args.image_id)
columns = ['Image ID', 'Member ID', 'Status']
utils.print_list(members, columns)
@utils.arg('image_id', metavar='<IMAGE_ID>',
help='Image from which to remove member')
@utils.arg('member_id', metavar='<MEMBER_ID>',
help='Tenant to remove as member')
def do_member_delete(gc, args):
"""Delete image member"""
if not (args.image_id and args.member_id):
utils.exit('Unable to delete member. Specify image_id and member_id')
else:
gc.image_members.delete(args.image_id, args.member_id)
@utils.arg('image_id', metavar='<IMAGE_ID>',
help='Image from which to update member')
@utils.arg('member_id', metavar='<MEMBER_ID>',
help='Tenant to update')
@utils.arg('member_status', metavar='<MEMBER_STATUS>',
help='Updated status of member')
def do_member_update(gc, args):
"""Update the status of a member for a given image."""
if not (args.image_id and args.member_id and args.member_status):
utils.exit('Unable to update member. Specify image_id, member_id and'
' member_status')
else:
member = gc.image_members.update(args.image_id, args.member_id,
args.member_status)
member = [member]
columns = ['Image ID', 'Member ID', 'Status']
utils.print_list(member, columns)
@utils.arg('image_id', metavar='<IMAGE_ID>',
help='Image on which to create member')
@utils.arg('member_id', metavar='<MEMBER_ID>',
help='Tenant to add as member')
def do_member_create(gc, args):
"""Create member for a given image."""
if not (args.image_id and args.member_id):
utils.exit('Unable to create member. Specify image_id and member_id')
else:
member = gc.image_members.create(args.image_id, args.member_id)
member = [member]
columns = ['Image ID', 'Member ID', 'Status']
utils.print_list(member, columns)
@utils.arg('model', metavar='<MODEL>', help='Name of model to describe.')
def do_explain(gc, args):
"""Describe a specific model."""
try:
schema = gc.schemas.get(args.model)
except exc.HTTPNotFound:
utils.exit('Unable to find requested model \'%s\'' % args.model)
else:
formatters = {'Attribute': lambda m: m.name}
columns = ['Attribute', 'Description']
utils.print_list(schema.properties, columns, formatters)
@utils.arg('--file', metavar='<FILE>',
help='Local file to save downloaded image data to. '
'If this is not specified the image data will be '
'written to stdout.')
@utils.arg('id', metavar='<IMAGE_ID>', help='ID of image to download.')
@utils.arg('--progress', action='store_true', default=False,
help='Show download progress bar.')
def do_image_download(gc, args):
"""Download a specific image."""
body =
|
Jacy-Wang/MyLeetCode
|
ClimbStairs70.py
|
Python
|
gpl-2.0
| 393
| 0
|
class Solution(object):
def climbStairs(self, n):
"""
:type n: int
:rtype: int
"""
nums = [0 for _ in xrange(n + 1)]
for i in xrange(1, n + 1):
if i == 1:
nums[1] = 1
elif i == 2:
nums[2] = 2
else:
nums[i] = nums[i - 1] + nums[i - 2]
return nums[n]
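A quick sanity check of the recurrence (a hedged sketch, Python 2 to match the xrange above; nums[i] = nums[i - 1] + nums[i - 2] gives the Fibonacci-like sequence 1, 2, 3, 5, 8, ...):

if __name__ == "__main__":
    # 5 stairs can be climbed in 8 distinct combinations of 1- and 2-steps.
    print Solution().climbStairs(5)   # 8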
|
jlecker/rhythmbox-xchat-music-channel
|
xchat_music_channel/__init__.py
|
Python
|
mit
| 2,960
| 0.004392
|
import rb
import rhythmdb
import dbus
import gconf
from xchat_music_channel.conf import gconf_keys, ConfDialog
from dbus.mainloop.glib import DBusGMainLoop
DBusGMainLoop(set_as_default=True)
class XChatMusicChannelPlugin(rb.Plugin):
def activate(self, shell):
gc = gconf.client_get_default()
self.server = gc.get_string(gconf_keys['server'])
self.channel = gc.get_string(gconf_keys['channel'])
self.shell = shell
self.player = shell.get_player()
self.event_id = self.player.connect('playing-song-changed', self.song_changed)
self.bus = dbus.SessionBus()
self.signal = None
self.xchat_object = None
self.xchat_hook = None
self.xchat_context = None
def deactivate(self, shell):
del self.xchat_context
if self.xchat_hook:
self.signal.remove()
self.get_xchat().Unhook(self.xchat_hook)
del self.xchat_hook
self.player.disconnect(self.event_id)
del self.event_id
del self.player
del self.shell
del self.channel
del self.server
del self.signal
del self.bus
def get_xchat(self):
xchat_object = self.bus.get_object('org.xchat.service', '/org/xchat/Remote')
return dbus.Interface(xchat_object, 'org.xchat.plugin')
def song_changed(self, player, entry):
xchat = self.get_xchat()
self.xchat_context = xchat.FindContext(self.server, self.channel)
if self.xchat_context:
try:
artist = self.shell.props.db.entry_get(entry, rhythmdb.PROP_ARTIST)
title = self.shell.props.db.entry_get(entry, rhythmdb.PROP_TITLE)
album = self.shell.props.db.entry_get(entry, rhythmdb.PROP_ALBUM)
except TypeError:
return
xchat.SetContext(self.xchat_context)
xchat.Command('say Playing: %s - %s (%s)' % (artist, title, album))
if not self.xchat_hook:
self.xchat_hook = xchat.HookPrint('Channel Message', 0, 0)
self.signal = self.bus.add_signal_receiver(
self.got_message,
'PrintSignal',
'org.xchat.plugin',
'org.xchat.service',
'/org/xchat/Remote'
)
elif self.xchat_hook:
self.signal.remove()
xchat.Unhook(self.xchat_hook)
self.xchat_hook = None
def got_message(self, data, priority, context):
if context == self.xchat_context:
msg = str(data[1])
if msg == 'next':
self.player.do_next()
def create_configure_dialog(self, dialog=None):
if not dialog:
builder_file = self.find_file('conf.ui')
dialog = ConfDialog(builder_file).dialog
dialog.present()
return dialog
|
anchor/make-magic
|
tools/lint.py
|
Python
|
bsd-3-clause
| 3,918
| 0.026289
|
#! /usr/bin/env python
'''lint for sets of items
There are many things that really shouldn't exist in a set of items. One of
the most important requirements is that the dependencies form a directed
acyclic graph, with absolutely no cycles.
e.g. given the set of items {a,b,c}, if a depends on b, b depends on c,
and c depends on a, there is a cycle in the dependency graph, which means
there is no valid order to satisfy the dependencies in:
a -> b -> c
^ |
`-------------'
Fig 1: Badness
This set of lint tools helps check for this and more. The bigger the set of
items and the more complex the dependencies in them, the more likely it
is that humans are going to miss something. This is no substitute for
humans putting in the correct data in the first place (like all lint tools,
this isn't going to pick up most errors, just some of them), but it should
help pick up some dire ones
'''
import core.bits
class LintError(Exception): pass
def check_dependencies_are_instances(item):
'''check that the item, its contents, and its dependencies are instances, not classes
This is very important for task instances. You should never mix references between
Item instances in a task and the classes that they are built from. Obvious as this
seems, with the way that marshalling and unmarshalling happens, it is possible if there
are bugs in the loader or marshaller
We could check that they were instances of BaseItem, but that would be pretty un-pythonic
raises LintError if the item is of type 'type'
raises LintError if any member of item.depends is of type 'type'
raises LintError if any member of item.contains is of type 'type'
'''
if type(item) == type:
raise LintError("item is not an instance type",item)
for dep in item.depends:
if type(dep) == type:
raise LintError("item dependency is not an instance type",item,dep)
contains = getattr(item, 'contains', None)
if contains is not None:
for dep in item.contains:
if type(dep) == type:
raise LintError("group content is not an instance type",item,dep)
def check_predicate_returns_boolean(item):
'''check that an item's predicate returns either True or False
TODO: Figure out if this is a good idea. It's not very pythonic
'''
ret = item.predicate([])
if ret is not True and ret is not False:
raise LintError('item predicate does not return True or False', item, item.predicate)
if __name__ == "__main__":
class TestItem(object):
predicate = lambda x: True
depends = ()
try: # should fail
check_dependencies_are_instances(TestItem)
raise Exception("Didn't catch obvious lint error")
except LintError: pass
# Should be fine
check_dependencies_are_instances(TestItem())
try: # should fail
testinst = TestItem()
testinstb = TestItem()
testinst.depends = (testinstb, TestItem)
check_dependencies_are_instances(testinst)
raise Exception("Didn't catch obvious lint error")
except LintError: pass
# Should be fine
testinst = TestItem()
testinstb = TestItem()
testinst.depends = (testinstb,)
check_dependencies_are_instances(testinst)
try: # should fail
testinst = TestItem()
testinstb = TestItem()
testinst.contains = (testinstb, TestItem)
check_dependencies_are_instances(testinst)
raise Exception("Didn't catch obvious lint error")
except LintError: pass
# Should be fine
testinst = TestItem()
testinstb = TestItem()
testinst.contains = (testinstb,)
check_dependencies_are_instances(testinst)
try: # should fail
testinst = TestItem()
testinst.predicate = lambda x: "Oh joy"
check_predicate_returns_boolean(testinst)
raise Exception("Didn't catch obvious lint error")
except LintError: pass
# Should be fine
testinst = TestItem()
testinst.predicate = lambda x: True
check_predicate_returns_boolean(testinst)
testinst.predicate = lambda x: False
check_predicate_returns_boolean(testinst)
|
gsehub/edx-platform
|
lms/djangoapps/learner_dashboard/tests/test_programs.py
|
Python
|
agpl-3.0
| 9,854
| 0.00203
|
# -*- coding: utf-8 -*-
"""
Unit tests covering the program listing and detail pages.
"""
import json
import re
from urlparse import urljoin
from uuid import uuid4
import mock
from bs4 import BeautifulSoup
from django.conf import settings
from django.urls import reverse, reverse_lazy
from django.test import override_settings
from lms.envs.test import CREDENTIALS_PUBLIC_SERVICE_URL
from openedx.core.djangoapps.catalog.tests.factories import CourseFactory, CourseRunFactory, ProgramFactory
from openedx.core.djangoapps.catalog.tests.mixins import CatalogIntegrationMixin
from openedx.core.djangoapps.credentials import STUDENT_RECORDS_FLAG
from openedx.core.djangoapps.programs.tests.mixins import ProgramsApiConfigMixin
from openedx.core.djangoapps.waffle_utils.testutils import override_waffle_flag
from openedx.core.djangolib.testing.utils import skip_unless_lms
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory as ModuleStoreCourseFactory
PROGRAMS_UTILS_MODULE = 'openedx.core.djangoapps.programs.utils'
@skip_unless_lms
@override_settings(MKTG_URLS={'ROOT': 'https://www.example.com'})
@mock.patch(PROGRAMS_UTILS_MODULE + '.get_programs')
class TestProgramListing(ProgramsApiConfigMixin, SharedModuleStoreTestCase):
"""Unit tests for the program listing page."""
shard = 4
maxDiff = None
password = 'test'
url = reverse_lazy('program_listing_view')
@classmethod
def setUpClass(cls):
super(TestProgramListing, cls).setUpClass()
cls.course = ModuleStoreCourseFactory()
course_run = CourseRunFactory(key=unicode(cls.course.id)) # pylint: disable=no-member
course = CourseFactory(course_runs=[course_run])
cls.first_program = ProgramFactory(courses=[course])
cls.second_program = ProgramFactory(courses=[course])
cls.data = sorted([cls.first_program, cls.second_program], key=cls.program_sort_key)
def setUp(self):
super(TestProgramListing, self).setUp()
self.user = UserFactory()
self.client.login(username=self.user.username, password=self.password)
@classmethod
def program_sort_key(cls, program):
"""
Helper function used to sort dictionaries representing programs.
"""
return program['title']
def load_serialized_data(self, response, key):
"""
Extract and deserialize serialized data from the response.
"""
pattern = re.compile(r'{key}: (?P<data>\[.*\])'.format(key=key))
match = pattern.search(response.content)
serialized = match.group('data')
return json.loads(serialized)
def assert_dict_contains_subset(self, superset, subset):
"""
Verify that the dict superset contains the dict subset.
Works like assertDictContainsSubset, deprecated since Python 3.2.
See: https://docs.python.org/2.7/library/unittest.html#unittest.TestCase.assertDictContainsSubset.
"""
superset_keys = set(superset.keys())
subset_keys = set(subset.keys())
intersection = {key: superset[key] for key in superset_keys & subset_keys}
self.assertEqual(subset, intersection)
def test_login_required(self, mock_get_programs):
"""
Verify that login is required to access the page.
"""
self.create_programs_config()
mock_get_programs.return_value = self.data
self.client.logout()
response = self.client.get(self.url)
self.assertRedirects(
response,
'{}?next={}'.format(reverse('signin_user'), self.url)
)
self.client.login(username=self.user.username, password=self.password)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def test_404_if_disabled(self, _mock_get_programs):
"""
Verify that the page 404s if disabled.
"""
self.create_programs_config(enabled=False)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 404)
def test_empty_state(self, mock_get_programs):
"""
Verify that the response contains no programs data when no programs are engaged.
"""
self.create_programs_config()
mock_get_programs.return_value = self.data
response = self.client.get(self.url)
self.assertContains(response, 'programsData: []')
def test_programs_listed(self, mock_get_programs):
"""
Verify that the response contains accurate programs data when programs are engaged.
"""
self.create_programs_config()
mock_get_programs.return_value = self.data
CourseEnrollmentFactory(user=self.user, course_id=self.course.id) # pylint: disable=no-member
response = self.client.get(self.url)
actual = self.load_serialized_data(response, 'programsData')
actual = sorted(actual, key=self.program_sort_key)
for index, actual_program in enumerate(actual):
expected_program = self.data[index]
self.assert_dict_contains_subset(actual_program, expected_program)
def test_program_discovery(self, mock_get_programs):
"""
Verify that a link to a programs marketing page appears in the response.
"""
self.create_programs_config(marketing_path='bar')
mock_get_programs.return_value = self.data
marketing_root = urljoin(settings.MKTG_URLS.get('ROOT'), 'bar').rstrip('/')
response = self.client.get(self.url)
self.assertContains(response, marketing_root)
def test_links_to_detail_pages(self, mock_get_programs):
"""
Verify that links to detail pages are present.
"""
self.create_programs_config()
mock_get_programs.return_value = self.data
CourseEnrollmentFactory(user=self.user, course_id=self.course.id) # pylint: disable=no-member
response = self.client.get(self.url)
actual = self.load_serialized_data(response, 'programsData')
actual = sorted(actual, key=self.program_sort_key)
for index, actual_program in enumerate(actual):
expected_program = self.data[index]
expected_url = reverse('program_details_view', kwargs={'program_uuid': expected_program['uuid']})
self.assertEqual(actual_program['detail_url'], expected_url)
@skip_unless_lms
@mock.patch(PROGRAMS_UTILS_MODULE + '.get_programs')
@override_waffle_flag(STUDENT_RECORDS_FLAG, active=True)
class TestProgramDetails(ProgramsApiConfigMixin, CatalogIntegrationMixin, SharedModuleStoreTestCase):
"""Unit tests for the program details page."""
shard = 4
program_uuid = str(uuid4())
password = 'test'
url = reverse_lazy('program_details_view', kwargs={'program_uuid': program_uuid})
@classmethod
def setUpClass(cls):
super(TestProgramDetails, cls).setUpClass()
modulestore_course = ModuleStoreCourseFactory()
course_run = CourseRunFactory(key=unicode(modulestore_course.id))
course = CourseFactory(course_runs=[course_run])
cls.data = ProgramFactory(uuid=cls.program_uuid, courses=[course])
def setUp(self):
super(TestProgramDetails, self).setUp()
self.user = UserFactory()
self.client.login(username=self.user.username, password=self.password)
def assert_program_data_present(self, response):
"""Verify that program data is present."""
self.assertContains(response, 'programData')
self.assertContains(response, 'urls')
self.assertContains(response,
'"program_record_url": "{}/records/programs/'.format(CREDENTIALS_PUBLIC_SERVICE_URL))
self.assertContains(response, 'program_listing_url')
self.assertContains(response, self.data['title'])
self.assert_programs_tab_present(response)
def assert_programs_tab_present(self, response):
"""Verify that the programs tab is present in the nav."""
soup
|
Azure/azure-sdk-for-python
|
sdk/compute/azure-mgmt-vmwarecloudsimple/azure/mgmt/vmwarecloudsimple/__init__.py
|
Python
|
mit
| 706
| 0.002833
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._vmware_cloud_simple import VMwareCloudSimple
from ._version import VERSION
__version__ = VERSION
__all__ = ['VMwareCloudSimple']
try:
from ._patch import patch_sdk # type: ignore
patch_sdk()
except ImportError:
pass
|
super1337/Super1337-CTF
|
questionnaire/urls.py
|
Python
|
mit
| 299
| 0.003344
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^(?P<quiz_slug>[-A-Za-z0-9_]+)/$', views.quiz, name='quiz'),
url(r'^(?P<quiz_slug>[-A-Za-z0-9_]+)/(?P<question_slug>[-A-Za-z0-9_]+)/$', views.question, name='question')
]
|
goodmami/minigraph
|
minigraph.py
|
Python
|
mit
| 16,121
| 0.003102
|
import warnings
from collections import namedtuple, defaultdict
#Node = namedtuple('Node', ('id', 'data', 'edges', 'in_edges'))
#Edge = namedtuple('Edge', ('start', 'end', 'label', 'data', 'directed'))
class MiniGraphError(Exception): pass
class MiniGraphWarning(Warning): pass
# todo: consider functools.lru_cache for the retrieval methods
class MiniGraph(object):
__slots__ = ('_graph',)
def __init__(self, nodes=None, edges=None):
self._graph = {}
# nodes
if nodes is None:
nodes = {}
self.add_nodes(nodes)
# edges
if edges is None:
edges = {}
self.add_edges(edges)
@classmethod
def fast_init(cls, nodes=None, edges=None):
"""
Initializes the graph without argument checking of edges, which
means that all edges must be 5-tuples of:
(start, end, label, data, directed)
"""
mg = cls(nodes)
if edges is not None:
mg._fast_add_edges1(edges)
return mg
@classmethod
def fast_init2(cls, nodes, edges=None):
"""
Initializes the graph without argument checking of edges, which
means that all edges must be 5-tuples of:
(start, end, label, data, directed)
Furthermore, all edges must only use nodes specified in the
nodes argument.
"""
mg = cls(nodes)
if edges is not None:
mg._fast_add_edges2(edges)
return mg
def __getitem__(self, idx):
"""
Fancy graph queries:
- if idx is an integer, return the node given by idx
- if idx is a slice, return the edges matching
start:end:label. Note that not specifying the label uses
the label of None, which is a valid label. If you want to
consider all labels, use Ellipsis: (g[0:1:...]). All edges
can be retrieved with g[::...].
"""
try:
start, end, label = idx.start, idx.stop, idx.step
if label is Ellipsis:
return self.find_edges(start, end)
else:
return self.find_edges(start, end, label=label)
except AttributeError:
return (idx, self.nodes[idx])
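    # Usage sketch of the slice queries above (an illustration, not part
    # of the original file):
    #   g = MiniGraph(nodes=[1, 2], edges=[(1, 2, 'knows')])
    #   g[1]        -> node lookup by id (returns (id, data))
    #   g[1:2:...]  -> edges from 1 to 2 with any label
    #   g[1:2]      -> edges from 1 to 2 with label None
    #   g[::...]    -> every edge in the graph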
def add_node(self, nodeid, data=None):
# if nodeid in self.nodes:
# raise MiniGraphError('Node already exists: {}'.format(nodeid))
#self.nodes[nodeid] = dict(data or [])
if data is None:
data = {}
if nodeid in self._graph:
self._graph[nodeid][1].update(data)
else:
self._graph[nodeid] = (nodeid, data, {}, {})
def add_nodes(self, nodes):
for node in nodes:
try:
node, data = node
except TypeError:
data = {}
self.add_node(node, data=data)
def remove_node(self, nodeid):
g = self._graph
if nodeid not in g:
raise KeyError(nodeid)
_prune_edges(g, nodeid)
del g[nodeid]
def node(self, nodeid):
return self._graph[nodeid]
def nodes(self):
return [(nid, n[1]) for nid, n in self._graph.items()]
def add_edge(self, start, end, label=None, data=None, directed=True):
self.add_edges([(start, end, label, data, directed)])
#@profile
def add_edges(self, edges):
g = self._graph
add_edge = _add_edge
for edge in edges:
edgelen = len(edge)
if edgelen == 5:
start, end, label, data, directed = edge
elif edgelen == 2:
start, end = edge; label = data = None; directed = True
elif edgelen == 4:
start, end, label, data = edge; directed = True
elif edgelen == 3:
start, end, label = edge; data = None; directed = True
else:
raise MiniGraphError('Invalid edge: {}'.format(edge))
if data is None: data = {}
if start not in g: g[start] = (start, {}, {}, {})
if end not in g: g[end] = (end, {}, {}, {})
e = (start, end, label, data, directed)
#add_edge(g[start][2], label, end, e)
d = g[start][2]
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if end not in innerdict:
innerdict[end] = e
else:
if innerdict[end][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[end][3].update(e[3])
#add_edge(g[end][3], label, start, e)
d = g[end][3]
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if start not in innerdict:
innerdict[start] = e
else:
if innerdict[start][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[start][3].update(e[3])
if directed is False:
#add_edge(g[end][2], label, start, e)
d = g[end][2]
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if start not in innerdict:
innerdict[start] = e
else:
if innerdict[start][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[start][3].update(e[3])
#add_edge(g[start][3], label, end, e)
d = g[start][3]
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if end not in innerdict:
innerdict[end] = e
else:
if innerdict[end][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[end][3].update(e[3])
def _fast_add_edges1(self, edges):
g = self._graph
add_edge = _add_edge
for e in edges:
start = e[0]
end = e[1]
label = e[2]
directed = e[4]
if start not in g:
g[start] = (start, {}, {}, {})
if end not in g:
g[end] = (end, {}, {}, {})
#add_edge(g[start][2], label, end, e)
d = g[start][2]
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if end not in innerdict:
innerdict[end] = e
else:
if innerdict[end][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[end][3].update(e[3])
#add_edge(g[end][3], label, start, e)
d = g[end][3]
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if start not in innerdict:
innerdict[start] = e
else:
if innerdict[start][4] != e[4]:
raise MiniGraphError(
'Cannot update directed and undirected edges.'
)
innerdict[start][3].update(e[3])
if directed is False:
#add_edge(g[end][2], label, start, e)
d = g[end][2]
if label not in d:
d[label] = innerdict = {}
else:
innerdict = d[label]
if
|
Bathlamos/Project-Euler-Solutions
|
solutions/p052.py
|
Python
|
mit
| 451
| 0.035477
|
#
# Solution to Project Euler problem 52
# Philippe Legault
#
# https://github.com/Bathlamos/Project-Euler-Solutions
import itertools
def compute():
c = 1
while True:
lists = [digits(c * n) for n in range(1, 7)]
if len(set(lists)) == 1: # Check that all elements are equal
return c
c += 1
def digits(n):
res = []
while n != 0:
res.append(n % 10)
n /= 10
return tuple(sorted(res))
if __name__ == "__main__":
print(compute())
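For context, a hedged illustration of the permuted-multiples property the loop searches for (142857 is the well-known answer to this problem):

# digits() canonicalizes a number as a sorted tuple of its digits, so
# equal tuples mean the numbers are digit permutations of one another:
#   digits(142857) == digits(2 * 142857) == ... == digits(6 * 142857)
# hence compute() terminates at c == 142857.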
|
simontaylor81/Syrup
|
SRPTests/TestScripts/Python/RenderTarget.py
|
Python
|
mit
| 326
| 0.021472
|
# Test for creating custom render targets.
from SRPScripting import *
import utils
rt = ri.CreateRenderTarget()
testTexCallback = utils.GetTestTextureCallback(ri, rt, "FullscreenTexture_PS", "tex")
def RenderFrame(context):
context.Clear((1, 0.5, 0, 1), [rt])
testTexCallback(context)
ri.SetFrameCallback(RenderFrame)
|
alexgleith/Quantum-GIS
|
python/plugins/sextante/algs/ftools/Delaunay.py
|
Python
|
gpl-2.0
| 4,431
| 0.002257
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
Delaunay.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from sets import Set
from PyQt4.QtCore import *
from qgis.core import *
from sextante.core.GeoAlgorithm import GeoAlgorithm
from sextante.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from sextante.core.QGisLayers import QGisLayers
from sextante.parameters.ParameterVector import ParameterVector
from sextante.outputs.OutputVector import OutputVector
from sextante.algs.ftools import voronoi
class Delaunay(GeoAlgorithm):
INPUT = "INPUT"
OUTPUT = "OUTPUT"
#===========================================================================
# def getIcon(self):
# return QtGui.QIcon(os.path.dirname(__file__) + "/icons/delaunay.png")
#===========================================================================
def defineCharacteristics(self):
self.name = "Delaunay triangulation"
self.group = "Vector geometry tools"
self.addParameter(ParameterVector(self.INPUT, "Input layer", ParameterVector.VECTOR_TYPE_POINT))
self.addOutput(OutputVector(self.OUTPUT, "Delaunay triangulation"))
def processAlgorithm(self, progress):
layer = QGisLayers.getObjectFromUri(self.getParameterValue(self.INPUT))
fields = [QgsField("POINTA", QVariant.Double, "", 24, 15),
QgsField("POINTB", QVariant.Double, "", 24, 15),
QgsField("POINTC", QVariant.Double, "", 24, 15)
]
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(fields,
QGis.WKBPolygon, layer.crs())
pts = []
ptDict = {}
ptNdx = -1
c = voronoi.Context()
features = QGisLayers.features(layer)
for inFeat in features:
geom = QgsGeometry(inFeat.geometry())
point = geom.asPoint()
x = point.x()
y = point.y()
pts.append((x, y))
ptNdx += 1
ptDict[ptNdx] = inFeat.id()
if len(pts) < 3:
raise GeoAlgorithmExecutionException("Input file should contain at least 3 points. Choose another file and try again.")
uniqueSet = Set(item for item in pts)
ids = [pts.index(item) for item in uniqueSet]
sl = voronoi.SiteList([voronoi.Site(*i) for i in uniqueSet])
c.triangulate = True
voronoi.voronoi(sl, c)
triangles = c.triangles
feat = QgsFeature()
current = 0
total = 100.0 / float(len(triangles))
for triangle in triangles:
indicies = list(triangle)
indicies.append(indicies[0])
polygon = []
attrs = []
step = 0
for index in indicies:
request = QgsFeatureRequest().setFilterFid(ptDict[ids[index]])
inFeat = layer.getFeatures(request).next()
geom = QgsGeometry(inFeat.geometry())
point = QgsPoint(geom.asPoint())
polygon.append(point)
if step <= 3:
attrs.append(ids[index])
step += 1
feat.setAttributes(attrs)
geometry = QgsGeometry().fromPolygon([polygon])
feat.setGeometry(geometry)
writer.addFeature(feat)
current += 1
progress.setPercentage(int(current * total))
del writer
|
mitodl/open-discussions
|
channels/migrations/0002_add_subscription.py
|
Python
|
bsd-3-clause
| 1,675
| 0.000597
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-19 19:08
from __future__ import unicode_literals
import channels.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("channels", "0001_add_tokens"),
]
operations = [
migrations.CreateModel(
name="Subscription",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created_on", models.DateTimeField(auto_now_add=True)),
("updated_on", models.DateTimeField(auto_now=True)),
("post_id", channels.models.Base36IntegerField()),
("comment_id"
|
, channels.models.Base36IntegerField(null=True)),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.AlterUniqueTogether(
name="subscription",
unique_together=set([("user", "post_id", "comment_id")]),
),
migrations.AlterIndexTogether(
name="subscription", index_together=set([("post_id", "comment_id")])
),
]
|
ilay09/keystone
|
keystone/tests/unit/identity/backends/test_ldap.py
|
Python
|
apache-2.0
| 1,538
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import fixture as config_fixture
from keystone.identity.backends import ldap
from keystone.tests.unit import core
from keystone.tests.unit.identity.backends import test_base
from keystone.tests.unit.ksfixtures import ldapdb
class TestIdentityDriver(core.BaseTestCase,
                         test_base.IdentityDriverTests):
allows_name_update = False
allows_self_service_change_password = False
expected_is_domain_aware = False
expected_default_assignment_driver = 'sql'
expected_is_sql = False
expected_generates_uuids = False
def setUp(self):
super(TestIdentityDriver, self).setUp()
config_fixture_ = self.useFixture(config_fixture.Config())
config_fixture_.config(
group='ldap',
url='fake://memory',
user='cn=Admin',
password='password',
suffix='cn=example,cn=com')
self.useFixture(ldapdb.LDAPDatabase())
self.driver = ldap.Identity()
|
wukong-m2m/NanoKong
|
tools/demo20120423/showNodeStatus.py
|
Python
|
gpl-2.0
| 3,336
| 0.021882
|
#!/usr/bin/python
import sys
sys.path.append("/Users/niels/git/nanokong/tools/python")
import wkpf
from wkpf import WuObject
numericInputWuObject = WuObject(nodeId=1, portNumber=1, wuclassId=3)
lightSensorWuObject = WuObject(nodeId=1, portNumber=2, wuclassId=5)
thresholdWuObjectScenario1 = WuObject(nodeId=1, portNumber=3, wuclassId=1)
thresholdWuObjectScenario2 = WuObject(nodeId=3, portNumber=3, wuclassId=1)
occupancyWuObject = WuObject(nodeId=1, portNumber=5, wuclassId=0x1005)
andGateWuObject = WuObject(nodeId=3, portNumber=6, wuclassId=0x1006)
lightWuObject = WuObject(nodeId=3, portNumber=4, wuclassId=4)
wuobjectsNode1 = wkpf.getWuObjectList(1)
wuobjectsNode3 = wkpf.getWuObjectList(3)
wuclasses = wkpf.getWuClassList(3)
if 0x1006 in wuclasses: # Scenario 2
light_sensor_value = wkpf.getProperty(lightSensorWuObject, propertyNumber=0)
input_value = wkpf.getProperty(numericInputWuObject, propertyNumber=0)
threshold_operator = wkpf.getProperty(thresholdWuObjectScenario2, propertyNumber=0)
threshold_threshold = wkpf.getProperty(thresholdWuObjectScenario2, propertyNumber=1)
threshold_value = wkpf.getProperty(thresholdWuObjectScenario2, propertyNumber=2)
threshold_output = wkpf.getProperty(thresholdWuObjectScenario2, propertyNumber=3)
occupancy_value = wkpf.getProperty(occupancyWuObject, propertyNumber=0)
andgate_in1 = wkpf.getProperty(andGateWuObject, propertyNumber=0)
andgate_in2 = wkpf.getProperty(andGateWuObject, propertyNumber=1)
andgate_out = wkpf.getProperty(andGateWuObject, propertyNumber=2)
light_value = wkpf.getProperty(lightWuObject, propertyNumber=0)
print ""
print ""
print "=== Light sensor"
print "value:", light_sensor_value
print "=== Input"
    print "value:", input_value
print "=== Threshold"
print "operator:", threshold_operator
print "threshold:", threshold_threshold
print "value:", threshold_value
print "output:", threshold_output
print "=== Occupacy"
print "value:", occupancy_value
print "=== And Gate"
print "in1 (threshold):", andgate_in1
print "in2 (occupancy):", andgate_in2
print "value:", andgate_out
print "=== Light"
print "value:", light_value
print "=== WuObjects on node 1"
    print wuobjectsNode1
print "=== WuObjects on node 3"
print wuobjectsNode3
else: # Scenario 1
light_sensor_value = wkpf.getProperty(lightSensorWuObject, propertyNumber=0)
input_value = wkpf.getProperty(numericInputWuObject, propertyNumber=0)
threshold_operator = wkpf.getProperty(thresholdWuObjectScenario1, propertyNumber=0)
threshold_threshold = wkpf.getProperty(thresholdWuObjectScenario1, propertyNumber=1)
threshold_value = wkpf.getProperty(thresholdWuObjectScenario1, propertyNumber=2)
threshold_output = wkpf.getProperty(thresholdWuObjectScenario1, propertyNumber=3)
light_value = wkpf.getProperty(lightWuObject, propertyNumber=0)
print ""
print ""
print "=== Light sensor"
print "value:", light_sensor_value
print "=== Input"
print "value:", input_value
print "=== Threshold"
print "operator:", threshold_operator
print "threshold:", threshold_threshold
print "value:", threshold_value
print "output:", threshold_output
print "=== Light"
print "value:", light_value
print "=== WuObjects on node 1"
print wuobjectsNode1
print "=== WuObjects on node 3"
print wuobjectsNode3
|
kfrodgers/active-mail-filter
|
active_mail_filter/simple_db.py
|
Python
|
bsd-2-clause
| 3,390
| 0.000295
|
# Copyright (c) 2016, Kevin Rodgers
# Released subject to the New BSD License
# Please see http://en.wikipedia.org/wiki/BSD_licenses
import redis
from uuid import uuid4
UUID = 'uuid'
class SimpleRedisDb(object):
def __init__(self, host, key, port=6379):
"""
:param host: database host
:param port: database port, default 6379
:param key: hash set name default active_mail_filter
"""
self.host = host
self.port = port
self.key = key
self.redis = None
def __str__(self):
return 'SimpleRedisDb [host=%s, port=%d, key=%s, redis=%s]' % \
(self.host, self.port, self.key, str(self.redis))
def __del__(self):
"""
Close database on delete of object
:return:
"""
self._close_db()
def _open_db(self):
"""
Opens/reopens database if necessary
:return:
"""
if self.redis is None:
self.redis = redis.Redis(connection_pool=redis.ConnectionPool(host=self.host, port=self.port, db=0))
def _close_db(self):
"""
Closes database
:return:
"""
if self.redis is not None:
del self.redis
self.redis = None
def _clear_all(self):
"""
Removes all keys from hash set
:return:
"""
self._open_db()
record_keys = self.redis.hkeys(self.key)
for u in record_keys:
self.redis.hdel(self.key, u)
def get_record(self, record_key):
"""
Return record dictionary for specified UUID
:param record_key:
:return:
record dictionary or None if not found
"""
self._open_db()
record_str = self.redis.hget(self.key, record_key)
if record_str is None:
raise LookupError('%s key not found' % record_key)
return eval(record_str)
def get_all_records(self):
"""
Return a list of all records
:return:
list of all record dictionaries
"""
self._open_db()
all_records = []
record_keys = self.redis.hkeys(self.key)
for u in record_keys:
record_str = self.redis.hget(self.key, u)
if record_str is not None:
all_records.append(eval(record_str))
return all_records
def add_record(self, record_dict):
"""
Add a record to the hash set, auto generate UUID
:param record_dict: record dictionary
:return:
hash set key or UUID generated for volume dictionary
"""
self._open_db()
record_dict[UUID] = unicode(uuid4())
self.redis.hset(self.key, record_dict[UUID], unicode(record_dict))
return record_dict[UUID]
def delete_record(self, record_key):
"""
Delete record from hash set by UUID
:param record_key:
:return:
"""
self._open_db()
self.redis.hdel(self.key, record_key)
def update_record(self, record_key, record_dict):
"""
Update/replace record dictionary by UUID
:param record_key: UUID
:param record_dict: volume dictionary
:return:
"""
self._open_db()
record_dict[UUID] = record_key
self.redis.hset(self.key, record_dict[UUID], unicode(record_dict))
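A minimal usage sketch (assumes a reachable Redis server; the host, key, and record fields are hypothetical):

if __name__ == '__main__':
    db = SimpleRedisDb('localhost', 'active_mail_filter')
    record_key = db.add_record({'user': 'kevin', 'folder': 'INBOX'})
    print db.get_record(record_key)   # includes the generated 'uuid' field
    db.delete_record(record_key)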
|
Aravinthu/odoo
|
addons/auth_signup/controllers/main.py
|
Python
|
agpl-3.0
| 6,625
| 0.003774
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import werkzeug
from odoo import http, _
from odoo.addons.auth_signup.models.res_users import SignupError
from odoo.addons.web.controllers.main import ensure_db, Home
from odoo.exceptions import UserError
from odoo.http import request
_logger = logging.getLogger(__name__)
class AuthSignupHome(Home):
    @http.route()
def web_login(self, *args, **kw):
ensure_db()
response = super(AuthSignupHome, self).web_login(*args, **kw)
response.qcontext.update(self.get_auth_signup_config())
if request.httprequest.method == 'GET' and request.session.uid and request.params.get('redirect'):
# Redirect if already logged in and redirect param is present
return http.redirect_with_hash(request.params.get('redirect'))
return response
@http.route('/web/signup', type='http', auth='public', website=True, sitemap=False)
def web_auth_signup(self, *args, **kw):
qcontext = self.get_auth_signup_qcontext()
if not qcontext.get('token') and not qcontext.get('signup_enabled'):
raise werkzeug.exceptions.NotFound()
if 'error' not in qcontext and request.httprequest.method == 'POST':
try:
self.do_signup(qcontext)
# Send an account creation confirmation email
if qcontext.get('token'):
user_sudo = request.env['res.users'].sudo().search([('login', '=', qcontext.get('login'))])
template = request.env.ref('auth_signup.mail_template_user_signup_account_created', raise_if_not_found=False)
if user_sudo and template:
template.sudo().with_context(
lang=user_sudo.lang,
auth_login=werkzeug.url_encode({'auth_login': user_sudo.email}),
password=request.params.get('password')
).send_mail(user_sudo.id, force_send=True)
return super(AuthSignupHome, self).web_login(*args, **kw)
except UserError as e:
qcontext['error'] = str(e)
except (SignupError, AssertionError) as e:
if request.env["res.users"].sudo().search([("login", "=", qcontext.get("login"))]):
qcontext["error"] = _("Another user is already registered using this email address.")
else:
_logger.error("%s", e)
qcontext['error'] = _("Could not create a new account.")
return request.render('auth_signup.signup', qcontext)
@http.route('/web/reset_password', type='http', auth='public', website=True, sitemap=False)
def web_auth_reset_password(self, *args, **kw):
qcontext = self.get_auth_signup_qcontext()
if not qcontext.get('token') and not qcontext.get('reset_password_enabled'):
raise werkzeug.exceptions.NotFound()
if 'error' not in qcontext and request.httprequest.method == 'POST':
try:
if qcontext.get('token'):
self.do_signup(qcontext)
return super(AuthSignupHome, self).web_login(*args, **kw)
else:
login = qcontext.get('login')
assert login, _("No login provided.")
_logger.info(
"Password reset attempt for <%s> by user <%s> from %s",
login, request.env.user.login, request.httprequest.remote_addr)
request.env['res.users'].sudo().reset_password(login)
qcontext['message'] = _("An email has been sent with credentials to reset your password")
except SignupError:
qcontext['error'] = _("Could not reset your password")
_logger.exception('error when resetting password')
except Exception as e:
qcontext['error'] = str(e)
response = request.render('auth_signup.reset_password', qcontext)
response.headers['X-Frame-Options'] = 'DENY'
return response
def get_auth_signup_config(self):
"""retrieve the module config (which features are enabled) for the login page"""
get_param = request.env['ir.config_parameter'].sudo().get_param
return {
'signup_enabled': get_param('auth_signup.allow_uninvited') == 'True',
'reset_password_enabled': get_param('auth_signup.reset_password') == 'True',
}
def get_auth_signup_qcontext(self):
""" Shared helper returning the rendering context for signup and reset password """
qcontext = request.params.copy()
qcontext.update(self.get_auth_signup_config())
if not qcontext.get('token') and request.session.get('auth_signup_token'):
qcontext['token'] = request.session.get('auth_signup_token')
if qcontext.get('token'):
try:
# retrieve the user info (name, login or email) corresponding to a signup token
token_infos = request.env['res.partner'].sudo().signup_retrieve_info(qcontext.get('token'))
for k, v in token_infos.items():
qcontext.setdefault(k, v)
except:
qcontext['error'] = _("Invalid signup token")
qcontext['invalid_token'] = True
return qcontext
def do_signup(self, qcontext):
""" Shared helper that creates a res.partner out of a token """
values = { key: qcontext.get(key) for key in ('login', 'name', 'password') }
if not values:
raise UserError(_("The form was not properly filled in."))
if values.get('password') != qcontext.get('confirm_password'):
raise UserError(_("Passwords do not match; please retype them."))
supported_langs = [lang['code'] for lang in request.env['res.lang'].sudo().search_read([], ['code'])]
if request.lang in supported_langs:
values['lang'] = request.lang
self._signup_with_values(qcontext.get('token'), values)
request.env.cr.commit()
def _signup_with_values(self, token, values):
db, login, password = request.env['res.users'].sudo().signup(values, token)
request.env.cr.commit() # as authenticate will use its own cursor we need to commit the current transaction
uid = request.session.authenticate(db, login, password)
if not uid:
raise SignupError(_('Authentication Failed.'))
|
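A minimal standalone sketch (not Odoo's implementation) of the signup-value validation that do_signup performs above; the field names mirror the qcontext keys used by the controller.

# Sketch only: replicates the login/name/password extraction and the
# password-confirmation check from do_signup, outside of Odoo.
def validate_signup_values(qcontext):
    values = {key: qcontext.get(key) for key in ('login', 'name', 'password')}
    if not any(values.values()):
        raise ValueError("The form was not properly filled in.")
    if values.get('password') != qcontext.get('confirm_password'):
        raise ValueError("Passwords do not match; please retype them.")
    return values

assert validate_signup_values({'login': 'a@example.com', 'name': 'A',
                               'password': 'pw', 'confirm_password': 'pw'})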
sergiohzlz/lectorcfdi
|
extrainfo.py
|
Python
|
apache-2.0
| 8,345
| 0.016906
|
#!/usr/bin/python
#-*-coding:utf8-*-
from bs4 import BeautifulSoup as Soup
#import pandas as pd
import glob
import sys
import re
"""
CFDI XML version 3.3
"""
class CFDI(object):
def __init__(self, f):
"""
        Constructor that requires, as its parameter, a string with the name of
        the CFDI file.
"""
fxml = open(f,'r').read()
soup = Soup(fxml,'lxml')
        #============cfdi components=================
emisor = soup.find('cfdi:emisor')
receptor = soup.find('cfdi:receptor')
comprobante = soup.find('cfdi:comprobante')
tfd = soup.find('tfd:timbrefiscaldigital')
self.__version = comprobante['version']
self.__folio = comprobante['folio']
self.__uuid = tfd['uuid']
self.__fechatimbrado = tfd['fechatimbrado']
self.__traslados = soup.find_all(lambda e: e.name=='cfdi:traslado' and
sorted(e.attrs.keys())==['importe','impuesto','tasaocuota','tipofactor'])
        self.__retenciones = soup.find_all(lambda e: e.name=='cfdi:retencion' and
sorted(e.attrs.keys())==['importe','impuesto'])
#============emisor==========================
self.__emisorrfc = emisor['rfc']
try:
self.__emisornombre = emisor['nombre']
except:
self.__emisornombre = emisor['rfc']
#============receptor========================
self.__receptorrfc = receptor['rfc']
try:
self.__receptornombre = receptor['nombre']
except:
self.__receptornombre = receptor['rfc']
#============comprobante=====================
self.__certificado = comprobante['certificado']
self.__sello = comprobante['sello']
self.__total = round(float(comprobante['total']),2)
self.__subtotal = round(float(comprobante['subtotal']),2)
self.__fecha_cfdi = comprobante['fecha']
self.__conceptos = soup.find_all(lambda e: e.name=='cfdi:concepto')
self.__n_conceptos = len(self.__conceptos)
try:
self.__moneda = comprobante['moneda']
except KeyError as k:
self.__moneda = 'MXN'
try:
self.__lugar = comprobante['lugarexpedicion']
except KeyError as k:
self.__lugar = u'México'
tipo = comprobante['tipodecomprobante']
if(float(self.__version)==3.2):
self.__tipo = tipo
else:
tcomprobantes = {'I':'Ingreso', 'E':'Egreso', 'N':'Nomina', 'P':'Pagado'}
self.__tipo = tcomprobantes[tipo]
try:
self.__tcambio = float(comprobante['tipocambio'])
except:
self.__tcambio = 1.
triva, trieps, trisr = self.__calcula_traslados()
self.__triva = round(triva,2)
self.__trieps = round(trieps,2)
self.__trisr = round(trisr,2)
retiva, retisr = self.__calcula_retenciones()
self.__retiva = round(retiva,2)
self.__retisr = round(retisr,2)
def __str__(self):
"""
        Prints the CFDI in the following order:
        emisor, fechatimbrado, tipo, rfc emisor, uuid, folio,
        receptor, rfc receptor, subtotal, ieps, iva, retiva, retisr, tc, total
"""
respuesta = '\t'.join( map(str, self.lista_valores))
return respuesta
def __calcula_traslados(self):
        triva, trieps, trisr = 0., 0., 0.
for t in self.__traslados:
impuesto = t['impuesto']
importe = float(t['importe'])
if(self.__version=='3.2'):
if impuesto=='IVA':
triva += importe
elif impuesto=='ISR':
trisr += importe
elif impuesto=='IEPS':
trieps += importe
elif(self.__version=='3.3'):
if impuesto=='002':
triva += importe
elif impuesto=='001':
trisr += importe
elif impuesto=='003':
trieps += importe
return triva, trieps, trisr
def __calcula_retenciones(self):
retiva, retisr = 0., 0.
for t in self.__retenciones:
impuesto = t['impuesto']
importe = float(t['importe'])
if(self.__version=='3.2'):
if(impuesto=='ISR'):
retisr += importe
elif(impuesto=='IVA'):
retiva += importe
elif(self.__version=='3.3'):
if(impuesto=='002'):
retiva += importe
elif(impuesto=='001'):
retisr += importe
return retiva, retisr
@property
def lista_valores(self):
v = [self.__emisornombre,self.__fechatimbrado, self.__tipo, self.__emisorrfc ]
v += [self.__uuid, self.__folio, self.__receptornombre, self.__receptorrfc ]
v += [self.__subtotal, self.__trieps, self.__triva]
v += [self.__retiva, self.__retisr, self.__tcambio, self.__total]
return v
@property
def dic_cfdi(self):
d = {}
d["Emisor"] = self.__emisornombre
d["Fecha_CFDI"] = self.__fechatimbrado
d["Tipo"] = self.__tipo
d["RFC_Emisor"] = self.__emisorrfc
d["Folio_fiscal"] = self.__uuid
d["Folio"] = self.__folio
d["Receptor"] = self.__receptornombre
d["RFC_Receptor"] = self.__receptorrfc
d["Subtotal"] = self.__subtotal
d["IEPS"] = self.__trieps
d["IVA"] = self.__triva
d["Ret IVA"] = self.__retiva
d["Ret ISR"] = self.__retisr
d["TC"] = self.__tcambio
d["Total"] = self.__total
return d
@property
def certificado(self):
return self.__certificado
@property
def sello(self):
return self.__sello
@property
def total(self):
return self.__total
@property
def subtotal(self):
return self.__subtotal
@property
def fechatimbrado(self):
return self.__fechatimbrado
@property
def tipodecambio(self):
return self.__tcambio
@property
def lugar(self):
return self.__lugar
@property
def moneda(self):
return self.__moneda
@property
def traslado_iva(self):
return self.__triva
@property
def traslado_isr(self):
return self.__trisr
@property
def traslado_ieps(self):
return self.__trieps
@property
def n_conceptos(self):
return self.__n_conceptos
@property
def conceptos(self):
return self.__conceptos
@property
def folio(self):
return self.__folio
@staticmethod
def columnas():
return ["Emisor","Fecha_CFDI","Tipo","RFC_Emisor","Folio_fiscal","Folio","Receptor",
"RFC_Receptor", "Subtotal","IEPS","IVA","Ret IVA","Ret ISR","TC","Total"]
@staticmethod
def imprime_reporte(nf, nr):
reporte = "Número de archivos procesados:\t {}\n".format(nf)
reporte += "Número de filas en tsv:\t {}\n".format(nr)
if(nf!=nr):
reporte += "\n\n**** Atención ****\n"
return reporte
L = glob.glob('./*.xml')
#R = [ patt[1:].strip().lower() for patt in re.findall('(<cfdi:[A-z]*\s|<tfd:[A-z]*\s)',fxml)]
if __name__=='__main__':
salida = sys.argv[1]
fout = open(salida,'w')
columnas = CFDI.columnas()
titulo = '\t'.join(columnas)+'\n'
fout.write(titulo)
nl = 0
for f in L:
try:
#print("abriendo {0}".format(f))
rcfdi = CFDI(f)
dic = rcfdi.dic_cfdi
vals = [dic[c] for c in columnas]
strvals = ' \t '.join(map(str, vals))+'\n'
fout.write(strvals)
            nl += 1
        except Exception as e:
            print("error processing {0}: {1}".format(f, e))
    fout.close()
    print(CFDI.imprime_reporte(len(L), nl))
|
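A hedged usage sketch for the CFDI class above; 'factura.xml' is a placeholder filename, not a file shipped with the repository.

# Hypothetical usage of the CFDI class defined above.
cfdi = CFDI('factura.xml')          # placeholder path to a CFDI 3.3 XML file
print('\t'.join(CFDI.columnas()))   # header row
print(cfdi)                         # __str__ emits the tab-separated values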
KiChjang/servo
|
tests/wpt/web-platform-tests/tools/manifest/typedata.py
|
Python
|
mpl-2.0
| 10,877
| 0.000827
|
from collections.abc import MutableMapping
MYPY = False
if MYPY:
# MYPY is set to True when run under Mypy.
from typing import Any
from typing import Dict
from typing import Iterator
from typing import List
from typing import Optional
from typing import Set
from typing import Text
from typing import Tuple
from typing import Type
from typing import Union
# avoid actually importing these, they're only used by type comments
from . import item
from . import manifest
if MYPY:
TypeDataType = MutableMapping[Tuple[Text, ...], Set[item.ManifestItem]]
PathHashType = MutableMapping[Tuple[Text, ...], Text]
else:
TypeDataType = MutableMapping
PathHashType = MutableMapping
class TypeData(TypeDataType):
def __init__(self, m, type_cls):
# type: (manifest.Manifest, Type[item.ManifestItem]) -> None
"""Dict-like object containing the TestItems for each test type.
Loading an actual Item class for each test is unnecessarily
slow, so this class allows lazy-loading of the test
items. When the manifest is loaded we store the raw json
corresponding to the test type, and only create an Item
subclass when the test is accessed. In order to remain
API-compatible with consumers that depend on getting an Item
        from iteration, we eagerly load all items when iterating
over the class."""
self._manifest = m
self._type_cls = type_cls # type: Type[item.ManifestItem]
self._json_data = {} # type: Dict[Text, Any]
self._data = {} # type: Dict[Text, Any]
self._hashes = {} # type: Dict[Tuple[Text, ...], Text]
self.hashes = PathHash(self)
def _delete_node(self, data, key):
# type: (Dict[Text, Any], Tuple[Text, ...]) -> None
"""delete a path from a Dict data with a given key"""
path = []
node = data
for pathseg in key[:-1]:
path.append((node, pathseg))
node = node[pathseg]
if not isinstance(node, dict):
raise KeyError(key)
del node[key[-1]]
while path:
node, pathseg = path.pop()
if len(node[pathseg]) == 0:
del node[pathseg]
else:
break
def __getitem__(self, key):
        # type: (Tuple[Text, ...]) -> Set[item.ManifestItem]
node = self._data # type: Union[Dict[Text, Any], Set[item.ManifestItem], List[Any]]
for pathseg in key:
if isinstance(node, dict) and pathseg in node:
node = node[pathseg]
else:
break
else:
if isinstance(node, set):
return node
else:
raise KeyError(key)
node = self._json_data
found = False
for pathseg in key:
if isinstance(node, dict) and pathseg in node:
node = node[pathseg]
else:
break
else:
found = True
if not found:
raise KeyError(key)
if not isinstance(node, list):
raise KeyError(key)
self._hashes[key] = node[0]
data = set()
path = "/".join(key)
for test in node[1:]:
manifest_item = self._type_cls.from_json(self._manifest, path, test)
data.add(manifest_item)
node = self._data
assert isinstance(node, dict)
for pathseg in key[:-1]:
node = node.setdefault(pathseg, {})
assert isinstance(node, dict)
assert key[-1] not in node
node[key[-1]] = data
self._delete_node(self._json_data, key)
return data
def __setitem__(self, key, value):
# type: (Tuple[Text, ...], Set[item.ManifestItem]) -> None
try:
self._delete_node(self._json_data, key)
except KeyError:
pass
node = self._data
for i, pathseg in enumerate(key[:-1]):
node = node.setdefault(pathseg, {})
if not isinstance(node, dict):
raise KeyError("%r is a child of a test (%r)" % (key, key[:i+1]))
node[key[-1]] = value
def __delitem__(self, key):
# type: (Tuple[Text, ...]) -> None
try:
self._delete_node(self._data, key)
except KeyError:
self._delete_node(self._json_data, key)
else:
try:
del self._hashes[key]
except KeyError:
pass
def __iter__(self):
# type: () -> Iterator[Tuple[Text, ...]]
"""Iterator over keys in the TypeData in codepoint order"""
data_node = self._data # type: Optional[Dict[Text, Any]]
json_node = self._json_data # type: Optional[Dict[Text, Any]]
path = tuple() # type: Tuple[Text, ...]
stack = [(data_node, json_node, path)]
while stack:
data_node, json_node, path = stack.pop()
if isinstance(data_node, set) or isinstance(json_node, list):
assert data_node is None or json_node is None
yield path
else:
assert data_node is None or isinstance(data_node, dict)
assert json_node is None or isinstance(json_node, dict)
keys = set() # type: Set[Text]
if data_node is not None:
keys |= set(iter(data_node))
if json_node is not None:
keys |= set(iter(json_node))
for key in sorted(keys, reverse=True):
stack.append((data_node.get(key) if data_node is not None else None,
json_node.get(key) if json_node is not None else None,
path + (key,)))
def __len__(self):
# type: () -> int
count = 0
stack = [self._data]
while stack:
v = stack.pop()
if isinstance(v, set):
count += 1
else:
stack.extend(v.values())
stack = [self._json_data]
while stack:
v = stack.pop()
if isinstance(v, list):
count += 1
else:
stack.extend(v.values())
return count
def __nonzero__(self):
# type: () -> bool
return bool(self._data) or bool(self._json_data)
__bool__ = __nonzero__
def __contains__(self, key):
# type: (Any) -> bool
# we provide our own impl of this to avoid calling __getitem__ and generating items for
# those in self._json_data
node = self._data
for pathseg in key:
if pathseg in node:
node = node[pathseg]
else:
break
else:
return bool(isinstance(node, set))
node = self._json_data
for pathseg in key:
if pathseg in node:
node = node[pathseg]
else:
break
else:
return bool(isinstance(node, list))
return False
def clear(self):
# type: () -> None
# much, much simpler/quicker than that defined in MutableMapping
self._json_data.clear()
self._data.clear()
self._hashes.clear()
def set_json(self, json_data):
# type: (Dict[Text, Any]) -> None
"""Provide the object with a raw JSON blob
Note that this object graph is assumed to be owned by the TypeData
object after the call, so the caller must not mutate any part of the
graph.
"""
if self._json_data:
raise ValueError("set_json call when JSON data is not empty")
self._json_data = json_data
def to_json(self):
# type: () -> Dict[Text, Any]
"""Convert the current data to JSON
Note that the returned object may contain references to the internal
data structures, and is only guaranteed to be valid until the next
__getitem__, __setitem__, __delitem__ call, so the caller must not
        mutate any part of it.
        """
|
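A simplified, self-contained sketch of the lazy-materialisation pattern TypeData implements above (raw JSON kept per key, converted and cached on first read); this is an illustration, not the wpt code.

class LazyDict:
    """Toy version of TypeData's pattern: keep raw values and convert
    them to real objects only when a key is first accessed."""
    def __init__(self, raw, convert):
        self._raw = raw        # key -> raw (e.g. JSON) value
        self._data = {}        # key -> converted value
        self._convert = convert

    def __getitem__(self, key):
        if key not in self._data:
            self._data[key] = self._convert(self._raw.pop(key))
        return self._data[key]

d = LazyDict({"a": "1"}, int)
assert d["a"] == 1   # converted on first read
assert d["a"] == 1   # served from the cache afterwards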
nh0815/PySearch
|
pysearch/views.py
|
Python
|
mit
| 154
| 0.012987
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
__author__ = 'Nick'
def index(request):
return redirect('/search')
|
|
SuicSoft/stk-code
|
tools/update_characteristics.py
|
Python
|
gpl-3.0
| 2,103
| 0.009035
|
#!/usr/bin/env python3
#
# SuperTuxKart - a fun racing game with go-kart
# Copyright (C) 2006-2015 SuperTuxKart-Team
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# This script uses create_kart_properties.py to create code and then replaces
# the code in the source files. The parts in the source are marked with tags, that
# contain the argument that has to be passed to create_kart_properties.py.
# The script has to be run from the root directory of this project.
import os
import re
import subprocess
from create_kart_properties import functions
def main():
# Check, if it runs in the root directory
if not os.path.isfile("tools/update_characteristics.py"):
print("Please run this script in the root directory of the project.")
exit(1)
for operation, function in functions.items():
result = subprocess.Popen("tools/create_kart_properties.py " +
operation, shell = True,
stdout = subprocess.PIPE).stdout.read().decode('UTF-8')
with open("src/" + function[2], "r") as f:
text = f.read()
# Replace the text by using look behinds and look forwards
text = re.sub("(?<=/\* \<characteristics-start " + operation +
"\> \*/\\n)(.|\n)*(?=\\n\s*/\* <characteristics-end " + operation + "> \*/)", result, text)
with open("src/" + function[2], "w") as f:
f.write(text)
if __name__ == '__main__':
main()
|
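A minimal sketch of the look-behind/look-ahead replacement that main() performs above; the tag name 'demo' and the sample text are hypothetical.

import re

text = ("/* <characteristics-start demo> */\n"
        "OLD\n"
        "/* <characteristics-end demo> */")
new_text = re.sub(r"(?<=/\* <characteristics-start demo> \*/\n)(.|\n)*"
                  r"(?=\n/\* <characteristics-end demo> \*/)",
                  "NEW", text)
assert "NEW" in new_text and "OLD" not in new_text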
GeneralizedLearningUtilities/SuperGLU
|
python_module/stomp/test/p3_backward_test.py
|
Python
|
mit
| 884
| 0
|
import unittest
from stomp import backward3
class TestBackward3(unittest.TestCase):
def test_pack_mixed_string_and_bytes(self):
lines = ['SEND', '\n', 'header1:test', '\u6771']
self.assertEqual(backward3.encode(backward3.pack(lines)),
b'SEND\nheader1:test\xe6\x9d\xb1')
        lines = ['SEND', '\n', 'header1:test', b'\xe6\x9d\xb1']
self.assertEqual(backward3.encode(backward3.pack(lines)),
b'SEND\nheader1:test\xe6\x9d\xb1')
def test_decode(self):
self.assertTrue(backward3.decode(None) is None)
self.assertEqual('test', backward3.decode(b'test'))
def test_encode(self):
self.assertEqual(b'test', backward3.encode('test'))
        self.assertEqual(b'test', backward3.encode(b'test'))
self.assertRaises(TypeError, backward3.encode, None)
|
OlympusMonds/PyCircleriser
|
PyCircleriser.py
|
Python
|
gpl-3.0
| 8,228
| 0.007049
|
#============================================================================
# Name : circ-pic.py
# Author : Luke Mondy
# ============================================================================
#
# Copyright (C) 2012 Mondy Luke
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ============================================================================
from __future__ import division
import sys
import Image, ImageDraw
import argparse
import numpy as np
from math import sqrt
HAVE_PYPRIND = True
try:
import pyprind
except ImportError:
HAVE_PYPRIND = False
LOGGING = False
def log(message):
global LOGGING
if LOGGING:
print message
sys.stdout.flush()
def getImage(image, scale=1.0, grey=True):
try:
log("Opening image: %s" % image)
im = Image.open(image)
except Exception as e:
error_msg = ("Image file you provided:\n{image}\ndoes not exist! Here's what the computer"
"says:\n{exception}".format(image=image, exception=e))
sys.exit(error_msg)
if scale != 1.0:
im = im.resize(tuple(int(i * scale) for i in im.size))
if grey:
im = im.convert('L')
return im
def overlapping(c1, c2):
    # circle data type:
    # (x, y, rad)
dist = sqrt( (c2[0] - c1[0])**2 + (c2[1] - c1[1])**2 ) # This sqrt is killin' me...
if c1[2] + c2[2] > dist:
return True
return False
def render(circles, path, params, imsize):
log("Rendering...")
if params['bgimg']:
        bg = getImage(params['bgimg'], grey=False)
bgim = bg.resize(imsize)
bgpix = bgim.load()
col = params['bgcolour']
col = 255 if col > 255 else col
col = 0 if col < 0 else col
bgcolour = (col, col, col)
outline = (0, 0, 0)
if params['nooutline']:
outline = None
final = Image.new('RGB', imsize, bgcolour)
draw = ImageDraw.Draw(final)
im_x, im_y = imsize
for y in range(im_y):
for x in range(im_x):
circle_radius = circles[x,y]
if circle_radius != 0:
bb = (x - circle_radius, y - circle_radius,
x + circle_radius, y + circle_radius)
fill = bgpix[x, y] if params['bgimg'] else (255, 255, 255)
draw.ellipse(bb, fill=fill, outline=outline)
del draw
final.save(params['outimg'])
def circlerise(params):
global LOGGING
global HAVE_PYPRIND
interval = params['interval']
maxrad = params['maxrad']
scale = params['scale']
im = getImage(params['circimg'], scale)
pixels = im.load()
circles = np.zeros(im.size, int)
"""
=== Algorithm ===
For each pixel in the original image, determine its
"grey" brightness, and determine an appropriate radius
for that.
Now look in the local region for other circles (local
is determined by the max_radius of other circles + the
radius of the current potential circle).
If there is some circles nearby, check to see if the
new circle will overlap with it or not. If all nearby
circles won't overlap, then record the radius in a 2D
array that corresponds to the image.
"""
im_x, im_y = im.size
skips = 0
if LOGGING and HAVE_PYPRIND :
progress = pyprind.ProgBar(im_y, stream=1)
for y in range(0, im_y, interval):
prev_rad = 0
closeness = 0
for x in range(0, im_x, interval):
closeness += 1
# Determine radius
greyval = pixels[x, y]
radius = int(maxrad * (greyval/255))
if radius == 0:
radius = 1
# If we are still going to be inside the last circle
# placed on the same X row, save time and skip.
if prev_rad + radius >= closeness:
skips += 1
continue
bb = [x - radius - maxrad, # Define bounding box.
y - radius - maxrad,
x + radius + maxrad,
y + radius + maxrad]
if bb[0] < 0: # Ensure the bounding box is OK with
bb[0] = 0 # edges. We don't need to check the
if bb[1] < 0: # outer edges because it's OK for the
bb[1] = 0 # centre to be right on the edge.
if bb[2] >= im_x:
bb[2] = im_x - 1
if bb[3] >= im_y:
bb[3] = im_y - 1
c1 = (x, y, radius)
# Use bounding box and numpy to extract the local area around the
            # circle. Then use numpy to do a boolean operation to give a
# true/false matrix of whether circles are nearby.
local_area = circles[bb[0]:bb[2], bb[1]:bb[3]]
circle_nearby = local_area != 0
coords_of_local_circles = np.where(circle_nearby)
radii_of_local_cirles = np.expand_dims(local_area[circle_nearby], axis=0) # Need the extra dim for next step
nrby_cirles = np.vstack([coords_of_local_circles, radii_of_local_cirles])
nrby_cirles = nrby_cirles.transpose()
any_overlaps_here = False
if nrby_cirles.shape[0] == 0:
circles[x,y] = radius
prev_rad = radius
closeness = 0
else:
for n in nrby_cirles:
c2 = (n[0]+bb[0], n[1]+bb[1], n[2])
overlap = overlapping(c1, c2)
if overlap:
any_overlaps_here = True
break
# Look if any nearby circles overlap. If any do, don't make
# a circle here.
if not any_overlaps_here:
circles[x, y] = radius
prev_rad = radius
closeness = 0
if LOGGING is True and HAVE_PYPRIND is True:
progress.update()
log("Avoided {skips} calculations".format(skips=skips))
render(circles, "", params, im.size)
def main(argv=None):
parser = argparse.ArgumentParser(description="Using imgcirc!")
addarg = parser.add_argument # just for cleaner code
addarg("--circimg", type=str, required=True,
help="The image that will make up the circles.", )
addarg("--interval", type=int, default=1,
help="Interval between pixels to look at in the circimg. 1 means all pixels.")
addarg("--bgimg", type=str,
help="An image to colour the circles with. Will be resized as needed.")
addarg("--outimg", type=str, required=True,
help="Filename for the outputted image.")
addarg("--maxrad", type=int, default=10,
help="Max radius of a circle (corresponds to a white pixel)")
addarg("--scale", type=float, default=1,
help="Percent to scale up the circimg (sometimes makes it look better).")
addarg("--bgcolour", type=int, default=255,
help="Grey-scale val from 0 to 255")
addarg("--nooutline", action='store_true', default=False,
help="When specified, no outline will be drawn on circles.")
addarg("--log", action='store_true', default=False,
help="Write progress to stdout.")
parsed_args = parser.parse_args()
params = dict(parsed_args.__dict__)
global LOGGING
if params["log"] is True:
LOGGING = True
log("Begin circlerising...")
circlerise(params)
|
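A squared-distance variant of the overlapping() test above; same result without the sqrt that the inline comment complains about.

def overlapping_sq(c1, c2):
    # circle data type: (x, y, rad); True when the circles intersect
    dx, dy = c2[0] - c1[0], c2[1] - c1[1]
    r = c1[2] + c2[2]
    return dx * dx + dy * dy < r * r

assert overlapping_sq((0, 0, 5), (3, 0, 3))       # overlapping
assert not overlapping_sq((0, 0, 1), (5, 0, 1))   # far apart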
monouno/site
|
judge/migrations/0039_remove_contest_is_external.py
|
Python
|
agpl-3.0
| 399
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-09 22:56
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('judge', '0038_profile_problem_count'),
]
operations = [
migrations.RemoveField(
model_name='contest',
name='is_external',
),
]
|
AaronGeist/Llama
|
core/emailsender.py
|
Python
|
gpl-3.0
| 1,822
| 0.000551
|
# -*- coding:utf-8 -*-
import smtplib
from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr, formataddr
from core.enigma import Enigma
from model.email import Email
from util.config import Config
class EmailSender:
@classmethod
def format_addr(cls, s):
name, addr = parseaddr(s)
return formataddr((Header(name, 'utf-8').encode(), addr))
@classmethod
def build_msg(cls, email):
msg = MIMEText(email.body, 'plain', 'utf-8')
msg['From'] = cls.format_addr(u'自己 <%s>' % email.from_addr)
msg['To'] = cls.format_addr(u'自己 <%s>' % email.to_addr)
msg['Subject'] = Header(email.title, 'utf-8').encode()
return msg
@classmethod
def generate_email(cls, title, content):
email = Email()
email.from_addr = Config.get("email_from_addr")
email.to_addr = Config.get("email_to_addr")
email.password = Enigma.decrypt(Config.get("email_password"))
email.stmp_server = Config.get("email_stmp_server")
email.stmp_port = Config.get("email_stmp_port")
email.is_ssl = Config.get("email_is_ssl")
email.title = title
email.body = content
return email
@classmethod
def send(cls, title, content):
email = cls.generate_email(title, content)
msg = cls.build_msg(email)
if email.is_ssl:
server = smtplib.SMTP_SSL(email.stmp_server, email.stmp_port)
else:
server = smtplib.SMTP(email.stmp_server, email.stmp_port)
# server.set_debuglevel(1)
server.login(email.from_addr, email.password)
server.sendmail(email.from_addr, email.to_addr, msg.as_string())
server.quit()
if __name__ == "__main__":
EmailSender.send("test", "test")
|
redline-forensics/auto-dm
|
controllers/main_ctrl.py
|
Python
|
gpl-3.0
| 2,757
| 0.001814
|
from controllers.job_ctrl import JobController
from models.job_model import JobModel
from views.job_view import JobView
class MainController(object):
def __init__(self, main_model):
self.main_view = None
self.main_model = main_model
self.main_model.begin_job_fetch.connect(self.on_begin_job_fetch)
self.main_model.update_job_fetch_progress.connect(self.on_job_fetch_update)
self.main_model.fetched_job.connect(self.on_fetched_job)
def init_ui(self, main_view):
self.main_view = main_view
self.init_hotkeys()
def init_hotkeys(self):
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "J"], self.main_view.focus_job_num_edit)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "O"], self.main_view.open_current_job_folder)
self.main_model.hotkey_model.add_hotkey(["Lcontrol", "Lmenu", "B"], self.main_view.open_current_job_basecamp)
self.main_model.hotkey_model.start_detection()
def fetch_job(self):
job_num = self.main_view.job_num
if self.main_model.job_exists(job_num):
self.main_view.show_job_already_exists_dialog()
return
self.main_model.fetch_job(job_num)
def cancel_job_fetch(self):
self.main_model.cancel_job_fetch()
def on_begin_job_fetch(self, max):
self.main_view.show_job_fetch_progress_dialog(max)
def on_job_fetch_update(self, progress):
self.main_view.update_job_fetch_progress_dialog(progress)
def on_fetched_job(self, job_num, base_folder):
job = JobModel(job_num,
base_folder,
self.main_model.settings_model.basecamp_email,
self.main_model.settings_model.basecamp_password,
self.main_model.settings_model.google_maps_js_api_key,
self.main_model.settings_model.google_maps_static_api_key,
self.main_model.settings_model.google_earth_exe_path,
self.main_model.settings_model.scene_exe_path)
self.main_model.jobs[job.job_num] = job
found = bool(job.base_folder)
        self.main_view.close_job_fetch_progress_dialog()
if not found:
open_anyway = self.main_view.show_job_not_found_dialog()
if not open_anyway:
return
job_view = JobView(JobController(job))
job_view.request_minimize.connect(self.main_view.close)
self.main_view.add_tab(job_view, job.job_name)
def remove_job(self, index):
job_num = int(self.main_view.ui.jobs_tab_widget.tabText(index)[1:])
self.main_model.jobs.pop(job_num, None)
self.main_view.remove_tab(index)
|
shepdelacreme/ansible
|
lib/ansible/modules/cloud/openstack/os_volume.py
|
Python
|
gpl-3.0
| 5,265
| 0.00133
|
#!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_volume
short_description: Create/Delete Cinder Volumes
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Create or Remove cinder block storage volumes
options:
size:
description:
- Size of volume in GB. This parameter is required when the
I(state) parameter is 'present'.
display_name:
description:
- Name of volume
required: true
display_description:
description:
- String describing the volume
volume_type:
description:
- Volume type for volume
image:
description:
- Image name or id for boot from volume
snapshot_id:
description:
- Volume snapshot id to create from
volume:
description:
- Volume name or id to create from
version_added: "2.3"
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
scheduler_hints:
description:
- Scheduler hints passed to volume API in form of dict
version_added: "2.4"
requirements:
- "python >= 2.7"
- "openstacksdk"
'''
EXAMPLES = '''
# Creates a new volume
- name: create a volume
hosts: localhost
tasks:
- name: create 40g test volume
os_volume:
state: present
cloud: mordred
availability_zone: az2
size: 40
display_name: test_volume
scheduler_hints:
same_host: 243e8d3c-8f47-4a61-93d6-7215c344b0c0
'''
from distutils.version import StrictVersion
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _present_volume(module, cloud):
if cloud.volume_exists(module.params['display_name']):
v = cloud.get_volume(module.params['display_name'])
module.exit_json(changed=False, id=v['id'], volume=v)
volume_args = dict(
size=module.params['size'],
volume_type=module.params['volume_type'],
display_name=module.params['display_name'],
display_description=module.params['display_description'],
snapshot_id=module.params['snapshot_id'],
availability_zone=module.params['availability_zone'],
)
if module.params['image']:
image_id = cloud.get_image_id(module.params['image'])
volume_args['imageRef'] = image_id
if module.params['volume']:
volume_id = cloud.get_volume_id(module.params['volume'])
if not volume_id:
module.fail_json(msg="Failed to find volume '%s'" % module.params['volume'])
volume_args['source_volid'] = volume_id
if module.params['scheduler_hints']:
volume_args['scheduler_hints'] = module.params['scheduler_hints']
volume = cloud.create_volume(
wait=module.params['wait'], timeout=module.params['timeout'],
**volume_args)
    module.exit_json(changed=True, id=volume['id'], volume=volume)
def _absent_volume(module, cloud, sdk):
changed = False
if cloud.volume_exists(module.params['display_name']):
try:
changed = cloud.delete_volume(name_or_id=module.params['display_name'],
wait=module.params['wait'],
timeout=module.params['timeout'])
        except sdk.exceptions.ResourceTimeout:
module.exit_json(changed=changed)
module.exit_json(changed=changed)
def main():
argument_spec = openstack_full_argument_spec(
size=dict(default=None),
volume_type=dict(default=None),
display_name=dict(required=True, aliases=['name']),
display_description=dict(default=None, aliases=['description']),
image=dict(default=None),
snapshot_id=dict(default=None),
volume=dict(default=None),
state=dict(default='present', choices=['absent', 'present']),
scheduler_hints=dict(default=None, type='dict')
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[
['image', 'snapshot_id', 'volume'],
],
)
module = AnsibleModule(argument_spec=argument_spec, **module_kwargs)
state = module.params['state']
if state == 'present' and not module.params['size']:
module.fail_json(msg="Size is required when state is 'present'")
sdk, cloud = openstack_cloud_from_module(module)
try:
if state == 'present':
_present_volume(module, cloud)
if state == 'absent':
_absent_volume(module, cloud, sdk)
except sdk.exceptions.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
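A hedged sketch of the idempotent-create pattern that _present_volume applies above: report changed=False when the volume already exists, otherwise create it. The cloud helpers mirror the calls used in the module, not a complete SDK contract.

def ensure_volume(cloud, name, size):
    # Mirrors _present_volume: exit unchanged if present, create otherwise.
    existing = cloud.get_volume(name)
    if existing:
        return {'changed': False, 'volume': existing}
    return {'changed': True,
            'volume': cloud.create_volume(display_name=name, size=size)}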
shear/rppy
|
temp_test_ortho.py
|
Python
|
bsd-2-clause
| 1,170
| 0.005128
|
import rppy
import numpy as np
import matplotlib.pyplot as plt
vp1 = 3000
vs1 = 1500
p1 = 2000
e1_1 = 0.0
d1_1 = 0.0
y1_1 = 0.0
e2_1 = 0.0
d2_1 = 0.0
y2_1 = 0.0
d3_1 = 0.0
chi1 = 0.0
C1 = rppy.reflectivity.Cij(vp1, vs1, p1, e1_1, d1_1, y1_1, e2_1, d2_1, y2_1, d3_1)
vp2 = 4000
vs2 = 2000
p2 = 2200
e1_2 = 0.0
d1_2 = 0.0
y1_2 = 0.0
e2_2 = 0.0
d2_2 = 0.0
y2_2 = 0.0
d3_2 = 0.0
chi2 = 0.0
C2 = rppy.reflectivity.Cij(vp2, vs2, p2, e1_2, d1_2, y1_2, e2_2, d2_2, y2_2, d3_2)
phi = np.arange(0, 90, 1)
theta = np.arange(0, 90, 1)
loopang = phi
theta = np.array([30])
rphti = np.zeros(np.shape(loopang))
rpzoe = np.zeros(np.shape(loopang))
rprug = np.zeros(np.shape(loopang))
for aid, val in enumerate(loopang):
rphti[aid] = rppy.reflectivity.exact_ortho(C1, p1, C2, p2, chi1, chi2, loopang[aid], theta)
rprug[aid] = rppy.reflectivity.ruger_hti(vp1, vs1, p1, e2_1, d2_1, y2_1, vp2, vs2, p2, e2_2, d2_2, y2_2, np.radians(theta), np.radians(loopang[aid]))
rpzoe[aid] = rppy.reflectivity.zoeppritz(vp1, vs1, p1, vp2, vs2, p2, np.radians(theta))
plt.figure(1)
plt.plot(loopang, rphti, loopang, rprug, loopang, rpzoe)
plt.legend(['hti', 'ruger', 'zoe'])
plt.show()
|
thombashi/pytablewriter
|
test/writer/text/rst/test_rst_csv_writer.py
|
Python
|
mit
| 6,875
| 0.001309
|
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from textwrap import dedent
import pytest
import pytablewriter
from ...._common import print_test_result
from ....data import (
Data,
headers,
mix_header_list,
mix_value_matrix,
null_test_data_list,
value_matrix,
value_matrix_iter,
value_matrix_with_none,
vut_style_tabledata,
vut_styles,
)
from .._common import regexp_ansi_escape, strip_ansi_escape
normal_test_data_list = [
Data(
table="table name",
indent=0,
header=headers,
value=value_matrix,
expected=dedent(
"""\
.. csv-table:: table name
:header: "a", "b", "c", "dd", "e"
:widths: 3, 5, 5, 4, 6
1, 123.1, "a", 1.0, 1
2, 2.2, "bb", 2.2, 2.2
3, 3.3, "ccc", 3.0, "cccc"
"""
),
),
Data(
table="",
indent=0,
header=headers,
value=None,
expected=dedent(
"""\
.. csv-table::
:header: "a", "b", "c", "dd", "e"
:widths: 3, 3, 3, 4, 3
"""
),
),
Data(
table=None,
indent=0,
header=None,
value=value_matrix,
expected=dedent(
"""\
.. csv-table::
:widths: 1, 5, 5, 3, 6
1, 123.1, "a", 1.0, 1
2, 2.2, "bb", 2.2, 2.2
3, 3.3, "ccc", 3.0, "cccc"
"""
),
),
Data(
table="",
indent=1,
header=headers,
value=value_matrix,
expected=""" .. csv-table::
:header: "a", "b", "c", "dd", "e"
:widths: 3, 5, 5, 4, 6
1, 123.1, "a", 1.0, 1
2, 2.2, "bb", 2.2, 2.2
3, 3.3, "ccc", 3.0, "cccc"
""",
),
Data(
table="table name",
indent=0,
header=headers,
value=value_matrix_with_none,
expected=dedent(
"""\
.. csv-table:: table name
:header: "a", "b", "c", "dd", "e"
:widths: 3, 3, 5, 4, 6
1, , "a", 1.0,
, 2.2, , 2.2, 2.2
3, 3.3, "ccc", , "cccc"
, , , ,
"""
),
),
Data(
table="table name",
indent=0,
header=mix_header_list,
value=mix_value_matrix,
expected=dedent(
"""\
.. csv-table:: table name
:header: "i", "f", "c", "if", "ifc", "bool", "inf", "nan", "mix_num", "time"
:widths: 3, 4, 6, 4, 5, 6, 8, 5, 9, 27
1, 1.10, "aa", 1.0, 1, True, Infinity, NaN, 1, 2017-01-01T00:00:00
2, 2.20, "bbb", 2.2, 2.2, False, Infinity, NaN, Infinity, "2017-01-02 03:04:05+09:00"
3, 3.33, "cccc", -3.0, "ccc", True, Infinity, NaN, NaN, 2017-01-01T00:00:00
"""
),
),
]
table_writer_class = pytablewriter.RstCsvTableWriter
class Test_RstCsvTableWriter_write_new_line:
def test_normal(self, capsys):
writer = table_writer_class()
writer.write_null_line()
out, _err = capsys.readouterr()
assert out == "\n"
class Test_RstCsvTableWriter_write_table:
@pytest.mark.parametrize(
["table", "indent", "header", "value", "expected"],
[
[data.table, data.indent, data.header, data.value, data.expected]
for data in normal_test_data_list
],
)
def test_normal(self, table, indent, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.set_indent_level(indent)
writer.headers = header
writer.value_matrix = value
out = writer.dumps()
print_test_result(expected=expected, actual=out)
assert out == expected
def test_normal_styles(self):
writer = table_writer_class()
writer.from_tabledata(vut_style_tabledata)
writer.column_styles = vut_styles
expected = dedent(
"""\
.. csv-table:: style test
:header: "none", "empty", "tiny", "small", "medium", "large", "null w/ bold", "L bold", "S italic", "L bold italic"
:widths: 6, 7, 6, 7, 8, 7, 14, 8, 10, 15
111, 111, 111, 111, "111", 111, , **111**, *111*, **111**
1234, 1234, 1234, 1234, "1,234", 1 234, , **1234**, *1234*, **1234**
"""
)
out = writer.dumps()
print_test_result(expected=expected, actual=out)
assert regexp_ansi_escape.search(out)
assert strip_ansi_escape(out) == expected
@pytest.mark.parametrize(
["table", "indent", "header", "value", "expected"],
[
[data.table, data.indent, data.header, data.value, data.expected]
for data in null_test_data_list
],
)
def test_normal_empty(self, table, indent, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.set_indent_level(indent)
writer.headers = header
writer.value_matrix = value
assert writer.dumps() == ""
assert str(writer) == ""
class Test_RstCsvTableWriter_write_table_iter:
@pytest.mark.parametrize(
["table", "header", "value", "expected"],
[
[
"tablename",
["ha", "hb", "hc"],
value_matrix_iter,
dedent(
"""\
.. csv-table:: tablename
:header: "ha", "hb", "hc"
:widths: 5, 5, 5
1, 2, 3
11, 12, 13
1, 2, 3
11, 12, 13
101, 102, 103
1001, 1002, 1003
"""
),
]
],
)
def test_normal(self, capsys, table, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.headers = header
writer.value_matrix = value
writer.iteration_length = len(value)
writer.write_table_iter()
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
@pytest.mark.parametrize(
["table", "header", "value", "expected"],
[[data.table, data.header, data.value, data.expected] for data in null_test_data_list],
)
def test_normal_smoke(self, table, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.headers = header
writer.value_matrix = value
writer.write_table_iter()
|
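A minimal usage sketch of the writer class exercised by these tests; the output matches the ".. csv-table::" blocks in normal_test_data_list above.

writer = pytablewriter.RstCsvTableWriter()
writer.table_name = "example"
writer.headers = ["a", "b"]
writer.value_matrix = [[1, "x"], [2, "y"]]
print(writer.dumps())  # emits a ".. csv-table:: example" directive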
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/ios/ios_static_route.py
|
Python
|
bsd-3-clause
| 7,090
| 0.001551
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ios_static_route
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage static IP routes on Cisco IOS network devices
description:
- This module provides declarative management of static
IP routes on Cisco IOS network devices.
notes:
- Tested against IOS 15.6
options:
prefix:
description:
- Network prefix of the static route.
mask:
description:
- Network prefix mask of the static route.
next_hop:
    description:
- Next hop IP of the static route.
admin_distance:
description:
- Admin distance of the static route.
default: 1
aggregate:
description: List of static route definitions.
state:
description:
      - State of the static route configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure static route
ios_static_route:
prefix: 192.168.2.0
mask: 255.255.255.0
next_hop: 10.0.0.1
- name: remove configuration
ios_static_route:
prefix: 192.168.2.0
mask: 255.255.255.0
next_hop: 10.0.0.1
state: absent
- name: Add static route aggregates
ios_static_route:
aggregate:
- { prefix: 172.16.32.0, mask: 255.255.255.0, next_hop: 10.0.0.8 }
- { prefix: 172.16.33.0, mask: 255.255.255.0, next_hop: 10.0.0.8 }
- name: Add static route aggregates
ios_static_route:
aggregate:
- { prefix: 172.16.32.0, mask: 255.255.255.0, next_hop: 10.0.0.8 }
- { prefix: 172.16.33.0, mask: 255.255.255.0, next_hop: 10.0.0.8 }
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- ip route 192.168.2.0 255.255.255.0 10.0.0.1
"""
from copy import deepcopy
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import exec_command
from ansible.module_utils.network_common import remove_default_spec
from ansible.module_utils.ios import load_config, run_commands
from ansible.module_utils.ios import ios_argument_spec, check_args
from ipaddress import ip_network
import re
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
for w in want:
prefix = w['prefix']
mask = w['mask']
next_hop = w['next_hop']
admin_distance = w['admin_distance']
state = w['state']
del w['state']
if state == 'absent' and w in have:
commands.append('no ip route %s %s %s' % (prefix, mask, next_hop))
elif state == 'present' and w not in have:
commands.append('ip route %s %s %s %s' % (prefix, mask, next_hop,
admin_distance))
return commands
def map_config_to_obj(module):
obj = []
rc, out, err = exec_command(module, 'show ip static route')
match = re.search(r'.*Static local RIB for default\s*(.*)$', out, re.DOTALL)
if match and match.group(1):
for r in match.group(1).splitlines():
splitted_line = r.split()
code = splitted_line[0]
if code != 'M':
continue
cidr = ip_network(to_text(splitted_line[1]))
prefix = str(cidr.network_address)
mask = str(cidr.netmask)
next_hop = splitted_line[4]
admin_distance = splitted_line[2][1]
obj.append({'prefix': prefix, 'mask': mask,
'next_hop': next_hop,
'admin_distance': admin_distance})
return obj
def map_params_to_obj(module, required_together=None):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
module._check_required_together(required_together, item)
d = item.copy()
d['admin_distance'] = str(module.params['admin_distance'])
obj.append(d)
else:
obj.append({
'prefix': module.params['prefix'].strip(),
'mask': module.params['mask'].strip(),
'next_hop': module.params['next_hop'].strip(),
'admin_distance': str(module.params['admin_distance']),
'state': module.params['state']
})
return obj
def main():
""" main entry point for module execution
"""
element_spec = dict(
prefix=dict(type='str'),
mask=dict(type='str'),
next_hop=dict(type='str'),
admin_distance=dict(default=1, type='int'),
state=dict(default='present', choices=['present', 'absent'])
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['prefix'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
)
argument_spec.update(element_spec)
argument_spec.update(ios_argument_spec)
required_one_of = [['aggregate', 'prefix']]
required_together = [['prefix', 'mask', 'next_hop']]
mutually_exclusive = [['aggregate', 'prefix']]
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
required_together=required_together,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module, required_together=required_together)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
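The declarative diff in map_obj_to_commands reduces to membership tests between the wanted and configured routes; a self-contained sketch:

want = [{'prefix': '192.168.2.0', 'mask': '255.255.255.0',
         'next_hop': '10.0.0.1', 'admin_distance': '1', 'state': 'present'}]
have = []  # nothing configured yet
commands = []
for w in want:
    state = w.pop('state')
    if state == 'absent' and w in have:
        commands.append('no ip route %(prefix)s %(mask)s %(next_hop)s' % w)
    elif state == 'present' and w not in have:
        commands.append(
            'ip route %(prefix)s %(mask)s %(next_hop)s %(admin_distance)s' % w)
assert commands == ['ip route 192.168.2.0 255.255.255.0 10.0.0.1 1']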
quaddra/dist_job_mgr
|
setup.py
|
Python
|
apache-2.0
| 639
| 0.00626
|
from setuptools import setup, find_packages
from dist_job_mgr.version import VERSION
setup(
name='dist_job_mgr',
version=VERSION,
author='genForma Corp',
author_email='code@genforma.com',
url='',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
entry_points = {
'console_scripts': [
'djmctl = dist_job_mgr.djmctl:main',
        'djm-worker = dist_job_mgr.worker_main:main'
]},
install_requires=['lockfile>=0.9',], # 'python-daemon'],
license='Apache V2.0',
description='Distributed Job Manager',
long_description="description"
)
|
cidadania/e-cidadania
|
src/helpers/cache.py
|
Python
|
apache-2.0
| 1,903
| 0
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Clione Software
# Copyright (c) 2010-2013 Cidadania S. Coop. Galega
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"
|
""
This file contains functions to help with caching.
"""
# Django's cache module
from django.core.cache import cache
# Cached models
from core.spaces.models import Space
# Response types
from django.shortcuts import get_object_or_404
# Tries to get the object from cache
# Else queries the database
# Else returns a 404 error
def _get_cache_key_for_model(model, key):
"""
Returns a unique key for the given model.
We prefix the given `key` with the name of the `model` to provide a further
degree of uniqueness of keys across the cache.
"""
if not isinstance(key, basestring):
raise TypeError('key must be str or a unicode string')
return model.__name__ + '_' + key
def get_or_insert_object_in_cache(model, key, *args, **kwargs):
"""
Returns an instance of the `model` stored in the cache with the given key.
If the object is not found in the cache, it is retrieved from the database
and set in the cache.
"""
actual_key = _get_cache_key_for_model(model, key)
return_object = cache.get(actual_key)
if not return_object:
return_object = get_object_or_404(model, *args, **kwargs)
cache.set(actual_key, return_object)
return return_object
|
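A hypothetical call site for the helper above; using 'url' as the Space lookup field is an assumption for illustration.

def get_space_from_cache(space_url):
    # Cache key and lookup field are both the space's URL slug (assumed).
    return get_or_insert_object_in_cache(Space, space_url, url=space_url)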
abinashk-inf/AstroBox
|
src/astroprint/printerprofile/__init__.py
|
Python
|
agpl-3.0
| 2,918
| 0.002742
|
# coding=utf-8
__author__ = "AstroPrint Product Team <product@astroprint.com>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2017 3DaGoGo, Inc - Released under terms of the AGPLv3 License"
# singleton
_instance = None
def printerProfileManager():
global _instance
if _instance is None:
_instance = PrinterProfileManager()
return _instance
import os
import yaml
import logging
import shutil
from octoprint.settings import settings
class PrinterProfileManager(object):
def __init__(self):
self._settings = settings()
configDir = self._settings.getConfigFolder()
self._infoFile = "%s/printer-profile.yaml" % configDir
self._logger = logging.getLogger(__name__)
self.data = {
'driver': "marlin",
'extruder_count': 1,
'max_nozzle_temp': 280,
'max_bed_temp': 140,
'heated_bed': True,
            'cancel_gcode': ['G28 X0 Y0'],
'invert_z': False
}
if not os.path.isfile(self._infoFile):
factoryFile = "%s/printer-profile.factory" % configDir
if os.path.isfile(factoryFile):
shutil.copy(factoryFile, self._infoFile)
else:
open(self._infoFile, 'w').close()
if self._infoFile:
config = None
with open(self._infoFile, "r") as f:
config = yaml.safe_load(f)
def merge_dict(a, b):
for key in b:
if isinstance(b[key], dict):
merge_dict(a[key], b[key])
else:
a[key] = b[key]
if config:
merge_dict(self.data, config)
def save(self):
with open(self._infoFile, "wb") as infoFile:
yaml.safe_dump(
self.data,
infoFile,
default_flow_style=False,
indent=" ",
allow_unicode=True
)
def set(self, changes):
for k in changes:
if k in self.data:
if self.data[k] != changes[k]:
if k == 'driver':
# change printer object
from astroprint.printer.manager import printerManager
printerManager(changes['driver'])
self.data[k] = self._clean(k, changes[k])
else:
self._logger.error(
"trying to set unkonwn printer profile field %s to %s" % \
(k, str(changes[k])))
def _clean(self, field, value):
if field in ['extruder_count', 'max_nozzle_temp', 'max_bed_temp']:
return int(value)
elif field == 'heated_bed':
return bool(value)
else:
return value
|
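A standalone version of the merge_dict closure used in __init__ above, showing how the saved YAML is deep-merged over the defaults.

def merge_dict(a, b):
    # Recursively overlay b onto a in place.
    for key in b:
        if isinstance(b[key], dict):
            merge_dict(a[key], b[key])
        else:
            a[key] = b[key]

defaults = {'driver': 'marlin', 'limits': {'max_nozzle_temp': 280}}
saved = {'limits': {'max_nozzle_temp': 260}}
merge_dict(defaults, saved)
assert defaults == {'driver': 'marlin', 'limits': {'max_nozzle_temp': 260}}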
amarquand/nispat
|
pcntoolkit/normative_model/norm_blr.py
|
Python
|
gpl-3.0
| 8,464
| 0.006971
|
from __future__ import print_function
from __future__ import division
import os
import sys
import numpy as np
import pandas as pd
from ast import literal_eval
try: # run as a package if installed
from pcntoolkit.model.bayesreg import BLR
from pcntoolkit.normative_model.norm_base import NormBase
from pcntoolkit.dataio import fileio
from pcntoolkit.util.utils import create_poly_basis, WarpBoxCox, \
WarpAffine, WarpCompose, WarpSinArcsinh
except ImportError:
pass
path = os.path.abspath(os.path.dirname(__file__))
if path not in sys.path:
sys.path.append(path)
del path
from model.bayesreg import BLR
from norm_base import NormBase
from dataio import fileio
from util.utils import create_poly_basis, WarpBoxCox, \
WarpAffine, WarpCompose, WarpSinArcsinh
class NormBLR(NormBase):
""" Normative modelling based on Bayesian Linear Regression
"""
def __init__(self, **kwargs):
X = kwargs.pop('X', None)
y = kwargs.pop('y', None)
theta = kwargs.pop('theta', None)
if isinstance(theta, str):
theta = np.array(literal_eval(theta))
self.optim_alg = kwargs.get('optimizer','powell')
if X is None:
raise(ValueError, "Data matrix must be specified")
if len(X.shape) == 1:
self.D = 1
else:
self.D = X.shape[1]
# Parse model order
if kwargs is None:
model_order = 1
elif 'configparam' in kwargs: # deprecated syntax
model_order = kwargs.pop('configparam')
elif 'model_order' in kwargs:
model_order = kwargs.pop('model_order')
else:
model_order = 1
# Force a default model order and check datatype
if model_order is None:
model_order = 1
if type(model_order) is not int:
model_order = int(model_order)
# configure heteroskedastic noise
if 'varcovfile' in kwargs:
var_cov_file = kwargs.get('varcovfile')
if var_cov_file.endswith('.pkl'):
self.var_covariates = pd.read_pickle(var_cov_file)
else:
self.var_covariates = np.loadtxt(var_cov_file)
if len(self.var_covariates.shape) == 1:
self.var_covariates = self.var_covariates[:, np.newaxis]
n_beta = self.var_covariates.shape[1]
self.var_groups = None
elif 'vargroupfile' in kwargs:
# configure variance groups (e.g. site specific variance)
var_groups_file = kwargs.pop('vargroupfile')
if var_groups_file.endswith('.pkl'):
self.var_groups = pd.read_pickle(var_groups_file)
else:
self.var_groups = np.loadtxt(var_groups_file)
var_ids = set(self.var_groups)
var_ids = sorted(list(var_ids))
n_beta = len(var_ids)
else:
self.var_groups = None
self.var_covariates = None
n_beta = 1
# are we using ARD?
if 'use_ard' in kwargs:
self.use_ard = kwargs.pop('use_ard')
else:
self.use_ard = False
if self.use_ard:
n_alpha = self.D * model_order
else:
n_alpha = 1
# Configure warped likelihood
if 'warp' in kwargs:
warp_str = kwargs.pop('warp')
if warp_str is None:
self.warp = None
n_gamma = 0
else:
# set up warp
exec('self.warp =' + warp_str + '()')
n_gamma = self.warp.get_n_params()
else:
self.warp = None
n_gamma = 0
self._n_params = n_alpha + n_beta + n_gamma
self._model_order = model_order
print("configuring BLR ( order", model_order, ")")
if (theta is None) or (len(theta) != self._n_params):
print("Using default hyperparameters")
self.theta0 = np.zeros(self._n_params)
else:
self.theta0 = theta
self.theta = self.theta0
# initialise the BLR object if the required parameters are present
if (theta is not None) and (y is not None):
Phi = create_poly_basis(X, self._model_order)
self.blr = BLR(theta=theta, X=Phi, y=y,
warp=self.warp, **kwargs)
else:
self.blr = BLR(**kwargs)
@property
def n_params(self):
return self._n_params
@property
def neg_log_lik(self):
return self.blr.nlZ
def estimate(self, X, y, **kwargs):
theta = kwargs.pop('theta', None)
if isinstance(theta, str):
theta = np.array(literal_eval(theta))
# remove warp string to prevent it being passed to the blr object
kwargs.pop('warp',None)
Phi = create_poly_basis(X, self._model_order)
if len(y.shape) > 1:
y = y.ravel()
if theta is None:
theta = self.theta0
            # (re-)initialize BLR object because parameters were not specified
self.blr = BLR(theta=theta, X=Phi, y=y,
var_groups=self.var_groups,
warp=self.warp, **kwargs)
self.theta = self.blr.estimate(theta, Phi, y,
                                       var_covariates=self.var_covariates, **kwargs)
return self
def predict(self, Xs, X=None, y=None, **kwargs):
theta = self.theta # always use the estimated coefficients
# remove from kwargs to avoid downstream problems
kwargs.pop('theta', None)
Phis = create_poly_basis(Xs, self._model_order)
if X is None:
Phi =None
else:
Phi = create_poly_basis(X, self._model_order)
# process variance groups for the test data
if 'testvargroupfile' in kwargs:
var_groups_test_file = kwargs.pop('testvargroupfile')
if var_groups_test_file.endswith('.pkl'):
var_groups_te = pd.read_pickle(var_groups_test_file)
else:
var_groups_te = np.loadtxt(var_groups_test_file)
else:
var_groups_te = None
# process test variance covariates
if 'testvarcovfile' in kwargs:
var_cov_test_file = kwargs.get('testvarcovfile')
if var_cov_test_file.endswith('.pkl'):
var_cov_te = pd.read_pickle(var_cov_test_file)
else:
var_cov_te = np.loadtxt(var_cov_test_file)
else:
var_cov_te = None
# do we want to adjust the responses?
if 'adaptrespfile' in kwargs:
y_adapt = fileio.load(kwargs.pop('adaptrespfile'))
if len(y_adapt.shape) == 1:
y_adapt = y_adapt[:, np.newaxis]
else:
y_adapt = None
if 'adaptcovfile' in kwargs:
X_adapt = fileio.load(kwargs.pop('adaptcovfile'))
Phi_adapt = create_poly_basis(X_adapt, self._model_order)
else:
Phi_adapt = None
if 'adaptvargroupfile' in kwargs:
var_groups_adapt_file = kwargs.pop('adaptvargroupfile')
if var_groups_adapt_file.endswith('.pkl'):
var_groups_ad = pd.read_pickle(var_groups_adapt_file)
else:
var_groups_ad = np.loadtxt(var_groups_adapt_file)
else:
var_groups_ad = None
if y_adapt is None:
yhat, s2 = self.blr.predict(theta, Phi, y, Phis,
var_groups_test=var_groups_te,
var_covariates_test=var_cov_te,
**kwargs)
else:
            yhat, s2 = self.blr.predict_and_adjust(theta, Phi_adapt, y_adapt, Phis,
                                                   var_groups_test=var_groups_te,
                                                   var_groups_adapt=var_groups_ad,
                                                   **kwargs)
        return yhat, s2
|
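A hedged sketch of the polynomial design matrix that create_poly_basis produces for NormBLR; pcntoolkit's actual implementation may differ in column ordering or intercept handling.

import numpy as np

def poly_basis(X, order):
    # One block of columns per power 1..order (sketch, no intercept column).
    X = X[:, None] if X.ndim == 1 else X
    return np.concatenate([X ** d for d in range(1, order + 1)], axis=1)

Phi = poly_basis(np.array([1.0, 2.0, 3.0]), 2)
assert Phi.shape == (3, 2)  # columns: x, x**2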
wuzhy/autotest
|
client/tests/kvm/kvm.py
|
Python
|
gpl-2.0
| 5,376
| 0.000558
|
import os, logging, imp
from autotest_lib.client.bin import test
from autotest_lib.client.common_lib import error
from autotest_lib.client.virt import virt_utils, virt_env_process
class kvm(test.test):
"""
Suite of KVM virtualization functional tests.
Contains tests for testing both KVM kernel code and userspace code.
@copyright: Red Hat 2008-2009
@author: Uri Lublin (uril@redhat.com)
@author: Dror Russo (drusso@redhat.com)
@author: Michael Goldish (mgoldish@redhat.com)
@author: David Huff (dhuff@redhat.com)
@author: Alexey Eromenko (aeromenk@redhat.com)
@author: Mike Burns (mburns@redhat.com)
@see: http://www.linux-kvm.org/page/KVM-Autotest/Client_Install
(Online doc - Getting started with KVM testing)
"""
version = 1
env_version = 1
preserve_srcdir = True
#preserve_srcdir = False
def run_once(self, params):
# Convert params to a Params object
params = virt_utils.Params(params)
# If a dependency test prior to this test has failed, let's fail
# it right away as TestNA.
if params.get("dependency_failed") =
|
= 'yes':
raise error.TestNAError("Test dependency failed")
        # Report the parameters we've received and write them as keyvals
logging.debug("Test parameters:")
keys = params.keys()
keys.sort()
for key in keys:
logging.debug(" %s = %s", key, params[key])
self.write_test_keyval({key: params[key]})
# Set the log file dir for the logging mechanism used by kvm_subprocess
# (this must be done before unpickling env)
virt_utils.set_log_file_dir(self.debugdir)
# Open the environment file
logging.info("Unpickling env. You may see some harmless error "
"messages.")
env_filename = os.path.join(self.bindir, params.get("env", "env"))
env = virt_utils.Env(env_filename, self.env_version)
test_passed = False
try:
try:
try:
# Get the test routine corresponding to the specified
# test type
t_type = params.get("type")
# Verify if we have the correspondent source file for it
virt_dir = os.path.dirname(virt_utils.__file__)
subtest_dir_virt = os.path.join(virt_dir, "tests")
subtest_dir_kvm = os.path.join(self.bindir, "tests")
subtest_dir = None
for d in [subtest_dir_kvm, subtest_dir_virt]:
module_path = os.path.join(d, "%s.py" % t_type)
if os.path.isfile(module_path):
subtest_dir = d
break
if subtest_dir is None:
raise error.TestError("Could not find test file %s.py "
"on either %s or %s directory" %
subtest_dir_kvm, subtest_dir_virt)
# Load the test module
f, p, d = imp.find_module(t_type, [subtest_dir])
test_module = imp.load_module(t_type, f, p, d)
f.close()
# Preprocess
try:
virt_env_process.preprocess(self, params, env)
finally:
env.save()
# Run the test function
run_func = getattr(test_module, "run_%s" % t_type)
try:
run_func(self, params, env)
finally:
env.save()
test_passed = True
except Exception, e:
logging.error("Test failed: %s: %s",
e.__class__.__name__, e)
try:
virt_env_process.postprocess_on_error(
self, params, env)
finally:
env.save()
raise
finally:
# Postprocess
try:
try:
virt_env_process.postprocess(self, params, env)
except Exception, e:
if test_passed:
raise
logging.error("Exception raised during "
"postprocessing: %s", e)
finally:
env.save()
except Exception, e:
if params.get("abort_on_error") != "yes":
raise
# Abort on error
logging.info("Aborting job (%s)", e)
for vm in env.get_all_vms():
if vm.is_dead():
continue
logging.info("VM '%s' is alive.", vm.name)
for m in vm.monitors:
logging.info("'%s' has a %s monitor unix socket at: %s",
vm.name, m.protocol, m.filename)
logging.info("The command line used to start '%s' was:\n%s",
vm.name, vm.make_qemu_command())
raise error.JobError("Abort requested (%s)" % e)
|
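The heart of the runner above is its dynamic dispatch: search candidate directories for `<type>.py`, load it with the Python 2-era `imp` API, and call the module's `run_<type>` entry point. A stripped-down sketch of the same pattern (directory and test-type names are illustrative):

# Minimal sketch of the dynamic test dispatch used by kvm.run_once above.
# Mirrors the Python 2-era imp API of the original file.
import imp
import os

def load_and_run(t_type, search_dirs, *args):
    subtest_dir = None
    for d in search_dirs:
        if os.path.isfile(os.path.join(d, "%s.py" % t_type)):
            subtest_dir = d
            break
    if subtest_dir is None:
        raise RuntimeError("no test module found for %s" % t_type)
    f, p, d = imp.find_module(t_type, [subtest_dir])
    try:
        test_module = imp.load_module(t_type, f, p, d)
    finally:
        f.close()
    # convention: each test module exposes run_<type>(...)
    return getattr(test_module, "run_%s" % t_type)(*args)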
highfei2011/spark
|
python/pyspark/sql/tests/test_serde.py
|
Python
|
apache-2.0
| 6,215
| 0.00177
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import shutil
import tempfile
import time
from pyspark.sql import Row
from pyspark.sql.functions import lit
from pyspark.sql.types import *
from pyspark.testing.sqlutils import ReusedSQLTestCase, UTCOffsetTimezone
class SerdeTests(ReusedSQLTestCase):
def test_serialize_nested_array_and_map(self):
d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
row = df.head()
self.assertEqual(1, len(row.l))
self.assertEqual(1, row.l[0].a)
self.assertEqual("2", row.d["key"].d)
l = df.rdd.map(lambda x: x.l).first()
self.assertEqual(1, len(l))
self.assertEqual('s', l[0].b)
d = df.rdd.map(lambda x: x.d).first()
self.assertEqual(1, len(d))
self.assertEqual(1.0, d["key"].c)
row = df.rdd.map(lambda x: x.d["key"]).first()
self.assertEqual(1.0, row.c)
self.assertEqual("2", row.d)
def test_select_null_literal(self):
df = self.spark.sql("select null as col")
self.assertEqual(Row(col=None), df.first())
def test_struct_in_map(self):
        d = [Row(m={Row(i=1): Row(s="")})]
df = self.sc.parallelize(d).toDF()
k, v = list(df.head().m.items())[0]
self.assertEqual(1, k.i)
self.assertEqual("", v.s)
    def test_filter_with_datetime(self):
time = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000)
date = time.date()
row = Row(date=date, time=time)
df = self.spark.createDataFrame([row])
self.assertEqual(1, df.filter(df.date == date).count())
self.assertEqual(1, df.filter(df.time == time).count())
self.assertEqual(0, df.filter(df.date > date).count())
self.assertEqual(0, df.filter(df.time > time).count())
def test_filter_with_datetime_timezone(self):
dt1 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(0))
dt2 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(1))
row = Row(date=dt1)
df = self.spark.createDataFrame([row])
self.assertEqual(0, df.filter(df.date == dt2).count())
self.assertEqual(1, df.filter(df.date > dt2).count())
self.assertEqual(0, df.filter(df.date < dt2).count())
def test_time_with_timezone(self):
day = datetime.date.today()
now = datetime.datetime.now()
ts = time.mktime(now.timetuple())
# class in __main__ is not serializable
from pyspark.testing.sqlutils import UTCOffsetTimezone
utc = UTCOffsetTimezone()
utcnow = datetime.datetime.utcfromtimestamp(ts) # without microseconds
# add microseconds to utcnow (keeping year,month,day,hour,minute,second)
utcnow = datetime.datetime(*(utcnow.timetuple()[:6] + (now.microsecond, utc)))
df = self.spark.createDataFrame([(day, now, utcnow)])
day1, now1, utcnow1 = df.first()
self.assertEqual(day1, day)
self.assertEqual(now, now1)
self.assertEqual(now, utcnow1)
# regression test for SPARK-19561
def test_datetime_at_epoch(self):
epoch = datetime.datetime.fromtimestamp(0)
df = self.spark.createDataFrame([Row(date=epoch)])
first = df.select('date', lit(epoch).alias('lit_date')).first()
self.assertEqual(first['date'], epoch)
self.assertEqual(first['lit_date'], epoch)
def test_decimal(self):
from decimal import Decimal
schema = StructType([StructField("decimal", DecimalType(10, 5))])
df = self.spark.createDataFrame([(Decimal("3.14159"),)], schema)
row = df.select(df.decimal + 1).first()
self.assertEqual(row[0], Decimal("4.14159"))
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.parquet(tmpPath)
df2 = self.spark.read.parquet(tmpPath)
row = df2.first()
self.assertEqual(row[0], Decimal("3.14159"))
def test_BinaryType_serialization(self):
# Pyrolite version <= 4.9 could not serialize BinaryType with Python3 SPARK-17808
# The empty bytearray is test for SPARK-21534.
schema = StructType([StructField('mybytes', BinaryType())])
data = [[bytearray(b'here is my data')],
[bytearray(b'and here is some more')],
[bytearray(b'')]]
df = self.spark.createDataFrame(data, schema=schema)
df.collect()
def test_int_array_serialization(self):
# Note that this test seems dependent on parallelism.
# This issue is because internal object map in Pyrolite is not cleared after op code
# STOP. If we use protocol 4 to pickle Python objects, op code MEMOIZE will store
# objects in the map. We need to clear up it to make sure next unpickling works on
# clear map.
data = self.spark.sparkContext.parallelize([[1, 2, 3, 4]] * 100, numSlices=12)
df = self.spark.createDataFrame(data, "array<integer>")
self.assertEqual(len(list(filter(lambda r: None in r.value, df.collect()))), 0)
if __name__ == "__main__":
import unittest
from pyspark.sql.tests.test_serde import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
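The timezone tests above lean on `UTCOffsetTimezone`, which is in essence a fixed-offset `tzinfo`. A minimal sketch of such a class (an assumption about its shape, not pyspark's actual implementation):

# Hedged sketch of a fixed-offset tzinfo, similar in spirit to the
# UTCOffsetTimezone helper used in the tests above.
import datetime

class FixedOffset(datetime.tzinfo):
    def __init__(self, hours=0):
        self._offset = datetime.timedelta(hours=hours)

    def utcoffset(self, dt):
        return self._offset

    def dst(self, dt):
        return datetime.timedelta(0)

dt = datetime.datetime(2015, 4, 17, 23, 1, 2, tzinfo=FixedOffset(1))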
ilayn/scipy
|
scipy/fft/tests/test_fftlog.py
|
Python
|
bsd-3-clause
| 5,819
| 0
|
import warnings
import numpy as np
from numpy.testing import assert_allclose
import pytest
from scipy.fft._fftlog import fht, ifht, fhtoffset
from scipy.special import poch
def test_fht_agrees_with_fftlog():
# check that fht numerically agrees with the output from Fortran FFTLog,
# the results were generated with the provided `fftlogtest` program,
# after fixing how the k array is generated (divide range by n-1, not n)
# test function, analytical Hankel transform is of the same form
def f(r, mu):
return r**(mu+1)*np.exp(-r**2/2)
r = np.logspace(-4, 4, 16)
dln = np.log(r[1]/r[0])
mu = 0.3
offset = 0.0
bias = 0.0
a = f(r, mu)
# test 1: compute as given
ours = fht(a, dln, mu, offset=offset, bias=bias)
theirs = [-0.1159922613593045E-02, +0.1625822618458832E-02,
-0.1949518286432330E-02, +0.3789220182554077E-02,
+0.5093959119952945E-03, +0.2785387803618774E-01,
+0.9944952700848897E-01, +0.4599202164586588E+00,
+0.3157462160881342E+00, -0.8201236844404755E-03,
-0.7834031308271878E-03, +0.3931444945110708E-03,
-0.2697710625194777E-03, +0.3568398050238820E-03,
-0.5554454827797206E-03, +0.8286331026468585E-03]
assert_allclose(ours, theirs)
# test 2: change to optimal offset
offset = fhtoffset(dln, mu, bias=bias)
ours = fht(a, dln, mu, offset=offset, bias=bias)
theirs = [+0.4353768523152057E-04, -0.9197045663594285E-05,
+0.3150140927838524E-03, +0.9149121960963704E-03,
+0.5808089753959363E-02, +0.2548065256377240E-01,
+0.1339477692089897E+00, +0.4821530509479356E+00,
+0.2659899781579785E+00, -0.1116475278448113E-01,
+0.1791441617592385E-02, -0.4181810476548056E-03,
+0.1314963536765343E-03, -0.5422057743066297E-04,
+0.3208681804170443E-04, -0.2696849476008234E-04]
assert_allclose(ours, theirs)
# test 3: positive bias
bias = 0.8
offset = fhtoffset(dln, mu, bias=bias)
ours = fht(a, dln, mu, offset=offset, bias=bias)
theirs = [-7.3436673558316850E+00, +0.1710271207817100E+00,
+0.1065374386206564E+00, -0.5121739602708132E-01,
+0.2636649319269470E-01, +0.1697209218849693E-01,
+0.1250215614723183E+00, +0.4739583261486729E+00,
+0.2841149874912028E+00, -0.8312764741645729E-02,
+0.1024233505508988E-02, -0.1644902767389120E-03,
+0.3305775476926270E-04, -0.7786993194882709E-05,
+0.1962258449520547E-05, -0.8977895734909250E-06]
assert_allclose(ours, theirs)
# test 4: negative bias
bias = -0.8
offset = fhtoffset(dln, mu, bias=bias)
ours = fht(a, dln, mu, offset=offset, bias=bias)
theirs = [+0.8985777068568745E-05, +0.4074898209936099E-04,
+0.2123969254700955E-03, +0.1009558244834628E-02,
+0.5131386375222176E-02, +0.2461678673516286E-01,
+0.1235812845384476E+00, +0.4719570096404403E+00,
+0.2893487490631317E+00, -0.1686570611318716E-01,
+0.2231398155172505E-01, -0.1480742256379873E-01,
+0.1692387813500801E+00, +0.3097490354365797E+00,
+2.7593607182401860E+00, 10.5251075070045800E+00]
assert_allclose(ours, theirs)
@pytest.mark.parametrize('optimal', [True, False])
@pytest.mark.parametrize('offset', [0.0, 1.0, -1.0])
@pytest.mark.parametrize('bias', [0, 0.1, -0.1])
@pytest.mark.parametrize('n', [64, 63])
def test_fht_identity(n, bias, offset, optimal):
    rng = np.random.RandomState(3491349965)
    a = rng.standard_normal(n)
dln = rng.uniform(-1, 1)
mu = rng.uniform(-2, 2)
if optimal:
offset = fhtoffset(dln, mu, initial=offset, bias=bias)
A = fht(a, dln, mu, offset=offset, bias=bias)
a_ = ifht(A, dln, mu, offset=offset, bias=bias)
assert_allclose(a, a_)
def test_fht_special_cases():
rng = np.random.RandomState(3491349965)
a = rng.standard_normal(64)
dln = rng.uniform(-1, 1)
# let xp = (mu+1+q)/2, xm = (mu+1-q)/2, M = {0, -1, -2, ...}
# case 1: xp in M, xm in M => well-defined transform
mu, bias = -4.0, 1.0
with warnings.catch_warnings(record=True) as record:
fht(a, dln, mu, bias=bias)
assert not record, 'fht warned about a well-defined transform'
# case 2: xp not in M, xm in M => well-defined transform
mu, bias = -2.5, 0.5
with warnings.catch_warnings(record=True) as record:
fht(a, dln, mu, bias=bias)
assert not record, 'fht warned about a well-defined transform'
# case 3: xp in M, xm not in M => singular transform
mu, bias = -3.5, 0.5
with pytest.warns(Warning) as record:
fht(a, dln, mu, bias=bias)
assert record, 'fht did not warn about a singular transform'
# case 4: xp not in M, xm in M => singular inverse transform
mu, bias = -2.5, 0.5
with pytest.warns(Warning) as record:
ifht(a, dln, mu, bias=bias)
assert record, 'ifht did not warn about a singular transform'
@pytest.mark.parametrize('n', [64, 63])
def test_fht_exact(n):
rng = np.random.RandomState(3491349965)
# for a(r) a power law r^\gamma, the fast Hankel transform produces the
# exact continuous Hankel transform if biased with q = \gamma
mu = rng.uniform(0, 3)
# convergence of HT: -1-mu < gamma < 1/2
gamma = rng.uniform(-1-mu, 1/2)
r = np.logspace(-2, 2, n)
a = r**gamma
dln = np.log(r[1]/r[0])
offset = fhtoffset(dln, mu, initial=0.0, bias=gamma)
A = fht(a, dln, mu, offset=offset, bias=gamma)
k = np.exp(offset)/r[::-1]
# analytical result
At = (2/k)**gamma * poch((mu+1-gamma)/2, gamma)
assert_allclose(A, At)
|
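The tests above pivot on `fhtoffset`: the discrete transform is well-conditioned only near the optimal log-offset for a given grid and order. A short usage sketch of the public scipy.fft API exercised here:

# Usage sketch of the scipy.fft FFTLog API exercised by the tests above.
import numpy as np
from scipy.fft import fht, ifht, fhtoffset

r = np.logspace(-4, 4, 128)
dln = np.log(r[1] / r[0])
mu = 0.5
a = r * np.exp(-r**2 / 2)

offset = fhtoffset(dln, mu)          # optimal log-shift for this grid
A = fht(a, dln, mu, offset=offset)   # forward fast Hankel transform
a_back = ifht(A, dln, mu, offset=offset)
assert np.allclose(a, a_back)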
JelleZijlstra/cython
|
Cython/Compiler/Nodes.py
|
Python
|
apache-2.0
| 357,409
| 0.002781
|
#
# Parse tree nodes
#
from __future__ import absolute_import
import cython
cython.declare(sys=object, os=object, copy=object,
Builtin=object, error=object, warning=object, Naming=object, PyrexTypes=object,
py_object_type=object, ModuleScope=object, LocalScope=object, ClosureScope=object,
StructOrUnionScope=object, PyClassScope=object,
CppClassScope=object, UtilityCode=object, EncodedString=object,
absolute_path_length=cython.Py_ssize_t, error_type=object, _py_int_types=object)
import sys, os, copy
from itertools import chain
if sys.version_info[0] >= 3:
_py_int_types = int
else:
_py_int_types = (int, long)
from . import Builtin
from .Errors import error, warning, InternalError, CompileError
from . import Naming
from . import PyrexTypes
from . import TypeSlots
from .PyrexTypes import py_object_type, error_type
from .Symtab import (ModuleScope, LocalScope, ClosureScope,
StructOrUnionScope, PyClassScope, CppClassScope, TemplateScope)
from .Code import UtilityCode
from .StringEncoding import EncodedString
from . import Future
from . import Options
from . import DebugFlags
from ..Utils import add_metaclass
absolute_path_length = 0
def relative_position(pos):
"""
We embed the relative filename in the generated C file, since we
don't want to have to regenerate and compile all the source code
whenever the Python install directory moves (which could happen,
    e.g., when distributing binaries.)
    INPUT:
        a position tuple -- (absolute filename, line number, column position)
OUTPUT:
relative filename
line number
AUTHOR: William Stein
"""
global absolute_path_length
    if absolute_path_length == 0:
absolute_path_length = len(os.path.abspath(os.getcwd()))
return (pos[0].get_filenametable_entry()[absolute_path_length+1:], pos[1])
def embed_position(pos, docstring):
if not Options.embed_pos_in_docstring:
return docstring
pos_line = u'File: %s (starting at line %s)' % relative_position(pos)
if docstring is None:
# unicode string
return EncodedString(pos_line)
# make sure we can encode the filename in the docstring encoding
# otherwise make the docstring a unicode string
encoding = docstring.encoding
if encoding is not None:
try:
pos_line.encode(encoding)
except UnicodeEncodeError:
encoding = None
if not docstring:
# reuse the string encoding of the original docstring
doc = EncodedString(pos_line)
else:
doc = EncodedString(pos_line + u'\n' + docstring)
doc.encoding = encoding
return doc
def _analyse_signature_annotation(annotation, env):
base_type = None
explicit_pytype = explicit_ctype = False
if annotation.is_dict_literal:
for name, value in annotation.key_value_pairs:
            if not name.is_string_literal:
                continue
            if name.value in ('type', b'type'):
explicit_pytype = True
if not explicit_ctype:
annotation = value
elif name.value in ('ctype', b'ctype'):
explicit_ctype = True
annotation = value
if explicit_pytype and explicit_ctype:
warning(annotation.pos, "Duplicate type declarations found in signature annotation")
arg_type = annotation.analyse_as_type(env)
if arg_type is not None:
if explicit_pytype and not explicit_ctype and not arg_type.is_pyobject:
warning(annotation.pos,
"Python type declaration in signature annotation does not refer to a Python type")
base_type = CAnalysedBaseTypeNode(
annotation.pos, type=arg_type, is_arg=True)
else:
warning(annotation.pos, "Unknown type declaration found in signature annotation")
return base_type, arg_type
def write_func_call(func, codewriter_class):
def f(*args, **kwds):
if len(args) > 1 and isinstance(args[1], codewriter_class):
# here we annotate the code with this function call
# but only if new code is generated
node, code = args[:2]
marker = ' /* %s -> %s.%s %s */' % (
' ' * code.call_level,
node.__class__.__name__,
func.__name__,
node.pos[1:])
pristine = code.buffer.stream.tell()
code.putln(marker)
start = code.buffer.stream.tell()
code.call_level += 4
res = func(*args, **kwds)
code.call_level -= 4
if start == code.buffer.stream.tell():
# no code written => undo writing marker
code.buffer.stream.truncate(pristine)
else:
marker = marker.replace('->', '<-', 1)
code.putln(marker)
return res
else:
return func(*args, **kwds)
return f
class VerboseCodeWriter(type):
# Set this as a metaclass to trace function calls in code.
# This slows down code generation and makes much larger files.
def __new__(cls, name, bases, attrs):
from types import FunctionType
from .Code import CCodeWriter
attrs = dict(attrs)
for mname, m in attrs.items():
if isinstance(m, FunctionType):
attrs[mname] = write_func_call(m, CCodeWriter)
return super(VerboseCodeWriter, cls).__new__(cls, name, bases, attrs)
class CheckAnalysers(type):
"""Metaclass to check that type analysis functions return a node.
"""
methods = set(['analyse_types',
'analyse_expressions',
'analyse_target_types'])
def __new__(cls, name, bases, attrs):
from types import FunctionType
def check(name, func):
def call(*args, **kwargs):
retval = func(*args, **kwargs)
if retval is None:
print('%s %s %s' % (name, args, kwargs))
return retval
return call
attrs = dict(attrs)
for mname, m in attrs.items():
if isinstance(m, FunctionType) and mname in cls.methods:
attrs[mname] = check(mname, m)
return super(CheckAnalysers, cls).__new__(cls, name, bases, attrs)
def _with_metaclass(cls):
if DebugFlags.debug_trace_code_generation:
return add_metaclass(VerboseCodeWriter)(cls)
#return add_metaclass(CheckAnalysers)(cls)
return cls
@_with_metaclass
class Node(object):
# pos (string, int, int) Source file position
# is_name boolean Is a NameNode
# is_literal boolean Is a ConstNode
is_name = 0
is_none = 0
is_nonecheck = 0
is_literal = 0
is_terminator = 0
temps = None
# All descendants should set child_attrs to a list of the attributes
# containing nodes considered "children" in the tree. Each such attribute
# can either contain a single node or a list of nodes. See Visitor.py.
child_attrs = None
cf_state = None
# This may be an additional (or 'actual') type that will be checked when
# this node is coerced to another type. This could be useful to set when
# the actual type to which it can coerce is known, but you want to leave
# the type a py_object_type
coercion_type = None
def __init__(self, pos, **kw):
self.pos = pos
self.__dict__.update(kw)
gil_message = "Operation"
nogil_check = None
def gil_error(self, env=None):
error(self.pos, "%s not allowed without gil" % self.gil_message)
cpp_message = "Operation"
def cpp_check(self, env):
if not env.is_cpp():
self.cpp_error()
def cpp_error(self):
error(self.pos, "%s only allowed in c++" % self.cpp_message)
def clone_node(self):
"""Clone the node. This is defined as a shallow copy, except for member lists
amongst the child attributes (from get_child_accessors) which are also
copied. Lists contain
|
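`write_func_call` and `VerboseCodeWriter` above implement a method-wrapping metaclass: every plain function attribute is replaced at class-creation time by a wrapper that annotates the generated C code. The generic shape of that trick, reduced to a self-contained tracing sketch:

# Self-contained sketch of the method-wrapping metaclass pattern used by
# VerboseCodeWriter above (generic tracing instead of C-code markers).
from types import FunctionType

def traced(func):
    def wrapper(*args, **kwargs):
        print("-> %s" % func.__name__)
        result = func(*args, **kwargs)
        print("<- %s" % func.__name__)
        return result
    return wrapper

class Tracing(type):
    def __new__(cls, name, bases, attrs):
        attrs = {k: traced(v) if isinstance(v, FunctionType) else v
                 for k, v in attrs.items()}
        return super(Tracing, cls).__new__(cls, name, bases, attrs)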
nburn42/tensorflow
|
tensorflow/python/tools/optimize_for_inference_test.py
|
Python
|
apache-2.0
| 13,432
| 0.005435
|
# pylint: disable=g-bad-file-header
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.graph_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import math_ops # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
from tensorflow.python.tools import optimize_for_inference_lib
class OptimizeForInferenceTest(test.TestCase):
def create_node_def(self, op, name, inputs):
new_node = node_def_pb2.NodeDef()
new_node.op = op
    new_node.name = name
for input_name in inputs:
new_node.input.extend([input_name])
return new_node
def create_constant_node_def(self, name, value, dtype, shape=None):
node = self.create_node_def("Const", name, [])
self.set_attr_dtype(node, "dtype", dtype)
self.set_attr_tensor(node, "value", value, dtype, shape)
return node
def set_attr_dtype(self, node, key, value):
node.attr[key].CopyFrom(
attr_value_pb2.AttrValue(type=value.as_datatype_enum))
def set_attr_tensor(self, node, key, value, dtype, shape=None):
node.attr[key].CopyFrom(
attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto(
value, dtype=dtype, shape=shape)))
def testOptimizeForInference(self):
self.maxDiff = 1000
unused_constant_name = "unused_constant"
unconnected_add_name = "unconnected_add"
a_constant_name = "a_constant"
b_constant_name = "b_constant"
a_check_name = "a_check"
b_check_name = "b_check"
a_identity_name = "a_identity"
b_identity_name = "b_identity"
add_name = "add"
unused_output_add_name = "unused_output_add"
graph_def = graph_pb2.GraphDef()
unused_constant = self.create_constant_node_def(
unused_constant_name, value=0, dtype=dtypes.float32, shape=[])
graph_def.node.extend([unused_constant])
unconnected_add_node = self.create_node_def(
"Add", unconnected_add_name,
[unused_constant_name, unused_constant_name])
self.set_attr_dtype(unconnected_add_node, "T", dtypes.float32)
graph_def.node.extend([unconnected_add_node])
a_constant = self.create_constant_node_def(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant])
a_check_node = self.create_node_def("CheckNumerics", a_check_name,
[a_constant_name])
graph_def.node.extend([a_check_node])
a_identity_node = self.create_node_def(
"Identity", a_identity_name, [a_constant_name, "^" + a_check_name])
graph_def.node.extend([a_identity_node])
b_constant = self.create_constant_node_def(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant])
b_check_node = self.create_node_def("CheckNumerics", b_check_name,
[b_constant_name])
graph_def.node.extend([b_check_node])
b_identity_node = self.create_node_def(
"Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
graph_def.node.extend([b_identity_node])
add_node = self.create_node_def("Add", add_name,
[a_identity_name, b_identity_name])
self.set_attr_dtype(add_node, "T", dtypes.float32)
graph_def.node.extend([add_node])
unused_output_add_node = self.create_node_def("Add", unused_output_add_name,
[add_name, b_constant_name])
self.set_attr_dtype(unused_output_add_node, "T", dtypes.float32)
graph_def.node.extend([unused_output_add_node])
expected_output = graph_pb2.GraphDef()
a_constant = self.create_constant_node_def(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant])
b_constant = self.create_constant_node_def(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant])
add_node = self.create_node_def("Add", add_name,
[a_constant_name, b_constant_name])
self.set_attr_dtype(add_node, "T", dtypes.float32)
expected_output.node.extend([add_node])
output = optimize_for_inference_lib.optimize_for_inference(
graph_def, [], [add_name], dtypes.float32.as_datatype_enum)
self.assertProtoEquals(expected_output, output)
def testFoldBatchNorms(self):
with self.test_session() as sess:
inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
input_op = constant_op.constant(
np.array(inputs), shape=[1, 1, 6, 2], dtype=dtypes.float32)
weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
weights_op = constant_op.constant(
np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
conv_op = nn_ops.conv2d(
input_op, weights_op, [1, 1, 1, 1], padding="SAME", name="conv_op")
mean_op = constant_op.constant(
np.array([10, 20]), shape=[2], dtype=dtypes.float32)
variance_op = constant_op.constant(
np.array([0.25, 0.5]), shape=[2], dtype=dtypes.float32)
beta_op = constant_op.constant(
np.array([0.1, 0.6]), shape=[2], dtype=dtypes.float32)
gamma_op = constant_op.constant(
np.array([1.0, 2.0]), shape=[2], dtype=dtypes.float32)
test_util.set_producer_version(ops.get_default_graph(), 8)
gen_nn_ops._batch_norm_with_global_normalization(
conv_op,
mean_op,
variance_op,
beta_op,
gamma_op,
0.00001,
False,
name="output")
original_graph_def = sess.graph_def
original_result = sess.run(["output:0"])
optimized_graph_def = optimize_for_inference_lib.fold_batch_norms(
original_graph_def)
with self.test_session() as sess:
_ = importer.import_graph_def(
optimized_graph_def, input_map={}, name="optimized")
optimized_result = sess.run(["optimized/output:0"])
self.assertAllClose(original_result, optimized_result)
for node in optimized_graph_def.node:
self.assertNotEqual("BatchNormWithGlobalNormalization", node.op)
def testFoldFusedBatchNorms(self):
for data_format, use_gpu in [("NHWC", False), ("NCHW", True)]:
with self.test_session(use_gpu=use_gpu) as sess:
inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
input_op = constant_op.constant(
np.array(inputs),
shape=[1, 1, 6, 2] if data_format == "NHWC" else [1, 2, 1, 6],
dtype=dtypes.float32)
weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
weights_op = constant_op.constant(
np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
conv_op = nn_ops.conv2d(
input_op,
weights_op, [1, 1, 1, 1],
padding="SAME",
|
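What `fold_batch_norms` is verified to do above is the standard constant-folding identity: a batch normalization following a convolution can be absorbed into the conv weights plus a bias. A numpy sketch of that per-output-channel arithmetic (function and variable names here are illustrative, not the library's):

# Numpy sketch of batch-norm folding, the transformation the test above
# verifies end-to-end; names are illustrative.
import numpy as np

def fold_batch_norm(weights, mean, variance, beta, gamma, eps=1e-5):
    # weights: [h, w, in_ch, out_ch]; stats/params: [out_ch]
    scale = gamma / np.sqrt(variance + eps)
    folded_weights = weights * scale      # broadcasts over the out_ch axis
    folded_bias = beta - mean * scale
    return folded_weights, folded_bias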
sandroandrade/emile-server
|
cruds/crud_wall_messages/models.py
|
Python
|
gpl-3.0
| 1,182
| 0.001692
|
import datetime
from backend import db
from cruds.crud_user_type_destinations.models import UserTypeDestinations
from cruds.crud_users.models import Users
from cruds import format_urls_in_text
class WallMessages(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    date = db.Column(db.Integer)
sender = db.Column(db.Integer, db.ForeignKey("users.id"))
destination = db.Column(db.Integer, db.ForeignKey("user_type_desti
|
nations.id"))
param_value = db.Column(db.Integer())
message = db.Column(db.Text())
def set_fields(self, fields):
self.date = fields['date']
self.sender = fields['sender']
self.destination = fields['user_type_destination_id']
self.param_value = fields['parameter']
self.message = format_urls_in_text(fields['message'])
def get_sender(self):
return Users.query.filter_by(id=self.sender).all()
def get_destinations(self):
_dict = {}
query = UserTypeDestinations.query.filter_by(id=self.destination).first().users_query
query = str(query).replace('$', str(self.param_value))
exec(query, _dict)
return _dict['users']
|
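`get_destinations` above works by textual substitution: the stored `users_query` template has every `$` replaced by the message's parameter value, and the `exec` call is expected to bind a `users` name in the passed dictionary. A toy illustration of that contract (the query text is hypothetical):

# Toy illustration of the substitute-then-exec contract used by
# get_destinations above; the stored query text is hypothetical.
_dict = {}
query_template = "users = [u for u in range($)]"
query = query_template.replace('$', str(3))
exec(query, _dict)
assert _dict['users'] == [0, 1, 2]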
Gabotero/GNURadioNext
|
gnuradio-runtime/python/gnuradio/gr/tag_utils.py
|
Python
|
gpl-3.0
| 1,719
| 0.004072
|
#
# Copyright 2003-2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
""" Conversion tools between stream tags and Python objects """
import pmt
try:
from gnuradio import gr
except ImportError:
from runtime_swig import tag_t
class PythonTag(object):
" Python container for tags "
def __init__(self):
self.offset = None
self.key = None
self.value = None
self.srcid = None
def tag_to_python(tag):
""" Convert a stream tag to a Python-readable object """
newtag = PythonTag()
newtag.offset = tag.offset
newtag.key = pmt.to_python(tag.key)
newtag.value = pmt.to_python(tag.value)
newtag.srcid = pmt.to_python(tag.srcid)
return newtag
def tag_to_pmt(tag):
""" Convert a Python-readable object to a stream tag """
newtag = tag_t()
newtag.offset = tag.offset
    newtag.key = pmt.from_python(tag.key)
newtag.value = pmt.from_python(tag.value)
newtag.srcid = pmt.from_python(tag.srcid)
return newtag
|
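Typical use of these helpers is batch conversion of the tags a block reports; a hedged sketch (the `tags` list stands in for the result of a block's `get_tags_in_range()`, which requires a running GNU Radio flowgraph):

# Hedged usage sketch: bulk-converting stream tags with tag_to_python.
# `tags` is hypothetical here; in practice it comes from a block's
# get_tags_in_range() inside a running flowgraph.
def tags_to_python_list(tags):
    return [tag_to_python(t) for t in tags]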
simeonf/sfpython
|
sfpython/jobs/migrations/0007_auto_20151115_0614.py
|
Python
|
apache-2.0
| 865
| 0.002312
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('jobs', '0006_auto_20151115_0609'),
]
    operations = [
        migrations.AlterModelOptions(
name='job',
options={'ordering': ['order'], 'verbose_name': 'Job Details', 'verbose_name_plural': 'Job Postings'},
),
migrations.AddField(
model_name='job',
name='location',
field=models.CharField(default=b'', max_length=100),
),
migrations.AlterField(
model_name='job',
name='level',
field=models.IntegerField(default=0, choices=[(3500, b'Platinum'), (2000, b'Diamond'), (1500, b'Gold'), (1000, b'Silver'), (500, b'Bronze'), (0, b'None')]),
),
]
|
Rdbaker/Rank
|
rank/__init__.py
|
Python
|
mit
| 87
| 0
|
MAJOR = 1
MINOR = 0
PATCH = 0
__version__ = "{0}.{1}.{2}".format(MAJOR, MINOR, PATCH)
|
idegtiarov/ceilometer
|
ceilometer/api/controllers/v2/base.py
|
Python
|
apache-2.0
| 8,636
| 0
|
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 IBM Corp.
# Copyright 2013 eNovance <licensing@enovance.com>
# Copyright Ericsson AB 2013. All rights reserved
# Copyright 2014 Hewlett-Packard Company
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import datetime
import functools
import inspect
import json
from oslo_utils import strutils
from oslo_utils import timeutils
import pecan
import six
import wsme
from wsme import types as wtypes
from ceilometer.i18n import _
operation_kind = ('lt', 'le', 'eq', 'ne', 'ge', 'gt')
operation_kind_enum = wtypes.Enum(str, *operation_kind)
class ClientSideError(wsme.exc.ClientSideError):
def __init__(self, error, status_code=400):
pecan.response.translatable_error = error
super(ClientSideError, self).__init__(error, status_code)
class EntityNotFound(ClientSideError):
def __init__(self, entity, id):
super(EntityNotFound, self).__init__(
_("%(entity)s %(id)s Not Found") % {'entity': entity,
'id': id},
status_code=404)
class ProjectNotAuthorized(ClientSideError):
def __init__(self, id, aspect='project'):
params = dict(aspect=aspect, id=id)
super(ProjectNotAuthorized, self).__init__(
_("Not Authorized to access %(aspect)s %(id)s") % params,
status_code=401)
class AdvEnum(wtypes.wsproperty):
"""Handle default and mandatory for wtypes.Enum."""
def __init__(self, name, *args, **kwargs):
self._name = '_advenum_%s' % name
self._default = kwargs.pop('default', None)
mandatory = kwargs.pop('mandatory', False)
enum = wtypes.Enum(*args, **kwargs)
super(AdvEnum, self).__init__(datatype=enum, fget=self._get,
fset=self._set, mandatory=mandatory)
def _get(self, parent):
if hasattr(parent, self._name):
value = getattr(parent, self._name)
return value or self._default
return self._default
def _set(self, parent, value):
try:
if self.datatype.validate(value):
setattr(parent, self._name, value)
except ValueError as e:
raise wsme.exc.InvalidInput(self._name.replace('_advenum_', '', 1),
value, e)
class Base(wtypes.DynamicBase):
@classmethod
def from_db_model(cls, m):
return cls(**(m.as_dict()))
@classmethod
def from_db_and_links(cls, m, links):
return cls(links=links, **(m.as_dict()))
def as_dict(self, db_model):
valid_keys = inspect.getargspec(db_model.__init__)[0]
if 'self' in valid_keys:
valid_keys.remove('self')
return self.as_dict_from_keys(valid_keys)
def as_dict_from_keys(self, keys):
return dict((k, getattr(self, k))
for k in keys
if hasattr(self, k) and
getattr(self, k) != wsme.Unset)
class Link(Base):
"""A link representation."""
href = wtypes.text
"The url of a link"
rel = wtypes.text
"The name of a link"
@classmethod
def sample(cls):
return cls(href=('http://localhost:8777/v2/meters/volume?'
'q.field=resource_id&'
'q.value=bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'),
rel='volume'
)
class Query(Base):
"""Query filter."""
# The data types supported by the query.
_supported_types = ['integer', 'float', 'string', 'boolean', 'datetime']
# Functions to convert the data field to the correct type.
_type_converters = {'integer': int,
'float': float,
'boolean': functools.partial(
strutils.bool_from_string, strict=True),
'string': six.text_type,
'datetime': timeutils.parse_isotime}
_op = None # provide a default
def get_op(self):
return self._op or 'eq'
def set_op(self, value):
self._op = value
field = wsme.wsattr(wtypes.text, mandatory=True)
"The name of the field to test"
# op = wsme.wsattr(operation_kind, default='eq')
# this ^ doesn't seem to work.
op = wsme.wsproperty(operation_kind_enum, get_op, set_op)
"The comparison operator. Defaults to 'eq'."
value = wsme.wsattr(wtypes.text, mandatory=True)
"The value to compare against the stored data"
type = wtypes.text
"The data type of value to compare against the stored data"
def __repr__(self):
# for logging calls
return '<Query %r %s %r %s>' % (self.field,
self.op,
self.value,
self.type)
@classmethod
def sample(cls):
return cls(field='resource_id',
op='eq',
value='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
type='string'
)
def as_dict(self):
return self.as_dict_from_keys(['field', 'op', 'type', 'value'])
def _get_value_as_type(self, forced_type=None):
"""Convert metadata value to the specified data type.
This method is called during metadata query to help convert the
querying metadata to the data type specified by user. If there is no
data type given, the metadata will be parsed by ast.literal_eval to
try to do a smart converting.
NOTE (flwang) Using "_" as prefix to avoid an InvocationError raised
from wsmeext/sphinxext.py. It's OK to call it outside the Query class.
Because the "public" side of that class is actually the outside of the
API, and the "private" side is the API implementation. The method is
only used in the API implementation, so it's OK.
:returns: metadata value converted with the specified data type.
"""
type = forced_type or self.type
try:
converted_value = self.value
if not type:
try:
converted_value = ast.literal_eval(self.value)
except (ValueError, SyntaxError):
                    # Unable to convert the metadata value automatically
                    # let it default to self.value
                    pass
else:
if type not in self._supported_types:
# Types must be explicitly declared so the
# correct type converter may be used. Subclasses
# of Query may define _supported_types and
# _type_converters to define their own types.
raise TypeError()
converted_value = self._type_converters[type](self.value)
if isinstance(converted_value, datetime.datetime):
converted_value = timeutils.normalize_time(converted_value)
except ValueError:
msg = (_('Unable to convert the value %(value)s'
' to the expected data type %(type)s.') %
{'value': self.value, 'type': type})
raise ClientSideError(msg)
except TypeError:
msg = (_('The data type %(type)s is not supported. The supported'
' data type list is: %(supported)s') %
{'type': type, 'supported': self._supported_types})
raise ClientSideError(msg)
except Exception:
msg = (_('Unexpected exception co
|
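The effect of `_get_value_as_type` is easiest to see with concrete values: without a declared type it falls back to `ast.literal_eval`, otherwise it dispatches through `_type_converters`. A standalone sketch of that fallback logic (converter table abbreviated):

# Standalone sketch of the typed-conversion fallback implemented by
# Query._get_value_as_type above.
import ast

_type_converters = {'integer': int, 'float': float, 'string': str}

def convert(value, declared_type=None):
    if not declared_type:
        try:
            return ast.literal_eval(value)   # smart conversion
        except (ValueError, SyntaxError):
            return value                     # leave as text
    return _type_converters[declared_type](value)

assert convert('123') == 123
assert convert('123', 'string') == '123'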
yehzhang/RapidTest
|
examples/solutions/plus_one.py
|
Python
|
mit
| 400
| 0
|
class Solution(object):
def plusOne(self, digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
for i in range(len(digits) - 1, -1, -1):
            if digits[i] < 9:
digits[i] += 1
return digits
digits[i] = 0
new_digits = [1]
new_digits.extend([0] * len(digits))
return new_digits
|
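The carry logic is worth a concrete trace: only a trailing run of 9s pushes the loop past index 0, at which point the result grows by one digit. Quick checks, assuming the `Solution` class above:

# Usage checks for the plusOne carry behaviour (assumes Solution above).
s = Solution()
assert s.plusOne([1, 2, 9]) == [1, 3, 0]
assert s.plusOne([9, 9]) == [1, 0, 0]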
jupyter/jupyter-drive
|
setup.py
|
Python
|
bsd-2-clause
| 2,752
| 0.002544
|
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
#with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
# long_description = f.read()
setup(
name='jupyterdrive',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/development.html#single-sourcing-the-version
version='1.1.0',
description='Integration of IPython/Jupyter with Google drive',
long_description='',
# The project's main homepage.
url='https://github.com/jupyter/jupyter-drive',
# Author details
author='Matthias Bussonnier, Kester Tong, Kyle Kelley, Thomas Kluyver, The IPython team',
author_email='ipython-dev@scipy.org',
# Choose your license
license='BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
        # 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Framework :: IPython',
],
# What does your project relate to?
keywords='ipython jupyter google drive notebook',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=['notebook'],
# have to be included in MANIFEST.in as well.
package_data={
'jupyterdrive': [ '*.json',
'*.py',
'gdrive/*.js',
],
},
)
|
asifhj/Python_SOAP_OSSJ_SAP_Fusion_Kafka_Spark_HBase
|
KafkaCP.py
|
Python
|
apache-2.0
| 2,510
| 0.003586
|
__author__ = 'asifj'
import logging
from kafka import KafkaConsumer
import json
import traceback
from bson.json_util import dumps
from kafka import SimpleProducer, KafkaClient
from utils import Utils
logging.basicConfig(
format='%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s',
level=logging.INFO
)
inputs = []
consumer = KafkaConsumer("SAPEvent", bootstrap_servers=['172.22.147.242:9092', '172.22.147.232:9092', '172.22.147.243:9092'], auto_commit_enable=False, auto_offset_reset="smallest")
message_no = 1
inputs = consumer.fetch_messages()
'''for message in consumer:
topic = message.topic
partition = message.partition
offset = message.offset
key = message.key
message = message.value
print "================================================================================================================="
if message is not None:
try:
document = json.loads(message)
collection = document.keys()[0]
if collection == "customerMaster":
print "customerMaster"
elif collection == "srAttachements":
#print dumps(document, sort_keys=True)
inputs.append(document)
except Exception, err:
print "CustomException"
print "Kafka Message: "+str(message)
print(traceback.format_exc())
print "================================================================================================================="
print "\n"
message_no += 1
'''
# To send messages synchronously
kafka = KafkaClient('172.22.147.232:9092,172.22.147.242:9092,172.22.147.243:9092')
producer = SimpleProducer(kafka)
for i in inputs:
try:
#producer.send_messages(b'SAPEvent', json.dumps(input))
document = json.loads(str(i.value))
type = document.keys()[0]
if type == "srDetails":
print "+++++++++++++++++++++++++++++++++++++++++++++
|
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
row = []
utils = Utils()
row = utils.validate_sr_details( document['srDetails'], row)
print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
print "\n\n"
    except Exception, err:
        print "Kafka: "+str(document)
        print err
print(traceback.format_exc())
|
Azure/azure-sdk-for-python
|
sdk/security/azure-mgmt-security/azure/mgmt/security/operations/_regulatory_compliance_controls_operations.py
|
Python
|
mit
| 9,157
| 0.004477
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class RegulatoryComplianceControlsOperations(object):
"""RegulatoryComplianceControlsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.security.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
regulatory_compliance_standard_name, # type: str
filter=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.RegulatoryComplianceControlList"]
"""All supported regulatory compliance controls details and state for selected standard.
:param regulatory_compliance_standard_name: Name of the regulatory compliance standard object.
:type regulatory_compliance_standard_name: str
:param filter: OData filter. Optional.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RegulatoryComplianceControlList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.security.models.RegulatoryComplianceControlList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RegulatoryComplianceControlList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-01-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
'regulatoryComplianceStandardName': self._serialize.url("regulatory_compliance_standard_name", regulatory_compliance_standard_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RegulatoryComplianceControlList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/regulatoryComplianceStandards/{regulatoryComplianceStandardName}/regulatoryComplianceControls'} # type: ignore
def get(
self,
regulatory_compliance_standard_name, # type: str
regulatory_compliance_control_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.RegulatoryComplianceControl"
"""Selected regulatory compliance control details and state.
:param regulatory_compliance_standard_name: Name of the regulatory compliance standard object.
:type regulatory_compliance_standard_name: str
:param regulatory_compliance_control_name: Name of the regulatory compliance control object.
:type regulatory_compliance_control_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RegulatoryComplianceControl, or the result of cls(response)
:rtype: ~azure.mgmt.security.models.RegulatoryComplianceControl
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RegulatoryComplianceControl"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-01-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
'regulatoryComplianceStandardName': self._serialize.url("regulatory_compliance_standard_name", regulatory_compliance_standard_name, 'str'),
'regulatoryComplianceControlName': self._serialize.url("regulatory_compliance_control_name", regulatory_compliance_control_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: D
|
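The list/paging plumbing above follows azure-core's standard shape: `prepare_request` builds either the first request or the `next_link` request, `extract_data` returns `(continuation_token, iterator_of_items)`, and `ItemPaged(get_next, extract_data)` drives the loop until the token is exhausted. A skeletal sketch of that contract with stubbed, in-memory pages instead of service responses:

# Skeletal sketch of the azure-core paging contract used above; the
# page payloads here are stubbed in-memory data, not service responses.
from azure.core.paging import ItemPaged

_pages = {None: (['a', 'b'], 'page2'), 'page2': (['c'], None)}

def get_next(continuation_token=None):
    return _pages[continuation_token]      # stands in for an HTTP round-trip

def extract_data(page):
    items, next_link = page
    return next_link, iter(items)

assert list(ItemPaged(get_next, extract_data)) == ['a', 'b', 'c']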
openfisca/openfisca-france-extension-revenu-de-base
|
openfisca_france_extension_revenu_de_base/cotisations.py
|
Python
|
agpl-3.0
| 7,784
| 0.004758
|
# -*- coding: utf-8 -*-
from __future__ import division
from openfisca_core import reforms
from openfisca_france.model.base import FloatCol, Individus, Variable
# Build function
def build_reform(tax_benefit_system):
Reform = reforms.make_reform(
key = 'revenu_de_base_cotisations',
name = u"Réforme des cotisations pour un Revenu de base",
reference = tax_benefit_system,
)
class cotisations_contributives(Variable):
column = FloatCol
entity_class = Individus
label = u"Nouvelles cotisations contributives"
def function(self, simulation, period):
ags = simulation.calculate('ags', period)
agff_tranche_a_employeur = simulation.calculate('agff_tranche_a_employeur', period)
apec_employeur = simulation.calculate('apec_employeur', period)
arrco_tranche_a_employeur = simulation.calculate('arrco_tranche_a_employeur', period)
assedic_employeur = simulation.calculate('assedic_employeur', period)
cotisation_exceptionnelle_temporaire_employeur = simulation.calculate(
'cotisation_exceptionnelle_temporaire_employeur', period)
fonds_emploi_hospitalier = simulation.calculate('fonds_emploi_hospitalier', period)
ircantec_employeur = simulation.calculate('ircantec_employeur', period)
pension_civile_employeur = simulation.calculate('pension_civile_employeur', period)
prevoyance_obligatoire_cadre = simulation.calculate('prevoyance_obligatoire_cadre', period)
rafp_employeur = simulation.calculate('rafp_employeur', period)
vieillesse_deplafonnee_employeur = simulation.calculate('vieillesse_deplafonnee_employeur', period)
vieillesse_plafonnee_employeur = simulation.calculate('vieillesse_plafonnee_employeur', period)
allocations_temporaires_invalidite = simulation.calculate('allocations_temporaires_invalidite', period)
accident_du_travail = simulation.calculate('accident_du_travail', period)
agff_tranche_a_employe = simulation.calculate('agff_tranche_a_employe', period)
agirc_tranche_b_employe = simulation.calculate('agirc_tranche_b_employe', period)
apec_employe = simulation.calculate('apec_employe', period)
arrco_tranche_a_employe = simulation.calculate('arrco_tranche_a_employe', period)
assedic_employe = simulation.calculate('assedic_employe', period)
cotisation_exceptionnelle_temporaire_employe = simulation.calculate(
'cotisation_exceptionnelle_temporaire_employe', period)
ircantec_employe = simulation.calculate('ircantec_employe', period)
pension_civile_employe = simulation.calculate('pension_civile_employe', period)
rafp_employe = simulation.calculate('rafp_employe', period)
vieillesse_deplafonnee_employe = simulation.calculate('vieillesse_deplafonnee_employe', period)
vieillesse_plafonnee_employe = simulation.calculate('vieillesse_plafonnee_employe', period)
cotisations_contributives = (
# cotisations patronales contributives dans le prive
ags +
agff_tranche_a_employeur +
apec_employeur +
arrco_tranche_a_employeur +
assedic_employeur +
cotisation_exceptionnelle_temporaire_employeur +
prevoyance_obligatoire_cadre + # TODO contributive ou pas
                vieillesse_deplafonnee_employeur +
vieillesse_plafonnee_employeur +
# cotisations patronales contributives dans le public
fonds_emploi_hospitalier +
ircantec_employeur +
pension_civile_employeur +
rafp_employeur +
# anciennes cot patronales non-contributives classées ici comme contributives
allocations_temporaires_invalidite +
accident_du_travail +
# anciennes cotisations salariales contributives dans le prive
agff_tranche_a_employe +
agirc_tranche_b_employe +
apec_employe +
arrco_tranche_a_employe +
assedic_employe +
cotisation_exceptionnelle_temporaire_employe +
vieillesse_deplafonnee_employe +
vieillesse_plafonnee_employe +
# anciennes cotisations salariales contributives dans le public
ircantec_employe +
pension_civile_employe +
rafp_employe
)
return period, cotisations_contributives
class nouv_salaire_de_base(Variable):
reference = tax_benefit_system.column_by_name['salaire_de_base']
# Le salaire brut se définit dans la réforme comme le salaire super-brut auquel
# on retranche les cotisations contributives
def function(self, simulation, period):
period = period.start.period('month').offset('first-of')
salsuperbrut = simulation.calculate('salsuperbrut', period)
cotisations_contributives = simulation.calculate('cotisations_contributives', period)
nouv_salaire_de_base = (
salsuperbrut -
cotisations_contributives
)
return period, nouv_salaire_de_base
class nouv_csg(Variable):
reference = tax_benefit_system.column_by_name['csg_imposable_salaire']
# On applique une CSG unique à 22,5% qui finance toutes les prestations non-contributives
def function(self, simulation, period):
period = period.start.period('month').offset('first-of')
nouv_salaire_de_base = simulation.calculate('nouv_salaire_de_base', period)
nouv_csg = (
-0.225 * nouv_salaire_de_base
)
return period, nouv_csg
class salaire_net(Variable):
reference = tax_benefit_system.column_by_name['salaire_net']
# On retire la nouvelle CSG (pas celle qui finance le RDB) pour trouver le nouveau salaire net
def function(self, simulation, period):
period = period.start.period('month').offset('first-of')
nouv_salaire_de_base = simulation.calculate('nouv_salaire_de_base', period)
nouv_csg = simulation.calculate('nouv_csg', period)
salaire_net = (
nouv_salaire_de_base +
nouv_csg
)
return period, salaire_net
class salaire_imposable(Variable):
reference = tax_benefit_system.column_by_name['salaire_imposable']
# Nous sommes partis du nouveau salaire net et par rapport au salaire imposable actuel,
# nous avons supprimé : les heures sup, la déductibilité de CSG
def function(self, simulation, period):
period = period
hsup = simulation.calculate('hsup', period)
salaire_net = simulation.calculate('salaire_net', period)
primes_fonction_publique = simulation.calculate('primes_fonction_publique', period)
indemnite_residence = simulation.calculate('indemnite_residence', period)
supp_familial_traitement = simulation.calculate('supp_familial_traitement', period)
rev_microsocial_declarant1 = simulation.calculate('rev_microsocial_declarant1', period)
return period, (
salaire_net +
primes_fonction_publique +
indemnite_residence +
supp_familial_traitement +
hsup +
rev_microsocial_declarant1
)
return Reform()
|
Endika/account-invoice-reporting
|
base_comment_template/__openerp__.py
|
Python
|
agpl-3.0
| 1,121
| 0
|
# -*- coding: utf-8 -*-
#
#
# Author: Nicolas Bessi
# Copyright 2013-2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{"name": "Base Comments Templates",
"summary": "Comments templates on documents",
"version": "8.0.1.0.0",
"depends": ["base"],
"author": "Camptocamp,Odoo Community Association (OCA)",
"data": ["comment_view.xml",
'security/ir.model.access.csv',
],
"category": "Sale",
"installable": True,
"active": False, }
|
memsql/memsql-loader
|
memsql_loader/loader_db/storage.py
|
Python
|
apache-2.0
| 2,959
| 0.000676
|
import contextlib
import gc
import multiprocessing
import os
from memsql_loader.util.apsw_storage import APSWStorage
from memsql_loader.util import paths
MEMSQL_LOADER_DB = 'memsql_loader.db'
def get_loader_db_path():
return os.path.join(paths.get_data_dir(), MEMSQL_LOADER_DB)
# IMPORTANT NOTE: This class cannot be shared across forked processes unless
# you use fork_wrapper.
class LoaderStorage(APSWStorage):
_instance = None
_initialized = False
_instance_lock = multiprocessing.RLock()
# We use LoaderStorage as a singleton.
def __new__(cls, *args, **kwargs):
with cls._instance_lock:
if cls._instance is None:
cls._instance = super(LoaderStorage, cls).__new__(
cls, *args, **kwargs)
                cls._initialized = False
return cls._instance
@classmethod
def drop_database(cls):
with cls._instance_lock:
if os.path.isfile(get_loader_db_path()):
os.remove(get_loader_db_path())
if os.path.isfile(get_loader_db_path() + '-shm'):
os.remove(get_loader_db_path() + '-shm')
if os.path.isfile(get_loader_db_path() + '-wal'):
os.remove(get_loader_db_path() + '-wal')
cls._instance = None
@classmethod
@contextlib.contextmanager
def fork_wrapper(cls):
# This context manager should be used around any code that forks new
# processes that will use a LoaderStorage object (e.g. Worker objects).
# This ensures that we don't share SQLite connections across forked
# processes.
with cls._instance_lock:
if cls._instance is not None:
cls._instance.close_connections()
# We garbage collect here to clean up any SQLite objects we
# may have missed; this is important because any surviving
# objects post-fork will mess up SQLite connections in the
# child process. We use generation=2 to collect as many
# objects as possible.
gc.collect(2)
yield
with cls._instance_lock:
if cls._instance is not None:
cls._instance.setup_connections()
def __init__(self):
with LoaderStorage._instance_lock:
# Since this is a singleton object, we don't want to call the
# parent object's __init__ if we've already instantiated this
# object in __new__. However, we may have closed this object's
# connections in fork_wrapper above; in that case, we want to set
# up new database connections.
if not LoaderStorage._initialized:
super(LoaderStorage, self).__init__(get_loader_db_path())
LoaderStorage._initialized = True
return
elif not self._db or not self._db_t:
self.setup_connections()
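# Hypothetical usage sketch (worker_main is illustrative, not part of this
# module): wrap any fork point so the singleton's SQLite connections are
# closed before the fork and re-opened afterwards.
#
#     storage = LoaderStorage()
#     with LoaderStorage.fork_wrapper():
#         worker = multiprocessing.Process(target=worker_main)
#         worker.start()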
|
datagutten/comics
|
comics/comics/exiern.py
|
Python
|
agpl-3.0
| 784
| 0
|
from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = 'Exiern'
language = 'en'
url = 'http://www.exiern.com/'
start_date = '2005-09-06'
rights = 'Dan Standing'
class Crawler(CrawlerBase):
history_capable_days = 30
schedule = 'Tu,Th'
time_zone = 'US/Eastern'
def crawl(self, pub_date):
feed = self.parse_feed('http://www.exiern.com/?feed=rss2')
        for entry in feed.for_date(pub_date):
url = entry.summary.src('img', allow_multiple=True)
if url:
url = url[0]
url = url.replace('comics-rss', 'comics')
title = entry.title
return CrawlerImage(url, title)
|
djurodrljaca/tuleap-rest-api-client
|
Tuleap/RestClient/PullRequests.py
|
Python
|
lgpl-3.0
| 6,833
| 0.002927
|
"""
Created on 04.07.2017
:author: Humbert Moreaux
Tuleap REST API Client for Python
Copyright (c) Humbert Moreaux, All rights reserved.
This Python module is free software; you can redistribute it and/or modify it under the terms of the
GNU Lesser General Public License as published by the Free Software Foundation; either version 3.0
of the License, or (at your option) any later version.
This Python module is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with this library. If
not, see <http://www.gnu.org/licenses/>.
"""
import json
# Public -------------------------------------------------------------------------------------------
class PullRequests(object):
"""
Handles "/pull_requests" methods of the Tuleap REST API.
Fields type information:
:type _connection: Tuleap.RestClient.Connection.Connection
:type _data: dict | list[dict]
"""
def __init__(self, connection):
"""
Constructor
:param connection: connection object (must already be logged in)
:type connection: Tuleap.RestClient.Connection.Connection
"""
self._connection = connection
self._data = None
def get_data(self):
"""
Get data received in the last response message.
:return: Response data
:rtype: dict | list[dict]
        :note: One of the request methods should be successfully executed before this method is
called!
"""
return self._data
def request_pull_request(self, pull_request_id):
"""
Request pull request data from the server using the "/pull_requests" method of the Tuleap REST
API.
:param int pull_request_id: Pull request ID
:return: success: Success or failure
:rtype: bool
"""
# Check if we are logged in
if not self._connection.is_logged_in():
return False
# Get pull request
relative_url = "/pull_requests/{:}".format(pull_request_id)
success = self._connection.call_get_method(relative_url)
# parse response
if success:
self._data = json.loads(self._connection.get_last_response_message().text)
return success
def request_comments(self, pull_request_id, limit=10, offset=None):
"""
Request pull request comments using the "/pull_requests" method of the Tuleap REST API.
:param pull_request_id: Pull request ID
:param int limit: Optional parameter for maximum limit of returned projects
:param int offset: Optional parameter for start index for returned projects
:return: success: Success or failure
:rtype: bool
"""
# Check if we are logged in
if not self._connection.is_logged_in():
return False
# Get pull request comments
relative_url = "/pull_requests/{:}/comments".format(pull_request_id)
parameters = dict()
if limit is not None:
parameters["limit"] = limit
if offset is not None:
parameters["offset"] = offset
success = self._connection.call_get_method(relative_url, parameters)
# parse response
if success:
self._data = json.loads(self._connection.get_last_response_message().text)
return success
def request_file_diff(self, pull_request_id, path):
"""
Request pull request diff of a given file using the "/pull_requests" method of the Tuleap REST API.
:param pull_request_id: Pull request ID
:param path: File path
:return: success: Success or failure
:rtype: bool
"""
# Check if we are logged in
if not self._connection.is_logged_in():
return False
# Get pull request diff of a given file
relative_url = "/pull_requests/{:}/file_diff".format(pull_request_id)
parameters = dict()
parameters["path"] = path
success = self._connection.call_get_method(relative_url, parameters)
# parse response
if success:
self._data = json.loads(self._connection.get_last_response_message().text)
return success
def request_files(self, pull_request_id):
"""
Request pull request files using the "/pull_requests" method of the Tuleap REST API.
:param pull_request_id: Pull request ID
:return: success: Success or failure
:rtype: bool
"""
# Check if we are logged in
if not self._connection.is_logged_in():
return False
# Get pull request files
relative_url = "/pull_requests/{:}/files".format(pull_request_id)
success = self._connection.call_get_method(relative_url)
# parse response
if success:
self._data = json.loads(self._connection.get_last_response_message().text)
return success
def create_pull_request(self, repository_id, branch_src, repository_dest_id, branch_dest):
"""
Create a pull request from the server using the "/pull_requests" method of the REST API.
:param int repository_id: Repository ID
:param string branch_src: Branch source name
:param int repository_dest_id: Destination repository ID
        :param string branch_dest: Destination branch name
:return: success: Success or failure
:rtype: bool
"""
# Check if we are logged in
if not self._connection.is_logged_in():
return False
# Create a pull request
relative_url = "/pull_requests"
parameters = dict()
if repository_id and branch_src and repository_dest_id and branch_dest:
parameters["content"] = {
"repository_id": repository_id,
"branch_src": branch_src,
"repository_dest_id": repository_dest_id,
"branch_dest": branch_dest,
}
else:
raise Exception("Error: invalid content values")
success = self._connection.call_post_method(relative_url, parameters)
# parse response
if success:
self._data = json.loads(self._connection.get_last_response_message().text)
return success
def get_last_response_message(self):
"""
Get last response message.
:return: Last response message
:rtype: requests.Response
:note: This is just a proxy to the connection's method.
"""
        return self._connection.get_last_response_message()
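# Hypothetical usage sketch (assumes `connection` is an already logged-in
# Tuleap.RestClient.Connection.Connection; the ID and limits are illustrative):
#
#     pull_requests = PullRequests(connection)
#     if pull_requests.request_comments(42, limit=50, offset=0):
#         comments = pull_requests.get_data()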
|
uraplutonium/adtree-py
|
src/BayesUpdating.py
|
Python
|
gpl-2.0
| 2,726
| 0.005869
|
# Add any code that updates the current probability
# values of any of the nodes here.
# For example, here is a method that updates the probability of
# a single node, where this node is assumed to have a single parent.
def update_node_with_one_parent(n):
'''
For all possible values pv of the current node,
For all possible values ppv of the parent,
Look up the conditional probability of pv given ppv.
and multiply it by the current prob. of that parent state (ppv)
and accumulate these to get the current probability of pv.
'''
    if len(n.parents) != 1:
print "The function update_node_with_one_parent cannot handle node "+n.name
print "It does not have exactly one parent."
return
parent = n.parents[0]
for pv in n.possible_values:
n.current_prob[pv] = 0.0
for ppv in n.parents[0].possible_values:
conditional = n.name+'='+str(pv)+'|'+parent.name+'='+str(ppv)
n.current_prob[pv] += n.p[conditional] * parent.current_prob[ppv]
def gen_cartesian_product(sets):
'''Return the cartesian product of a list of sets.
For example: [['a','b'],[0,1],[7,8,9]] should give a 12 element set of triples.'''
if len(sets)==1:
return map(lambda set: [set], sets[0])
subproduct = gen_cartesian_product(sets[1:])
prod = []
for elt in sets[0]:
new_tuples = map(lambda tup: [elt]+tup, subproduct)
prod = prod + new_tuples
return prod
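# Quick sketch of gen_cartesian_product on the docstring's example; uncommenting
# this prints the 12 expected triples:
#print gen_cartesian_product([['a', 'b'], [0, 1], [7, 8, 9]])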
def update_node_with_k_parents(n):
'''
For all possible values pv of the current node,
For all possible values ppv of each of the parents,
Look up the conditional probability of pv given ppv.
and multiply it by the current prob. of that parent state (ppv)
and accumulate these to get the current probability of pv.
'''
print "Updating node: "+n.name
if len(n.parents) < 1:
print "The function update_node_with_k_parents cannot handle node "+n.name
print "It does not have any parents."
return
cartesian_prod = gen_cartesian_product(map(lambda p: p.possible_values, n.parents))
parent_names = map(lambda p: p.name, n.parents)
for pv in n.possible_values:
        n.current_prob[pv] = 0.0
print " Updating current prob. of "+pv
for ppv_tuple in cartesian_prod:
print " Adding the contribution for "+str(ppv_tuple)
conditional = n.name+'='+pv+'|'+str(parent_names) +'='+str(ppv_tuple)
            parent_vector_prob = reduce(lambda a,b:a*b, map(lambda p, pv:p.current_prob[pv], n.parents, ppv_tuple))
n.current_prob[pv] += n.p[conditional] * parent_vector_prob
#update_node_with_one_parent(nodeB)
|
datalogics/scons
|
test/Java/JAVAC.py
|
Python
|
mit
| 2,542
| 0.003934
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test setting the JAVAC variable.
"""
import os
import os.path
import string
import sys
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('myjavac.py', r"""
import sys
args = sys.argv[1:]
while args:
a = args[0]
if a == '-d':
args = args[1:]
elif a == '-sourcepath':
args = args[1:]
else:
break
args = args[1:]
for file in args:
infile = open(file, 'rb')
outfile = open(file[:-5] + '.class', 'wb')
for l in infile.readlines():
if l[:9] != '/*javac*/':
outfile.write(l)
sys.exit(0)
""")
test.write('SConstruct', """
env = Environment(tools = ['javac'],
JAVAC = r'%(_python_)s myjavac.py')
env.Java(target = '.', source = '.')
""" % locals())
test.write('test1.java', """\
test1.java
/*javac*/
line 3
""")
test.run(arguments = '.', stderr = None)
test.must_match('test1.class', "test1.java\nline 3\n")
if os.path.normcase('.java') == os.path.normcase('.JAVA'):
test.write('SConstruct', """\
env = Environment(tools = ['javac'],
JAVAC = r'%(_python_)s myjavac.py')
env.Java(target = '.', source = '.')
""" % locals())
test.write('test2.JAVA', """\
test2.JAVA
/*javac*/
line 3
""")
test.run(arguments = '.', stderr = None)
test.must_match('test2.class', "test2.JAVA\nline 3\n")
test.pass_test()
|
wolsen/secret-santa
|
secretsanta/mail.py
|
Python
|
mit
| 4,308
| 0.001161
|
#!/bin/env python
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Billy Olsen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from email.mime.text import MIMEText
from jinja2 import Environment, FileSystemLoader
from datetime import datetime as dt
import os
import six
import smtplib
# Get the directory for this file.
SECRET_SANTA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'templates')
j2env = Environment(loader=FileSystemLoader(SECRET_SANTA_DIR),
trim_blocks=False)
class SantaMail(object):
"""
The SantaMail object is used to send email. This class will load email
templates that should be sent out (the master list email and the email
    for each Secret Santa).
Templates will be loaded from the template directory and is configurable
via the template_master and template_santa configuration variables.
"""
    REQUIRED_PARAMS = ['author', 'email', 'smtp', 'username', 'password']
def __init__(self, author, email, smtp, username, password,
template_master="master.tmpl", template_santa="santa.tmpl"):
self.author = author
self.email = email
self.smtp = smtp
self.username = username
self.password = password
self.template_master = template_master
self.template_santa = template_santa
def send(self, pairings):
"""
Sends the emails out to the secret santa participants.
The secret santa host (the user configured to send the email from)
will receive a copy of the master list.
Each Secret Santa will receive an email with the contents of the
template_santa template.
"""
for pair in pairings:
self._send_to_secret_santa(pair)
self._send_master_list(pairings)
def _do_send(self, toaddr, body, subject):
try:
msg = MIMEText(body)
msg['Subject'] = subject
msg['From'] = self.email
msg['To'] = toaddr
server = smtplib.SMTP(self.smtp)
server.starttls()
server.login(self.username, self.password)
server.sendmail(self.email, [toaddr], msg.as_string())
server.quit()
except:
print("Error sending email to %s!" % toaddr)
def _send_to_secret_santa(self, pair):
"""
Sends an email to the secret santa pairing.
"""
(giver, receiver) = pair
template = j2env.get_template(self.template_santa)
body = template.render(giver=giver, receiver=receiver)
year = dt.utcnow().year
subject = ('Your %s Farmer Family Secret Santa Match' % year)
self._do_send(giver.email, body, subject)
def _send_master_list(self, pairings):
"""
Sends an email to the game master.
"""
pair_list = []
for pair in pairings:
(giver, recipient) = pair
pair_list.append("%s -> %s" % (giver.name, recipient.name))
template = j2env.get_template(self.template_master)
body = template.render(pairs=pair_list)
year = dt.utcnow().year
subject = ('%s Farmer Family Secret Santa Master List' % year)
self._do_send(self.email, body, subject)
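# Hypothetical usage sketch (host and credentials are placeholders):
#
#     mailer = SantaMail('Santa', 'santa@example.com', 'smtp.example.com',
#                        'username', 'password')
#     mailer.send(pairings)  # pairings: iterable of (giver, receiver) pairs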
|
moshthepitt/answers
|
questions/migrations/0014_auto_20160210_0406.py
|
Python
|
mit
| 573
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('questions', '0013_auto_20160210_0400'),
]
operations = [
migrations.AlterField(
model_name='category',
            name='order',
field=models.PositiveIntegerField(default=1),
),
migrations.AlterField(
model_name='question',
name='order',
field=models.PositiveIntegerField(default=1),
),
]
|
bdcht/amoco
|
tests/test_arch_tricore.py
|
Python
|
gpl-2.0
| 918
| 0.037037
|
import pytest
from amoco.config import conf
conf.UI.formatter = 'Null'
conf.Cas.unicode = False
conf.UI.unicode = False
from amoco.arch.tricore import cpu
def test_decoder_START():
c = b'\x91\x00\x00\xf8'
i = cpu.disassemble(c)
    assert i.mnemonic=='MOVH_A'
assert i.operands[0] is cpu.A[15]
assert i.operands[1]==0x8000
c = b'\xd9\xff\x14\x02'
i = cpu.disassemble(c)
assert i.mnemonic=="LEA"
assert i.mode=="Long-offset"
    assert i.operands[2]==0x2014
c = b'\xdc\x0f'
i = cpu.disassemble(c)
assert i.mnemonic=="JI"
assert i.operands[0]==cpu.A[15]
c = b'\x00\x90'
i = cpu.disassemble(c)
assert i.mnemonic=="RET"
c = b'\x00\x00'
i = cpu.disassemble(c)
assert i.mnemonic=="NOP"
def test_decoder_ldw():
c = b'\x19\xf0\x10\x16'
i = cpu.disassemble(c)
assert str(i)=="ld.w d0 , a15, 0x6050"
def test_movh():
c = b'\x7b\xd0\x38\xf1'
i = cpu.disassemble(c)
|
xiangke/pycopia
|
QA/pycopia/reports/__init__.py
|
Python
|
lgpl-2.1
| 17,925
| 0.004965
|
#!/usr/bin/python2.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
# $Id$
#
# Copyright (C) 1999-2006 Keith Dart <keith@kdart.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
"""
Various test reports and formatters are defined here. These are used for
unit test and test framework reporting.
Generally, you don't use this package in the normal way. Instead, you call
the 'get_report' function in this module with a particular pattern of
parameters and it will return a report object according to that. Any
necessary report objects and modules are specified there, and imported as
necessary.
e.g.:
get_report( ("StandardReport", "reportfile", "text/plain") )
Note that the argument is a single tuple. A list of these may be supplied
for a "stacked" report.
The first argument is a report object name (plus module, if necessary).
Any remaining arguments in the tuple are passed to the specified report's
constructor.
"""
__all__ = ['ANSI', 'Eventlog', 'Curses', 'Html', 'Email']
import sys, os
from pycopia import UserFile
from pycopia import timelib
NO_MESSAGE = "no message"
# map mime type to formatter class name and file extension
_FORMATTERS = {
None: ("StandardFormatter", "txt"), # default
"text/plain": ("StandardFormatter", "txt"), # plain text
"text/ascii": ("StandardFormatter", "asc"), # plain text
"text/html": ("pycopia.reports.Html.XHTMLFormatter", "html"), # HTML
"text/ansi": ("pycopia.reports.ANSI.ANSIFormatter", "ansi"), # text with ANSI-term color escapes
"text/ansi; charset=utf8": ("pycopia.reports.utf8ANSI.UTF8Formatter", "ansi"),
}
# register another formatter object that adheres to the NullFormatter
# interface.
def register_formatter(mimetype, classpath, fileextension):
global _FORMATTERS
_FORMATTERS[mimetype] = (classpath, fileextension)
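# For example, a hypothetical JSON formatter could be registered like this
# (the classpath below is illustrative, not a real pycopia module):
#
#     register_formatter("application/json",
#                        "pycopia.reports.Json.JSONFormatter", "json")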
class ReportError(Exception):
pass
class ReportFindError(ReportError):
pass
class BadReportError(ReportError):
pass
class NullFormatter(object):
def title(self, title):
return ""
def heading(self, text, level=1):
return ""
def paragraph(self, text, level=1):
return ""
def summaryline(self, line):
return ""
def message(self, msgtype, msg, level=1):
return ""
def passed(self, msg=NO_MESSAGE, level=1):
return self.message("PASSED", msg, level)
def failed(self, msg=NO_MESSAGE, level=1):
return self.message("FAILED", msg, level)
def expectedfail(self, msg=NO_MESSAGE, level=1):
return self.message("EXPECTED_FAIL", msg, level)
def incomplete(self, msg=NO_MESSAGE, level=1):
return self.message("INCOMPLETE", msg, level)
def abort(self, msg=NO_MESSAGE, level=1):
return self.message("ABORT", msg, level)
def info(self, msg, level=1):
return self.message("INFO", msg, level)
def diagnostic(self, msg, level=1):
return self.message("DIAGNOSTIC", msg, level)
def text(self, text):
return text
def analysis(self, text):
return text
def url(self, text, url):
return ""
def page(self):
return ""
def endpage(self):
return ""
def section(self):
return ""
def endsection(self):
return ""
def initialize(self, *args):
return ""
def finalize(self):
return ""
class NullReport(object):
"""NullReport defines the interface for report objects. It is the base
class for all Report objects."""
# overrideable methods
def write(self, text):
raise NotImplementedError, "override me!"
def writeline(self, text=""):
raise NotImplementedError, "override me!"
def writelines(self, lines):
raise NotImplementedError, "override me!"
filename = property(lambda s: None)
filenames = property(lambda s: [])
def initialize(self, config=None): pass
def logfile(self, filename): pass
def finalize(self): pass
def add_title(self, title): pass
def add_heading(self, text, level=1): pass
def add_message(self, msgtype, msg, level=1): pass
def add_summary(self, entries): pass
def add_text(self, text): pass
def add_analysis(self, text): pass
def add_data(self, data, datatype, note=None): pass
def add_url(self, text, url): pass
def passed(self, msg=NO_MESSAGE, level=1): pass
def failed(self, msg=NO_MESSAGE, level=1): pass
def expectedfail(self, msg=NO_MESSAGE, level=1): pass
def incomplete(self, msg=NO_MESSAGE, level=1): pass
def abort(self, msg=NO_MESSAGE, level=1): pass
def info(self, msg, level=1): pass
def diagnostic(self, msg, level=1): pass
def newpage(self): pass
def newsection(self): pass
class DebugReport(NullReport):
"""Used for debugging tests and reports. Just emits plain messages.
"""
# overrideable methods
def write(self, text):
raise NotImplementedError, "override me!"
def writeline(self, text=""):
raise NotImplementedError, "override me!"
def writelines(self, lines):
raise NotImplementedError, "override me!"
filename = property(lambda s: "")
filenames = property(lambda s: [])
def initialize(self, config=None):
print "initialize: %r" % (config,)
def logfile(self, filename):
print "logfile:", filename
def finalize(self):
print "finalize"
def add_title(self, title):
print "add_title:", title
def add_heading(self, text, level=1):
print "add_heading:", repr(text), level
def add_message(self, msgtype, msg, level=1):
print "add_message:", msgtype, repr(msg), level
def add_summary(self, entries):
print "add_summary"
def add_text(self, text):
print "add_text"
def add_analysis(self, text):
print "add_analysis"
def add_data(self, data, datatype, note=None):
print "add_data type: %s note: %s" % (datatype, note)
def add_url(self, text, url):
print "add_url:", repr(text), repr(url)
def passed(self, msg=NO_MESSAGE, level=1):
print "passed:", repr(msg), level
def failed(self, msg=NO_MESSAGE, level=1):
print "failed:", repr(msg), level
def expectedfail(self, msg=NO_MESSAGE, level=1):
print "expected fail:",repr(msg), level
def incomplete(self, msg=NO_MESSAGE, level=1):
print "incomplete:", repr(msg), level
def abort(self, msg=NO_MESSAGE, level=1):
print "abort:", repr(msg), level
def info(self, msg, level=1):
print "info:", repr(msg), level
def diagnostic(self, msg, level=1):
print "diagnostic:", repr(msg), level
def newpage(self):
print "newpage"
def newsection(self):
print "newsection"
class StandardReport(UserFile.FileWrapper, NullReport):
"""StandardReport writes to a file or file-like object, such as stdout. If
the filename specified is "-" then use stdout. """
def __init__(self, name=None, formatter=None):
self._do_close = 0
self._formatter, self.fileext = get_formatter(formatter)
if type(name) is str:
if name == "-":
fo = sys.stdout
else:
name = "%s.%s" % (name, self.fileext)
fo = open(os.path.expanduser(os.path.expandvars(name)), "w")
self._do_close = 1
elif name is None:
fo = sys.stdout
else:
fo = name # better be a file object
UserFile.FileWrapper.__init__(self, fo)
filename = property(lambda s: s._fo.name)
filenames = property(lambda s: [s._fo.name])
def initialize(self, config=None):
self.write(self._formatter.initialize())
|
altai/altai-api
|
altai_api/exceptions.py
|
Python
|
lgpl-2.1
| 4,527
| 0.000221
|
# vim: tabstop=8 shiftwidth=4 softtabstop=4 expandtab smarttab autoindent
# Altai API Service
# Copyright (C) 2012-2013 Grid Dynamics Consulting Services, Inc
# All Rights Reserved
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import traceback
class AltaiApiException(Exception):
def __init__(self, message, status_code, reason=None, exc_type=None):
super(AltaiApiException, self).__init__(message)
self.status_code = status_code
self.reason = reason
if exc_type is not None:
self.exc_type = exc_type
else:
self.exc_type = self.__class__.__name__
def get_response_object(self):
lines = traceback.format_exception_only(type(self), self)
result = {
'message': '\n'.join(lines),
'error-type': self.exc_type
}
if self.reason:
result['reason'] = self.reason
return result
class InvalidRequest(AltaiApiException):
"""Exception raised on invalid requests"""
def __init__(self, message, reason=None):
super(InvalidRequest, self).__init__(message, 400, reason)
class InvalidElement(InvalidRequest):
def __init__(self, message, name, reason=None):
super(InvalidElement, self).__init__(message, reason)
self.name = name
def get_response_object(self):
rv = super(InvalidElement, self).get_response_object()
rv['element-name'] = self.name
return rv
class UnknownElement(InvalidElement):
"""Exception raised when unknown elements are present in response"""
def __init__(self, name, reason=None):
super(UnknownElement, self).__init__(
'Unknown resource element: %r' % name, name, reason)
class MissingElement(InvalidElement):
"""Exception raised when required request elements are missing"""
def __init__(self, name, reason=None):
super(MissingElement, self).__init__(
'Required element is missing: %r' % name, name, reason)
class InvalidElementValue(InvalidElement):
"""Exception raised when request element has illegal value"""
def __init__(self, name, typename, value, reason=None):
msg = 'Invalid value for element %s of type %s: %r' \
% (name, typename, value)
super(InvalidElementValue, self).__init__(msg, name, reason)
self.typename = typename
self.value = value
def get_response_object(self):
rv = super(InvalidElementValue, self).get_response_object()
rv['element-value'] = self.value
rv['element-type'] = self.typename
return rv
class InvalidArgument(InvalidRequest):
"""Exception raised when invalid argument is supplied for request"""
def __init__(self, message, name, reason=None):
super(InvalidArgument, self).__init__(message, reason)
self.name = name
def get_response_object(self):
rv = super(InvalidArgument, self).get_response_object()
rv['argument-name'] = self.name
return rv
class UnknownArgument(InvalidArgument):
"""Exception raised when unknown arguments are present in
|
request"""
def __init__(self, name, reason=None):
super(UnknownArgument, self).__init__(
'Unknown request argument: %r' % name, name, reason)
class InvalidArgumentValue(InvalidArgument):
"""Exception raised when some client inpu
|
t has illegal value"""
def __init__(self, name, typename, value, reason=None):
msg = 'Invalid value for argument %s of type %s: %r' \
% (name, typename, value)
super(InvalidArgumentValue, self).__init__(msg, name, reason)
self.typename = typename
self.value = value
def get_response_object(self):
rv = super(InvalidArgumentValue, self).get_response_object()
rv['argument-value'] = self.value
rv['argument-type'] = self.typename
return rv
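# Hypothetical usage sketch: raising a typed error and serializing it for an
# HTTP response body (the argument values are illustrative):
#
#     try:
#         raise InvalidArgumentValue('limit', 'int', 'abc')
#     except AltaiApiException as exc:
#         body = exc.get_response_object()  # has argument-name/-value/-type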
|
MrSurly/micropython
|
tests/basics/class_inplace_op2.py
|
Python
|
mit
| 1,293
| 0.000773
|
# Test inplace special methods enabled by MICROPY_PY_ALL_INPLACE_SPECIAL_METHODS
class A:
def __imul__(self, other):
print("__imul__")
return self
def __imatmul__(self, other):
        print("__imatmul__")
return self
def __ifloordiv__(self, other):
print("__ifloordiv__")
return self
def __itruediv__(self, other):
print("__itruediv__")
return self
def __imod__(self, other):
print("__imod__")
return self
def __ipow__(self, other):
print("__ipow__")
return self
def __ior__(self, other):
print("__ior__")
return self
def __ixor__(self, other):
print("__ixor__")
return self
def __iand__(self, other):
print("__iand__")
return self
def __ilshift__(self, other):
print("__ilshift__")
return self
def __irshift__(self, other):
print("__irshift__")
return self
a = A()
try:
a *= None
except TypeError:
print("SKIP")
raise SystemExit
a @= None
a //= None
a /= None
a %= None
a **= None
a |= None
a ^= None
a &= None
a <<= None
a >>= None
# Normal operator should not fallback to inplace operator
try:
a * None
except TypeError:
print("TypeError")
|
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/Resources/FlAv/PyScripts/Lib/flav/flav/cmd/flavcontrol/type_Result.py
|
Python
|
unlicense
| 4,626
| 0.002162
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: type_Result.py
from types import *
class AvailableResult:
def __init__(self):
self.__dict__['available'] = False
def __getattr__(self, name):
if name == 'available':
return self.__dict__['available']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'available':
self.__dict__['available'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddBool(MSG_KEY_RESULT_AVAILABLE_AVAILABLE, self.__dict__['available'])
mmsg.AddMessage(MSG_KEY_RESULT_AVAILABLE, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_AVAILABLE, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['available'] = submsg.FindBool(MSG_KEY_RESULT_AVAILABLE_AVAILABLE)
class StatusResult:
def __init__(self):
self.__dict__['major'] = 0
self.__dict__['minor'] = 0
self.__dict__['fix'] = 0
self.__dict__['build'] = 0
self.__dict__['available'] = False
def __getattr__(self, name):
if name == 'major':
return self.__dict__['major']
if name == 'minor':
return self.__dict__['minor']
if name == 'fix':
return self.__dict__['fix']
if name == 'build':
return self.__dict__['build']
if name == 'available':
return self.__dict__['available']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'major':
self.__dict__['major'] = value
elif name == 'minor':
            self.__dict__['minor'] = value
elif name == 'fix':
self.__dict__['fix'] = value
elif name == 'build':
            self.__dict__['build'] = value
elif name == 'available':
self.__dict__['available'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddU32(MSG_KEY_RESULT_STATUS_MAJOR, self.__dict__['major'])
submsg.AddU32(MSG_KEY_RESULT_STATUS_MINOR, self.__dict__['minor'])
submsg.AddU32(MSG_KEY_RESULT_STATUS_FIX, self.__dict__['fix'])
submsg.AddU32(MSG_KEY_RESULT_STATUS_BUILD, self.__dict__['build'])
submsg.AddBool(MSG_KEY_RESULT_STATUS_AVAILABLE, self.__dict__['available'])
mmsg.AddMessage(MSG_KEY_RESULT_STATUS, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_STATUS, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['major'] = submsg.FindU32(MSG_KEY_RESULT_STATUS_MAJOR)
self.__dict__['minor'] = submsg.FindU32(MSG_KEY_RESULT_STATUS_MINOR)
self.__dict__['fix'] = submsg.FindU32(MSG_KEY_RESULT_STATUS_FIX)
self.__dict__['build'] = submsg.FindU32(MSG_KEY_RESULT_STATUS_BUILD)
self.__dict__['available'] = submsg.FindBool(MSG_KEY_RESULT_STATUS_AVAILABLE)
class StringResult:
def __init__(self):
self.__dict__['str'] = ''
def __getattr__(self, name):
if name == 'str':
return self.__dict__['str']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'str':
self.__dict__['str'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddStringUtf8(MSG_KEY_RESULT_STRING_VALUE, self.__dict__['str'])
mmsg.AddMessage(MSG_KEY_RESULT_STRING, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_RESULT_STRING, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['str'] = submsg.FindString(MSG_KEY_RESULT_STRING_VALUE)
|
pprivulet/DataScience
|
Dic/getDetail.py
|
Python
|
apache-2.0
| 3,537
| 0.015653
|
# -*- coding:utf-8 -*-
#html_doc = '''<div><a href="http://www.weblio.jp/content/%E5%BD%A2%E5%AE%B9%E5%8B%95%E8%A9%9E" title="形容動詞の意味" class=crosslink>形容動詞</a>「<a href="http://www.weblio.jp/content/%E3%82%A2%E3%83%BC%E3%83%86%E3%82%A3%E3%83%95%E3%82%A3%E3%82%B7%E3%83%A3%E3%83%AB" title="アーティフィシャルの意味" class=crosslink>アーティフィシャル</a>だ」が、<a href="http://www.weblio.jp/content/%E6%8E%A5%E5%B0%BE%E8%AA%9E" title="接尾語の意味" class=crosslink>接尾語</a>「さ」により<a href="http://www.weblio.jp/content/%E4%BD%93%E8%A8%80" title="体言の意味" class=crosslink>体言</a>化した形。<br><br class=nhgktD><div><!--AVOID_CROSSLINK--><p class=nhgktL>終止形</p><p class=nhgktR>アーティフィシャルだ <a href="http://www.weblio.jp/content/%E3%82%A2%E3%83%BC%E3%83%86%E3%82%A3%E3%83%95%E3%82%A3%E3%82%B7%E3%83%A3%E3%83%AB" title="アーティフィシャル">» 「アーティフィシャル」の意味を調べる</a></p><!--/AVOID_CROSSLINK--><br class=clr></div>'''
#from bs4 import BeautifulSoup
#soup = BeautifulSoup(html_doc, 'html.parser')
#a = [text for text in soup.stripped_strings]
#print ''.join(a[:-1])
import socket
import urllib2
import traceback
import re
#import MySQLdb
import time
from bs4 import BeautifulSoup
#from complainDetail import *
timeout = 10
socket.setdefaulttimeout(timeout)
def fetchDetail(link, word):
tryNum = 3
tn = 0
while tn < tryNum:
details = []
try:
f = urllib2.urlopen(link)
content = f.read()
soup = BeautifulSoup(content, 'html.parser')
main = soup.find(attrs={'class':'Nhgkt'})
left = soup.find_all(attrs={'class':'nhgktL'})
right = soup.find_all(attrs={'class':'nhgktR'})
if(left):
for text in main.stripped_strings:
if(re.match(u'終止形$', text)!=None):break
details.append(text)
print '#'.join(details).encode('utf8'),
print '%',left[0].string.encode('utf8'), ':',
aList = right[0].find_all('a')
for a in aList:
print a['title'].encode('utf8'),
print
else:
for text in main.stripped_strings:
if(u'»' in text):break
details.append(text)
print '#'.join(details).encode('utf8')
break
except Exception,e:
print e
tn = tn + 1
#print url, " access error!"
#print "try ", tn, "time"
time.sleep(5)
if tn==tryNum:
#print "Cannot fetch page!"
return -1
return 0
if __name__ == "__main__":
wordsUrlList = open('verb_ok.txt')
for line in wordsUrlList.readlines():
l = line.split(' ')
link = l[0]
word = l[1].strip('\n')
print word, '%', link, '%',
if(fetchDetail(link, word)==-1):
print link, word, "ERROR."
print "Finished"
#indexUrl = "http://www.weblio.jp/category/dictionary/nhgkt/aa"
#f = urllib2.urlopen(indexUrl)
#content = f.read()
#soup = BeautifulSoup(content, 'html.parser')
#urlTable = soup.find(attrs={'class':'kanaAlpha'})
#aList = urlTable.find_all('a')
#for a in aList:
# print '"'+a['href']+'",'
|
gtaylor/EVE-Market-Data-Relay
|
emdr/daemons/announcer/main.py
|
Python
|
mit
| 1,274
| 0.00314
|
"""
Gateways connect to Announcer daemons, sending zlib compressed JSON
representations of market data. From here, the Announcer PUBs the messages
out to anyone SUBscribing. This could be Relays, or end-users.
"""
import logging
logger = logging.getLogger(__name__)
import gevent
import zmq.green as zmq
from emdr.conf import default_settings as settings
def run():
"""
Fires up the announcer process.
"""
context = zmq.Context()
receiver = context.socket(zmq.SUB)
receiver.setsockopt(zmq.SUBSCRIBE, '')
for binding in settings.ANNOUNCER_RECEIVER_BINDINGS:
# Gateways connect to the Announcer to PUB messages.
receiver.bind(binding)
sender = context.socket(zmq.PUB)
for binding in settings.ANNOUNCER_SENDER_BINDINGS:
# Announcers offer up the data via PUB.
sender.bind(binding)
def relay_worker(message):
"""
This is the worker function that re-sends the incoming messages out
to any subscribers.
:param str message: A JSON string to re-broadcast.
"""
sender.send(message)
logger.debug('Message announced.')
logger.info("Announcer is now listening for order data.")
while True:
gevent.spawn(relay_worker, receiver.recv())
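# Hypothetical subscriber sketch (the endpoint is illustrative; real endpoints
# come from settings.ANNOUNCER_SENDER_BINDINGS):
#
#     ctx = zmq.Context()
#     sub = ctx.socket(zmq.SUB)
#     sub.setsockopt(zmq.SUBSCRIBE, '')
#     sub.connect('tcp://announcer.example.com:8050')
#     message = sub.recv()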
|
Tojaj/yum-metadata-diff
|
yum_metadata_diff/diff_objects.py
|
Python
|
lgpl-2.1
| 10,928
| 0.001647
|
import pprint
import difflib
_MAX_LENGTH = 80
def pretty_diff(d1, d2):
def safe_repr(obj, short=False):
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if not short or len(result) < _MAX_LENGTH:
return result
return result[:_MAX_LENGTH] + ' [truncated]...'
def sequence_diff(seq1, seq2, seq_type=None):
if seq_type is not None:
seq_type_name = seq_type.__name__
if not isinstance(seq1, seq_type):
return 'First sequence is not a %s: %s' % \
(seq_type_name, safe_repr(seq1))
if not isinstance(seq2, seq_type):
return 'Second sequence is not a %s: %s' % \
(seq_type_name, safe_repr(seq2))
else:
seq_type_name = "sequence"
differing = None
try:
len1 = len(seq1)
except (TypeError, NotImplementedError):
differing = 'First %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
try:
len2 = len(seq2)
except (TypeError, NotImplementedError):
differing = 'Second %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
if seq1 == seq2:
return
seq1_repr = safe_repr(seq1)
seq2_repr = safe_repr(seq2)
if len(seq1_repr) > 30:
seq1_repr = seq1_repr[:30] + '...'
if len(seq2_repr) > 30:
seq2_repr = seq2_repr[:30] + '...'
elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
differing = '%ss differ: %s != %s\n' % elements
for i in xrange(min(len1, len2)):
try:
item1 = seq1[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of first %s\n' %
(i, seq_type_name))
break
try:
item2 = seq2[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of second %s\n' %
(i, seq_type_name))
break
if item1 != item2:
differing += ('\nFirst differing element %d:\n%s\n%s\n' %
(i, item1, item2))
break
else:
if (len1 == len2 and seq_type is None and
type(seq1) != type(seq2)):
# The sequences are the same, but have differing types.
return
if len1 > len2:
differing += ('\nFirst %s contains %d additional '
'elements.\n' % (seq_type_name, len1 - len2))
try:
differing += ('First extra element %d:\n%s\n' %
(len2, seq1[len2]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of first %s\n' % (len2, seq_type_name))
elif len1 < len2:
differing += ('\nSecond %s contains %d additional '
'elements.\n' % (seq_type_name, len2 - len1))
try:
differing += ('First extra element %d:\n%s\n' %
(len1, seq2[len1]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of second %s\n' % (len1, seq_type_name))
standardMsg = differing
diffMsg = '\n' + '\n'.join(
difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
return standardMsg + '\n' + diffMsg
def set_diff(set1, set2):
try:
difference1 = set1.difference(set2)
except TypeError, e:
return 'invalid type when attempting set difference: %s' % e
except AttributeError, e:
return 'first argument does not support set difference: %s' % e
try:
difference2 = set2.difference(set1)
except TypeError, e:
return 'invalid type when attempting set difference: %s' % e
except AttributeError, e:
return 'second argument does not support set difference: %s' % e
if not (difference1 or difference2):
return
lines = []
if difference1:
lines.append('Items in the first set but not the second:')
for item in difference1:
lines.append(repr(item))
if difference2:
lines.append('Items in the second set but not the first:')
for item in difference2:
                lines.append(repr(item))
return '\n'.join(lines)
diff = None
if not isinstance(d1, type(d2)):
return diff
if d1 == d2:
return diff
if isinstance(d1, dict):
diff = ('\n' + '\n'.join(difflib.ndiff(
pprint.pformat(d1).splitlines(),
pprint.pformat(d2).splitlines())))
elif isinstance(d1, list):
diff = sequence_diff(d1, d2, seq_type=list)
elif isinstance(d1, tuple):
        diff = sequence_diff(d1, d2, seq_type=tuple)
elif isinstance(d1, set):
diff = set_diff(d1, d2)
elif isinstance(d1, frozenset):
diff = set_diff(d1, d2)
return diff
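# Quick sketch: pretty_diff returns None for equal inputs (or inputs of
# different types) and a human-readable diff string otherwise, e.g.:
#
#     print pretty_diff({'a': 1}, {'a': 2})  # ndiff of the two pprint forms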
class ItemDiff(object):
ITEM_NAME = "Item"
def __init__(self):
self.differences = []
def __nonzero__(self):
return bool(len(self.differences))
def __repr__(self):
return pprint.pformat(self.__dict__)
def add_difference(self, name, val_a, val_b, item_type=None, desc=None):
self.differences.append((name, val_a, val_b, item_type, desc))
def pprint(self):
msg = ""
for difference in self.differences:
name, a, b, item_type, desc = difference
msg += " %s" % name
if item_type:
msg += " [%s]" % item_type
if desc:
msg += " - %s" % desc
msg += "\n"
nice_diff = pretty_diff(a, b)
if isinstance(a, set):
tmp_a = a - b
b = b - a
a = tmp_a
msg += " [The difference is set -> Only extra items are shown]\n"
else:
msg += "\n"
msg += " 1. %s:\n" % self.ITEM_NAME
msg += " %s\n" % pprint.pformat(a, indent=8)
msg += " 2. %s:\n" % self.ITEM_NAME
msg += " %s\n" % pprint.pformat(b, indent=8)
if nice_diff:
msg += " Diff:\n"
msg += " %s\n" % "\n ".join(nice_diff.split('\n'))
return msg
class PackageDiff(ItemDiff):
ITEM_NAME = "Package"
class RepomdItemDiff(ItemDiff):
ITEM_NAME = "Value"
class MetadataDiff(object):
def __init__(self):
self.missing_items = set() # set of checksums
self.added_items = set() # set of checksums
self.changed_items = set() # set of checksums
self.items_diffs = {}
# self.packges_diffs keys are values from self.changed_items
# and values are PackageDiff objects.
def __nonzero__(self):
return bool(len(self.missing_items) or \
len(self.added_items) or \
len(self.changed_items))
def __repr__(self):
return pprint.pformat(self.__dict__)
def pprint(self, chksum_to_name_dict=None):
def translate(chksum):
if chksum_to_name_dict and chksum in chksum_to_name_dict:
return chksum_to_name_dict[chksum]
return None
msg = ""
if self.missing_i
|
robjordan/sitefinder
|
src/sitefinder_project/wsgi.py
|
Python
|
mit
| 942
| 0.003185
|
"""
WSGI config for sitefinder_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sitefinder_project.settings.production")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Wrap werkzeug debugger if DEBUG is on
from django.conf import settings
if settings.DEBUG:
try:
import django.views.debug
import six
from werkzeug.debug import DebuggedApplication
def null_technical_500_response(request, exc_type, exc_value, tb):
six.reraise(exc_type, exc_value, tb)
django.views.debug.technical_500_response = null_technical_500_response
application = DebuggedApplication(application, evalex=True)
except ImportError:
pass
|
MachineLearningControl/OpenMLC-Python
|
MLC/Population/Creation/IndividualSelection.py
|
Python
|
gpl-3.0
| 2,329
| 0.001719
|
# -*- coding: utf-8 -*-
# MLC (Machine Learning Control): A genetic algorithm library to solve chaotic problems
# Copyright (C) 2015-2017, Thomas Duriez (thomas.duriez@gmail.com)
# Copyright (C) 2015, Adrian Durán (adrianmdu@gmail.com)
# Copyright (C) 2015-2017, Ezequiel Torres Feyuk (ezequiel.torresfeyuk@gmail.com)
# Copyright (C) 2016-2017, Marco Germano Zbrun (marco.germano@intraway.com)
# Copyright (C) 2016-2017, Raúl Lopez Skuba (raulopez0@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from BaseCreation import BaseCreation
from MLC.db.mlc_repository import MLCRepository
class IndividualSelection(BaseCreation):
"""
Fill a Population with fixed Individuals.
selected_individuals: dictionary containing {Individual: positions inside
the first population}
fill_creator: creator used to fill empty positions.
    Empty positions inside the Population will be completed using the neighbor individual.
"""
def __init__(self, selected_individuals, fill_creator):
BaseCreation.__init__(self)
self.__fill_creator = fill_creator
self.__selected_individuals = selected_individuals
self.__individuals = []
def create(self, gen_size):
self.__fill_creator.create(gen_size)
self.__individuals = self.__fill_creator.individuals()
# Add Individuals
for individual, positions in self.__selected_individuals.items():
for position in positions:
if position < gen_size:
individual_id, _ = MLCRepository.get_instance().add_individual(individual)
self.__individuals[position] = (position, individual_id)
def individuals(self):
return self.__individuals
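# Hypothetical usage sketch (individual and fill_creator are placeholders):
#
#     creation = IndividualSelection({individual: [0, 3]}, fill_creator)
#     creation.create(gen_size=10)
#     population = creation.individuals()  # (position, individual_id) pairs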
|
natbraun/biggraphite
|
tests/test_drivers_utils.py
|
Python
|
apache-2.0
| 1,554
| 0
|
#!/usr/bin/env python
# Copyright 2016 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
import unittest
import mock
from biggraphite.drivers import _utils
class CountDownTest(unittest.TestCase):
_COUNT = 42
def setUp(self):
self.on_zero = mock.Mock()
self.count_down = _utils.CountDown(self._COUNT, self.on_zero)
def test_on_failure(self):
exc = Exception()
self.count_down.on_failure(exc)
        self.on_zero.assert_called_once()
# Failing again should not call the callback again.
self.count_down.on_failure(exc)
self.on_zero.assert_called_once()
def test_on_result(self):
result = "whatever this is not used"
for _ in xrange(self._COUNT - 1):
self.count_down.on_result(result)
self.on_zero.assert_not_called()
self.count_down.on_result(result)
self.on_zero.assert_called_with(None)
if __name__ == "__main__":
unittest.main()
|
joezippy/paywall
|
test-keybase.py
|
Python
|
apache-2.0
| 1,556
| 0.001928
|
#!/usr/bin/python3
import cgi
import cgitb
import datetime
import json
import os
import re
import requests
import subprocess
import sys
import time
from bmdjson import check_address
print("Content-Type: text/plain\n")
print("testing keybase")
print()
print("PASS:")
signature = "BEGIN KEYBASE SALTPACK SIGNED MESSAGE. kXR7VktZdyH7rvq v5weRa0zkSjiJmm 8dzt8BnSF7QPfAy AmWtlYORgWXP5hk aXmzZHPBPoIRpYD qsXcl0JX7RT65NS KLnnW8kwG9ujBNt r2bd6GNLnp4xVMr btCVAG2TMDpNhVf yXSbZmzQDnE6mIM Y4oS4YGVbw244Je Bc7lmO6225Gu6tj HgIwRnLz975GBZU Bc3GLDyRpvTEGXr AzRtx0gMk2FzHxf 2oimZKG. END KEYBASE SALTPACK SIGNED MESSAGE."
sig_result = check_address(signature)
for k, v in sorted(sig_result.items(), key=lambda x: x[0]):
    # is saying the leftmost of the pair k,v -- alphabetic sorting of keys
# now sig_addr, sig_by, then sig_good -- display bugged me
print("[" + str(k) + "] = ", v)
print()
print("FAIL: Bad String")
signature2 = "BEGIN KEYBASE SALTPACK SIGNED MESSAGE. kXR7VktZdy27rvq v5weRa0zkDL3e9k D1e7HgTLY1WFWdi UfZI1s56lquWUJu lBvdIblMbFGwTGa M9oYSI9cU7KjGW9 2JOGghIjQX3Fqw5 xsvEpPo9pEuA25J Ut0J0Fur0C3F8oZ n50PAvVWVmb0iEP 5MNUBEMHMo5DTtF OhK66v3FFwu0qJe 8R35q5A5ycevVsR pdaOBQQ1VGcNIlF 9YU6a0Wi5kd85JH rjSupUZ. END KEYBASE SALTPACK SIGNED MESSAGE."
sig_result = check_address(signature2)
for k, v in sorted(sig_result.items(), key=lambda x: x[0]):
# is saying the leftmost of the pair k,v -- alphabetic sorting of keys
# now sig_addr, sig_by, then sig_good -- display bugged me
print("[" + str(k) + "] = ", v)
print()
print("end.")
|
rfhk/awo-custom
|
account_invoice_line_view_oaw/__init__.py
|
Python
|
lgpl-3.0
| 199
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2015-2017 Rooms For (Hong Kong) Limted T/A OSCG
# Copyright 2017 eHanse
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import models
|
joerg84/arangodb
|
3rdParty/V8/v5.7.0.0/tools/gen-postmortem-metadata.py
|
Python
|
apache-2.0
| 25,114
| 0.01537
|
#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# Emits a C++ file to be compiled and linked into libv8 to support postmortem
# debugging tools. Most importantly, this tool emits constants describing V8
# internals:
#
# v8dbg_type_CLASS__TYPE = VALUE Describes class type values
# v8dbg_class_CLASS__FIELD__TYPE = OFFSET Describes class fields
# v8dbg_parent_CLASS__PARENT Describes class hierarchy
# v8dbg_frametype_NAME = VALUE Describes stack frame values
# v8dbg_off_fp_NAME = OFFSET Frame pointer offsets
# v8dbg_prop_NAME = OFFSET Object property offsets
# v8dbg_NAME = VALUE Miscellaneous values
#
# These constants are declared as global integers so that they'll be present in
# the generated libv8 binary.
#
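# For instance, the generated C++ file contains lines of the form (numeric
# values are illustrative):
#
#     int v8dbg_frametype_JavaScriptFrame = 6;
#     int v8dbg_SmiTagMask = 1;
#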
import re
import sys
#
# Miscellaneous constants such as tags and masks used for object identification,
# enumeration values used as indexes in internal tables, etc..
#
consts_misc = [
{ 'name': 'FirstNonstringType', 'value': 'FIRST_NONSTRING_TYPE' },
{ 'name': 'APIObjectType', 'value': 'JS_API_OBJECT_TYPE' },
{ 'name': 'SpecialAPIObjectType', 'value': 'JS_SPECIAL_API_OBJECT_TYPE' },
{ 'name': 'IsNotStringMask', 'value': 'kIsNotStringMask' },
{ 'name': 'StringTag', 'value': 'kStringTag' },
{ 'name': 'NotStringTag', 'value': 'kNotStringTag' },
{ 'name': 'StringEncodingMask', 'value': 'kStringEncodingMask' },
{ 'name': 'TwoByteStringTag', 'value': 'kTwoByteStringTag' },
{ 'name': 'OneByteStringTag', 'value': 'kOneByteStringTag' },
{ 'name': 'StringRepresentationMask',
'value': 'kStringRepresentationMask' },
{ 'name': 'SeqStringTag', 'value': 'kSeqStringTag' },
{ 'name': 'ConsStringTag', 'value': 'kConsStringTag' },
{ 'name': 'ExternalStringTag', 'value': 'kExternalStringTag' },
{ 'name': 'SlicedStringTag', 'value': 'kSlicedStringTag' },
{ 'name': 'HeapObjectTag', 'value': 'kHeapObjectTag' },
{ 'name': 'HeapObjectTagMask', 'value': 'kHeapObjectTagMask' },
{ 'name': 'SmiTag', 'value': 'kSmiTag' },
{ 'name': 'SmiTagMask', 'value': 'kSmiTagMask' },
{ 'name': 'SmiValueShift', 'value': 'kSmiTagSize' },
{ 'name': 'SmiShiftSize', 'value': 'kSmiShiftSize' },
{ 'name': 'PointerSizeLog2', 'value': 'kPointerSizeLog2' },
{ 'name': 'OddballFalse', 'value': 'Oddball::kFalse' },
{ 'name': 'OddballTrue', 'value': 'Oddball::kTrue' },
{ 'name': 'OddballTheHole', 'value': 'Oddball::kTheHole' },
{ 'name': 'OddballNull', 'value': 'Oddball::kNull' },
{ 'name': 'OddballArgumentsMarker', 'value': 'Oddball::kArgumentsMarker' },
{ 'name': 'OddballUndefined', 'value': 'Oddball::kUndefined' },
{ 'name': 'OddballUninitialized', 'value': 'Oddball::kUninitialized' },
{ 'name': 'OddballOther', 'value': 'Oddball::kOther' },
{ 'name': 'OddballException', 'value': 'Oddball::kException' },
{ 'name': 'prop_idx_first',
'value': 'DescriptorArray::kFirstIndex' },
{ 'name': 'prop_type_field',
'value': 'DATA' },
{ 'name': 'prop_type_const_field',
'value': 'DATA_CONSTANT' },
{ 'name': 'prop_type_mask',
'value': 'PropertyDetails::TypeField::kMask' },
{ 'name': 'prop_index_mask',
'value': 'PropertyDetails::FieldIndexField::kMask' },
{ 'name': 'prop_index_shift',
'value': 'PropertyDetails::FieldIndexField::kShift' },
{ 'name': 'prop_representation_mask',
'value': 'PropertyDetails::RepresentationField::kMask' },
{ 'name': 'prop_representation_shift',
'value': 'PropertyDetails::RepresentationField::kShift' },
{ 'name': 'prop_representation_integer8',
'value': 'Representation::Kind::kInteger8' },
{ 'name': 'prop_representation_uinteger8',
'value': 'Representation::Kind::kUInteger8' },
{ 'name': 'prop_representation_integer16',
'value': 'Representation::Kind::kInteger16' },
{ 'name': 'prop_representation_uinteger16',
'value': 'Representation::Kind::kUInteger16' },
{ 'name': 'prop_representation_smi',
'value': 'Representation::Kind::kSmi' },
{ 'name': 'prop_representation_integer32',
'value': 'Representation::Kind::kInteger32' },
{ 'name': 'prop_representation_double',
'value': 'Representation::Kind::kDouble' },
{ 'name': 'prop_representation_heapobject',
'value': 'Representation::Kind::kHeapObject' },
{ 'name': 'prop_representation_tagged',
'value': 'Representation::Kind::kTagged' },
{ 'name': 'prop_representation_external',
'value': 'Representation::Kind::kExternal' },
{ 'name': 'prop_desc_key',
'value': 'DescriptorArray::kDescriptorKey' },
{ 'name': 'prop_desc_details',
'value': 'DescriptorArray::kDescriptorDetails' },
{ 'name': 'prop_desc_value',
'value': 'DescriptorArray::kDescriptorValue' },
{ 'name': 'prop_desc_size',
'value': 'DescriptorArray::kDescriptorSize' },
{ 'name': 'elements_fast_holey_elements',
'value': 'FAST_HOLEY_ELEMENTS' },
{ 'name': 'elements_fast_elements',
'value': 'FAST_ELEMENTS' },
{ 'name': 'elements_dictionary_elements',
'value': 'DICTIONARY_ELEMENTS' },
{ 'name': 'bit_field2_elements_kind_mask',
'value': 'Map::ElementsKindBits::kMask' },
{ 'name': 'bit_field2_elements_kind_shift',
'value': 'Map::ElementsKindBits::kShift' },
{ 'name': 'bit_field3_dictionary_map_shift',
'value': 'Map::DictionaryMap::kShift' },
{ 'name': 'bit_field3_number_of_own_descriptors_mask',
'value': 'Map::NumberOfOwnDescriptorsBits::kMask' },
    { 'name': 'bit_field3_number_of_own_descriptors_shift',
        'value': 'Map::NumberOfOwnDescriptorsBits::kShift' },
{ 'name': 'off_fp_context',
'value': 'StandardFrameConstants::kContextOffset' },
{ 'name': 'off_fp_constant_pool',
'value': 'StandardFrameConstants::kConstantPoolOffset' },
{ 'name': 'off_fp_function',
'value': 'JavaScriptFrameConstants::kFunctionOffset' },
{ 'name': 'off_fp_args',
'value': 'JavaScriptFrameConstants::kLastParameterOffset' },
{ 'name': 'scopeinfo_idx_nparams',
'value': 'ScopeInfo::kParameterCount' },
{ 'name': 'scopeinfo_idx_nstacklocals',
'value': 'ScopeInfo::kStackLocalCount' },
|
Acehaidrey/incubator-airflow
|
airflow/providers/google/cloud/example_dags/example_automl_nl_text_sentiment.py
|
Python
|
apache-2.0
| 3,589
| 0.001115
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that uses Google AutoML services.
"""
import os
from datetime import datetime
from airflow import models
from airflow.providers.google.cloud.hooks.automl import CloudAutoMLHook
from airflow.providers.google.cloud.operators.automl import (
AutoMLCreateDatasetOperator,
AutoMLDeleteDatasetOperator,
AutoMLDeleteModelOperator,
AutoMLImportDataOperator,
AutoMLTrainModelOperator,
)
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "your-project-id")
GCP_AUTOML_LOCATION = os.environ.get("GCP_AUTOML_LOCATION", "us-central1")
GCP_AUTOML_SENTIMENT_BUCKET = os.environ.get("GCP_AUTOML_SENTIMENT_BUCKET", "gs://INVALID BUCKET NAME")
# Example values
DATASET_ID = ""
# Example model
MODEL = {
"display_name": "auto_model_1",
"dataset_id": DATASET_ID,
"text_sentiment_model_metadata": {},
}
# Example dataset
DATASET = {
"display_name": "test_text_sentiment_dataset",
"text_sentiment_dataset_metadata": {"sentiment_max": 10},
}
IMPORT_INPUT_CONFIG = {"gcs_source": {"input_uris": [GCP_AUTOML_SENTIMENT_BUCKET]}}
extract_object_id = CloudAutoMLHook.extract_object_id
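# `extract_object_id` is registered below as a user-defined macro; a templated
# operator field could then use it along these (hypothetical) lines:
#   "{{ extract_object_id(task_instance.xcom_pull('create_dataset_task')) }}"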
# Example DAG for AutoML Natural Language Text Sentiment
with models.DAG(
"example_automl_text_sentiment",
schedule_interval=None, # Override to match your needs
start_date=datetime(2021, 1, 1),
catchup=False,
user_defined_macros={"extract_object_id": extract_object_id},
tags=['example'],
) as example_dag:
create_dataset_task = AutoMLCreateDatasetOperator(
task_id="create_dataset_task", dataset=DATASET, location=GCP_AUTOML_LOCATION
)
    dataset_id = create_dataset_task.output['dataset_id']
import_dataset_task = AutoMLImportDataOperator(
task_id="import_dataset_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
input_config=IMPORT_INPUT_CONFIG,
)
MODEL["dataset_id"] = dataset_id
    create_model = AutoMLTrainModelOperator(task_id="create_model", model=MODEL, location=GCP_AUTOML_LOCATION)
model_id = create_model.output['model_id']
delete_model_task = AutoMLDeleteModelOperator(
task_id="delete_model_task",
model_id=model_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
delete_datasets_task = AutoMLDeleteDatasetOperator(
task_id="delete_datasets_task",
dataset_id=dataset_id,
location=GCP_AUTOML_LOCATION,
project_id=GCP_PROJECT_ID,
)
import_dataset_task >> create_model
delete_model_task >> delete_datasets_task
# Task dependencies created via `XComArgs`:
# create_dataset_task >> import_dataset_task
# create_dataset_task >> create_model
# create_model >> delete_model_task
# create_dataset_task >> delete_datasets_task
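#
# For instance, passing `create_dataset_task.output['dataset_id']` into the
# arguments of `import_dataset_task` and `create_model` (as done above) is
# what wires these upstream links automatically.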
|
yugangw-msft/azure-cli
|
src/azure-cli/azure/cli/command_modules/ams/_params.py
|
Python
|
mit
| 51,064
| 0.004015
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from knack.arguments import CLIArgumentType
from azure.cli.core.commands.validators import get_default_location_from_resource_group
from azure.cli.core.commands.parameters import (get_location_type,
get_enum_type,
tags_type,
get_three_state_flag)
from azure.cli.command_modules.ams._completers import (get_role_definition_name_completion_list,
get_cdn_provider_completion_list,
get_default_streaming_policies_completion_list,
get_presets_definition_name_completion_list,
get_allowed_languages_for_preset_completion_list,
get_protocols_completion_list,
get_token_type_completion_list,
get_fairplay_rentalandlease_completion_list,
get_token_completion_list,
get_mru_type_completion_list,
get_encoding_types_list,
get_allowed_resolutions_completion_list,
get_allowed_transcription_languages,
get_allowed_analysis_modes,
get_stretch_mode_types_list,
get_storage_authentication_allowed_values_list)
from azure.cli.command_modules.ams._validators import (validate_storage_account_id,
datetime_format,
validate_correlation_data,
validate_token_claim,
validate_output_assets,
validate_archive_window_length,
validate_key_frame_interval_duration)
from azure.mgmt.media.models import (Priority, AssetContainerPermission, LiveEventInputProtocol, StreamOptionsFlag, OnErrorType, InsightsType)
def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statements
name_arg_type = CLIArgumentType(options_list=['--name', '-n'], id_part='name', help='The name of the Azure Media Services account.', metavar='NAME')
account_name_arg_type = CLIArgumentType(options_list=['--account-name', '-a'], id_part='name', help='The name of the Azure Media Services account.', metavar='ACCOUNT_NAME')
storage_account_arg_type = CLIArgumentType(options_list=['--storage-account'], validator=validate_storage_account_id, metavar='STORAGE_NAME')
password_arg_type = CLIArgumentType(options_list=['--password', '-p'], metavar='PASSWORD_NAME')
transform_name_arg_type = CLIArgumentType(options_list=['--transform-name', '-t'], metavar='TRANSFORM_NAME')
expiry_arg_type = CLIArgumentType(options_list=['--expiry'], type=datetime_format, metavar='EXPIRY_TIME')
default_policy_name_arg_type = CLIArgumentType(options_list=['--content-key-policy-name'], help='The default content key policy name used by the streaming locator.', metavar='DEFAULT_CONTENT_KEY_POLICY_NAME')
archive_window_length_arg_type = CLIArgumentType(options_list=['--archive-window-length'], validator=validate_archive_window_length, metavar='ARCHIVE_WINDOW_LENGTH')
    key_frame_interval_duration_arg_type = CLIArgumentType(options_list=['--key-frame-interval-duration'], validator=validate_key_frame_interval_duration, metavar='KEY_FRAME_INTERVAL_DURATION')
correlation_data_type = CLIArgumentType(validator=validate_correlation_data, help="Space-separated correlation data in 'key[=value]' format. This customer provided data will be returned in Job and JobOutput state events.", nargs='*', metavar='CORRELATION_DATA')
token_claim_type = CLIArgumentType(validator=validate_token_claim, help="Space-separated required token claims in '[key=value]' format.", nargs='*', metavar='ASYMMETRIC TOKEN CLAIMS')
output_assets_type = CLIArgumentType(validator=validate_output_assets, nargs='*', help="Space-separated assets in 'assetName=label' format. An asset without label can be sent like this: 'assetName='", metavar='OUTPUT_ASSETS')
with self.argument_context('ams') as c:
c.argument('account_name', name_arg_type)
with self.argument_context('ams account') as c:
c.argument('location', arg_type=get_location_type(self.cli_ctx),
validator=get_default_location_from_resource_group, required=False)
c.argument('tags', arg_type=tags_type)
with self.argument_context('ams account create') as c:
c.argument('storage_account', storage_account_arg_type,
help='The name or resource ID of the primary storage account to attach to the Azure Media Services account. The storage account MUST be in the same Azure subscription as the Media Services account. It is strongly recommended that the storage account be in the same resource group as the Media Services account. Blob only accounts are not allowed as primary.')
c.argument('assign_identity', options_list=['--mi-system-assigned'], action='store_true', help='Set the system managed identity on the media services account.')
with self.argument_context('ams account check-name') as c:
c.argument('account_name', options_list=['--name', '-n'], id_part=None,
help='The name of the Azure Media Services account.')
c.argument('location', arg_type=get_location_type(self.cli_ctx))
with self.argument_context('ams account mru') as c:
c.argument('type', help='Speed of reserved processing units. The cost of media encoding depends on the pricing tier you choose. See https://azure.microsoft.com/pricing/details/media-services/ for further details. Allowed values: {}.'.format(", ".join(get_mru_type_completion_list())))
c.argument('count', type=int, help='The number of the encoding reserved units that you want to be provisioned for this account for concurrent tasks (one unit equals one task).')
with self.argument_context('ams account storage') as c:
c.argument('account_name', account_name_arg_type)
c.argument('storage_account', name_arg_type,
help='The name or resource ID of the secondary storage account to detach from the Azure Media Services account.',
validator=validate_storage_account_id)
with self.argument_context('ams account storage sync-storage-keys') as c:
c.argument('storage_account_id', required=True, help="The storage account Id.")
with self.argument_context('ams account storage set-authentication') as c:
c.argument('storage_auth', arg_type=get_enum_type(get_storage_authentication_allowed_values_list()), help='The type of authentication for the storage account associated with the media services account.')
with self.argument_context('ams account sp') as c:
c.argument('account_name', account_name_arg_type)
c.argument('sp_name', name_arg_type,
help="The app name or app URI to associate the RBAC with. If not present, a default name like '{amsaccountname}-access-sp' will be generated.")
        c.argument('new_sp_name', help="The new app name or app URI to update the RBAC with.")
|
RamaneekGill/CSC320-Winter-2014
|
project 2/p2.py
|
Python
|
gpl-2.0
| 12,095
| 0.015048
|
import os
os.chdir('C:/Users/Ramaneek/SkyDrive/Documents/University/Third Year/CSC320/project 2/')
###########################################################################
## Handout painting code.
###########################################################################
from PIL import Image
from pylab import *
from canny import *
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import random
import time
import matplotlib.image as mpimg
import scipy as sci
import scipy.misc
from scipy.signal import convolve2d as conv
np.set_printoptions(threshold = np.nan)
def colorImSave(filename, array):
imArray = sci.misc.imresize(array, 3., 'nearest')
if (len(imArray.shape) == 2):
sci.misc.imsave(filename, cm.jet(imArray))
else:
sci.misc.imsave(filename, imArray)
def markStroke(mrkd, p0, p1, rad, val):
# Mark the pixels that will be painted by
# a stroke from pixel p0 = (x0, y0) to pixel p1 = (x1, y1).
# These pixels are set to val in the ny x nx double array mrkd.
# The paintbrush is circular with radius rad>0
sizeIm = mrkd.shape
sizeIm = sizeIm[0:2];
nx = sizeIm[1]
ny = sizeIm[0]
p0 = p0.flatten('F')
p1 = p1.flatten('F')
rad = max(rad,1)
# Bounding box
concat = np.vstack([p0,p1])
bb0 = np.floor(np.amin(concat, axis=0))-rad
bb1 = np.ceil(np.amax(concat, axis=0))+rad
# Check for intersection of bounding box with image.
intersect = 1
if ((bb0[0] > nx) or (bb0[1] > ny) or (bb1[0] < 1) or (bb1[1] < 1)):
intersect = 0
if intersect:
# Crop bounding box.
bb0 = np.amax(np.vstack([np.array([bb0[0], 1]), np.array([bb0[1],1])]), axis=1)
bb0 = np.amin(np.vstack([np.array([bb0[0], nx]), np.array([bb0[1],ny])]), axis=1)
bb1 = np.amax(np.vstack([np.array([bb1[0], 1]), np.array([bb1[1],1])]), axis=1)
bb1 = np.amin(np.vstack([np.array([bb1[0], nx]), np.array([bb1[1],ny])]), axis=1)
# Compute distance d(j,i) to segment in bounding box
tmp = bb1 - bb0 + 1
szBB = [tmp[1], tmp[0]]
q0 = p0 - bb0 + 1
q1 = p1 - bb0 + 1
t = q1 - q0
nrmt = np.linalg.norm(t)
[x,y] = np.meshgrid(np.array([i+1 for i in range(int(szBB[1]))]), np.array([i+1 for i in range(int(szBB[0]))]))
d = np.zeros(szBB)
d.fill(float("inf"))
if nrmt == 0:
# Use distance to point q0
d = np.sqrt( (x - q0[0])**2 +(y - q0[1])**2)
idx = (d <= rad)
else:
# Use distance to segment q0, q1
t = t/nrmt
n = [t[1], -t[0]]
tmp = t[0] * (x - q0[0]) + t[1] * (y - q0[1])
idx = (tmp >= 0) & (tmp <= nrmt)
if np.any(idx.flatten('F')):
d[np.where(idx)] = abs(n[0] * (x[np.where(idx)] - q0[0]) + n[1] * (y[np.where(idx)] - q0[1]))
idx = (tmp < 0)
if np.any(idx.flatten('F')):
d[np.where(idx)] = np.sqrt( (x[np.where(idx)] - q0[0])**2 +(y[np.where(idx)] - q0[1])**2)
idx = (tmp > nrmt)
if np.any(idx.flatten('F')):
d[np.where(idx)] = np.sqrt( (x[np.where(idx)] - q1[0])**2 +(y[np.where(idx)] - q1[1])**2)
#Pixels within crop box to paint have distance <= rad
idx = (d <= rad)
#Mark the pixels
if np.any(idx.flatten('F')):
xy = (bb0[1]-1+y[np.where(idx)] + sizeIm[0] * (bb0[0]+x[np.where(idx)]-2)).astype(int)
sz = mrkd.shape
m = mrkd.flatten('F')
m[xy-1] = val
mrkd = m.reshape(mrkd.shape[0], mrkd.shape[1], order = 'F')
'''
row = 0
col = 0
for i in range(len(m)):
col = i//sz[0]
mrkd[row][col] = m[i]
row += 1
if row >= sz[0]:
row = 0
'''
return mrkd
def paintStroke(canvas, x, y, p0, p1, colour, rad):
# Paint a stroke from pixel p0 = (x0, y0) to pixel p1 = (x1, y1)
# on the canvas (ny x nx x 3 double array).
# The stroke has rgb values given by colour (a 3 x 1 vector, with
# values in [0, 1]. The paintbrush is circular with radius rad>0
sizeIm = canvas.shape
sizeIm = sizeIm[0:2]
idx = markStroke(np.zeros(sizeIm), p0, p1, rad, 1) > 0
# Paint
if np.any(idx.flatten('F')):
canvas = np.reshape(canvas, (np.prod(sizeIm),3), "F")
xy = y[idx] + sizeIm[0] * (x[idx]-1)
canvas[xy-1,:] = np.tile(np.transpose(colour[:]), (len(xy), 1))
canvas = np.reshape(canvas, sizeIm + (3,), "F")
return canvas
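# Minimal usage sketch for paintStroke (hypothetical values): paint one short
# red stroke of radius 3 on a blank 100x100 canvas.
#   cv = np.zeros((100, 100, 3))
#   xg, yg = np.meshgrid(np.arange(1, 101), np.arange(1, 101))
#   cv = paintStroke(cv, xg, yg, np.array([10, 10]), np.array([40, 40]),
#                    np.array([1.0, 0.0, 0.0]), 3)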
if __name__ == "__main__":
# Read image and convert it to double, and scale each R,G,B
# channel to range [0,1].
imRGB = array(Image.open('orchid.jpg'))
imRGB = double(imRGB) / 255.0
plt.clf()
plt.axis('off')
sizeIm = imRGB.shape
sizeIm = sizeIm[0:2]
# Set radius of paint brush and half length of drawn lines
rad = 1
halfLen = 5
# Set up x, y coordinate images, and canvas.
[x, y] = np.meshgrid(np.array([i+1 for i in range(int(sizeIm[1]))]), np.array([i+1 for i in range(int(sizeIm[0]))]))
canvas = np.zeros((sizeIm[0],sizeIm[1], 3))
canvas.fill(-1) ## Initially mark the canvas with a value out of range.
# Negative values will be used to denote pixels which are unpainted.
# Random number seed
np.random.seed(29645)
# Orientation of paint brush strokes
theta = 2 * pi * np.random.rand(1,1)[0][0]
    # Set vector from center to one end of the stroke.
delta = np.array([cos(theta), sin(theta)])
time.time()
time.clock()
k=0
#####################################################################################
gray()
#imRGB_mono = np.zeros((sizeIm[0], sizeIm[1]))
#imRGB_mono = imRGB[:,:,0] * 0.30 + imRGB[:,:,1] * 0.59 + imRGB[:,:,2] * 0.11
#using canny edge detection on red filter
imRGB_mono = np.zeros((sizeIm[0], sizeIm[1], 3))
imRGB_mono = imRGB[:,:,0]
#orchid
high = 20; low = 7;
#myimg
#high = 15; low = 2;
canny_im = np.zeros((sizeIm[0],sizeIm[1], 3))
canny_im = canny(imRGB_mono, 2.0, high, low)
imshow(canny_im)
show()
### Part 5 code
imin = imRGB_mono.copy() * 255.0
wsize = 5
sigma = 4
gausskernel = gaussFilter(sigma, window = wsize)
# fx is the filter for vertical gradient
# fy is the filter for horizontal gradient
    # Please note the vertical direction is positive X
fx = createFilter([0, 1, 0,
0, 0, 0,
0, -1, 0])
fy = createFilter([ 0, 0, 0,
1, 0, -1,
0, 0, 0])
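    # Sketch of what the 'valid' convolutions below compute (up to the kernel
    # flip inherent in convolution): gradx is a central difference along the
    # vertical (positive-X) axis, grady along the horizontal axis.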
imout = conv(imin, gausskernel, 'valid')
# print "imout:", imout.shape
gradxx = conv(imout, fx, 'valid')
gradyy = conv(imout, fy, 'valid')
gradx = np.zeros(imRGB_mono.shape)
grady = np.zeros(imRGB_mono.shape)
    padx = (imin.shape[0] - gradxx.shape[0]) // 2
    pady = (imin.shape[1] - gradxx.shape[1]) // 2
gradx[padx:-padx, pady:-pady] = gradxx
grady[padx:-padx, pady:-pady] = gradyy
# Net gradient is the square root of sum of square of the horizontal
# and vertical gradients
grad = hypot(gradx, grady)
theta = arctan2(grady, gradx)
theta = 180 + (180 / pi) * theta
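    # i.e. grad = sqrt(gradx**2 + grady**2); arctan2 returns radians in
    # (-pi, pi], so theta above is the gradient angle mapped to degrees
    # in (0, 360].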
# Only significant magnitudes are considered. All others are removed
xx, yy = where(grad < 0.33)
theta[xx, yy] = math.degrees(2 * pi * np.random.rand(1,1)[0][0])
#grad[xx, yy] = 0 not needed
imshow(theta)
show()
#colorImSave("flipped_fy_part5_theta.png", theta)
    normals = theta.copy() + 90  # add 90 degrees (pi/2) to get the normals
#####################################################################################
    # run while there is still a pixel left to paint
while len(where(canvas < 0)[0]) != 0:
#tuple of pixels not painted
empty_canvas_pixels = where(canvas < 0)
        # choose a random non-painted pixel
|
lincolnloop/salmon
|
setup.py
|
Python
|
bsd-3-clause
| 801
| 0
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='salmon',
version='0.3.0-dev',
description="A simple metric collector with alerts.",
long_description=open('README.rst').read(),
author="Peter Baumgarter",
author_email='pete@lincolnloop.com',
url='https://github.com/lincolnloop/salmon',
license='BSD',
install_requires=[
        'django==1.6.1',
'djangorestframework==2.3.9',
'South==0.8.3',
'logan==0.5.9.1',
'gunicorn==18.0',
'whisper==0.9.10',
'dj-static==0.0.5',
'pytz',
],
entry_points={
'console_scripts': [
'salmon = salmon.core.runner:main',
],
},
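    # The console_scripts entry above makes setuptools install a `salmon`
    # executable that dispatches to salmon.core.runner:main.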
packages=find_packages(),
include_package_data=True,
zip_safe=False,
)
|