repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
mkermani144/sudo-make-me-a-sandwitch | trending-repos.py | Python | mit | 1,796 | 0 | '''
This script opens top today's trending github repos in your browser
if you have not previously visited them.
Repos are selected based on https://github.com/trending.
'''
import requests
import bs4
import webbrowser
import os
from optparse import OptionParser
parser = OptionParser('usage: trending-repos [options]')
parser. | add_option(
'-n', | '--number-of-repos',
dest='num_of_repos',
default='6',
type='int',
help='set number of trending repos to be opened in the browser')
parser.add_option(
'-f', '--force',
action='store_true',
dest='is_forced',
help='open all of trending repos, including previously-visited ones.'
)
options, args = parser.parse_args()
num_of_repos = options.num_of_repos
is_forced = options.is_forced
if num_of_repos > 25:
parser.error('There are only 25 trending repos available')
repos_file = os.path.dirname(os.path.realpath(__file__)) + '/repos.list'
if not os.path.isfile(repos_file):
f = open(repos_file, 'w+')
f.close()
with open(repos_file, 'r+') as repos_list:
repos = repos_list.read().splitlines()
html = requests.get('https://github.com/trending')
html.raise_for_status()
soup = bs4.BeautifulSoup(html.text, 'lxml')
elements = soup.select('.repo-list > li > div > h3 > a')[:num_of_repos]
rel_urls = [el.attrs['href'] for el in elements]
urls = [rel_url for rel_url in rel_urls if rel_url not in repos]
print('New repos:', len(urls))
print('Previously visited repos:', num_of_repos - len(urls))
if is_forced:
urls = rel_urls
def_stdout = os.dup(1)
devnull = os.open(os.devnull, os.O_WRONLY)
os.dup2(devnull, 1)
for url in urls:
webbrowser.open('https://github.com' + url)
repos_list.write(url + '\n')
os.dup2(def_stdout, 1)
|
antoinecarme/pyaf | tests/model_control/detailed/transf_None/model_control_one_enabled_None_ConstantTrend_Seasonal_WeekOfYear_SVR.py | Python | bsd-3-clause | 161 | 0.049689 | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['None' | ] , ['ConstantTrend'] | , ['Seasonal_WeekOfYear'] , ['SVR'] ); |
milanjelisavcic/milanbot | milanbot/__init__.py | Python | unlicense | 743 | 0 | import json
# T | ransliteration map from Cyrillic to Latin script
with open('mi | lanbot/transliteration.json') as json_file:
cyrillic_transliteration = json.load(json_file)
# Supported languages that 'MilanBot' works with
with open('milanbot/languages.json') as json_file:
languages = json.load(json_file)
sparql_disambiguation = \
'SELECT ?item WHERE {?item wdt:P31 wd:Q4167410 }'
sparql_disambiguation_sr = \
'SELECT ?item WHERE { ?item wdt:P31 wd:Q4167410 . ' \
'?wiki0 schema:about ?item . ' \
'?wiki0 schema:isPartOf <https://sh.wikipedia.org/> }'
sparql_people = \
'SELECT ?item WHERE { ?item wdt:P31 wd:Q5 . ' \
'?wiki0 schema:about ?item . ' \
'?wiki0 schema:isPartOf <https://sr.wikipedia.org/> }'
|
nguyenla/Spelling | Game3Controller.py | Python | gpl-2.0 | 7,003 | 0.008282 | #!/usr/bin/env python
import os
from Game3View import Game3View
from HomeView import HomeView
import HomeController
import sys
from gi.repository import Gtk
from gi.repository import Gdk
from random import randint
from random import shuffle
class Game3Controller:
def __init__(self, view, parent):
self.view = view
self.parent = parent
#calls the proper method when the button is clicked
self.view.skip.connect_object("clicked", self.skip_press, "SKIP")
self.view.word1.connect_object("clicked", self.check_correct, "0")
self.view.word2.connect_object("clicked", self.check_correct, "1")
self.view.word3.connect_object("clicked", self.check_correct, "2")
self.view.word4.connect_object("clicked", self.check_correct, "3")
self.view.word5.connect_object("clicked", self.check_correct, "4")
self.back_button_signal = self.view.navBar.button.connect("clicked", self.home_page)
# Fields of the controller
self.numGuesses = 1
self.level = 1
self.score = 0
self.skipsLeft = 3
self.definitions = []
self.Words = []
self.roundList = []
self.picked = []
self.def_array = []
self.totalScore = 0
self.isNext = False
self.gotPoints = False
self.nextLevel = False
self.view.skip.set_label("SKIP\n(" + str(self.skipsLeft) + " Left)")
self.generate_level()
#loads the words and definitions, then sets up the level
def generate_level(self):
self.get_correct(self.level)
self.load_level_definitions(self.level)
self.make_round()
#resets the resultLabel and skip button to initial value, and resets gotPoints
#to false. Sets up the words to display on the buttons and definition to display
def make_round(self):
self.view.resultLabel.set_text("")
self.view.skip.set_label("SKIP\n(" + str(self.skipsLeft) + " Left)")
self.numGuesses = 1
self.gotPoints = False
self.roundList = []
self.picked = []
self.def_array = []
#gets 5 unique words for the round, and the correspoinding defintions
while len(self.roundList) < 5:
x = randint(0,len(self.Words)-1)
if x not in self.picked:
self.roundList.append(self.Words[x])
self.def_array.append(x)
self.picked.append(x)
shuffle(self.picked)
self.view.def1.set_markup("<span size='14000'><b> Definition: " + self.definitions[self.picked[0]] + "</b></span>")
self.view.word1.set_label(self.roundList[0])
self.view.word2.set_label(self.roundList[1])
self.view.word3.set_label(self.roundList[2])
self.view.word4.set_label(self.roundList[3])
self.view.word5.set_label(self.roundList[4])
#negates the skipLeft decrease if the label is currently next
#makes a new round while skips are left and increments variables accordingly
#When it is the end of the level, resets the screen
def skip_press(self,widget):
if self.isNext:
self.skipsLeft += 1
self.isNext = False
if self.skipsLeft > 0:
self.make_round()
self.skipsLeft = self.skipsLeft - 1
self.totalScore +=10
self.view.skip.set_label("SKIP\n(" + str(self.skipsLeft) + " Left)")
else:
self.view.resultLabel.set_text("No Skips Left!")
if self.nextLevel:
#puts the widgets back on the screen for the next level
self.nextLevel = False
self.view.label.set_text("LEVEL " + str(self.level))
self.view.word1.show()
self.view.word2.show()
self.view.word3.show()
self.view.word4.show()
self.view.word5.show()
self.view.def1.show()
self.generate_level()
#if def matches word, updates variables accordingly and deletes the word and def from the array
#when there are less than 5 words left, end the level
def check_correct(self,widget):
#checks if number matches the number at int(widget) index
if self.numGuesses == 0:
self.endLevel()
self.view.label.set_markup("<span size='10000'><b>Incorrect. Too many guesses</b></span>")
self.skipsLeft += 1
#self.nextLevel = False
if self.picked[0] == self.def_array[int(widget)] and self.isNext==False:
self.view.resultLabel.set_markup("<span size='10000'><b>CORRECT!</b></span>")
self.updateScore(10)
self.view.skip.set_label("NEXT")
self.isNext = True
self.gotPoints = True
del self.definitions[self.picked[0]]
del self.Words[self.picked[0]]
else:
if self.gotPoints == False:
if self.numGuesses > 0:
| self.view.resultLabel.set_markup("<span size='10000'><b>INCORRECT! " + str(self.numGuesses) + " left.</b></span>")
| self.numGuesses -= 1
#the player answered enough correctly to move on.
if len(self.definitions) <= 5:
self.level += 1
self.totalScore +=10
self.endLevel()
#hides the variables to display the results from the level
def endLevel(self):
self.view.word1.hide()
self.view.word2.hide()
self.view.word3.hide()
self.view.word4.hide()
self.view.word5.hide()
#need the self.level-1 since we already incremented it
self.view.label.set_text("Level " +str(self.level-1) + " completed. You have scored " + str(self.score) + " out of " + str(self.totalScore) + " points.")
self.view.def1.hide()
self.view.resultLabel.set_text("")
self.view.skip.set_label("Continue")
self.nextLevel = True
# This function takes in a file name and load all the words from the corresponding file
def load_file(self, filename):
file = open(filename)
word = file.readline()
wordlist = []
while len(word) > 0:
wordlist.append(word[:len(word)-1])
word = file.readline()
return wordlist
# This function takes in a file name and load all the words from the corresponding file
def get_correct(self, level):
self.Words = self.load_file("Game2-CorrectLevel" + str(level))
# This function takes in a file name and load all the words from the corresponding file
def load_level_definitions(self, level):
self.definitions = self.load_file("CorrectlySpelled - Definitions" + str(level))
#increates the score when points have no already been awarded
def updateScore(self, increment):
self.score += increment
self.view.scoreLabel.set_text("SCORE: " + str(self.score))
def home_page(self, button):
self.view = HomeView(self.parent)
self.controller = HomeController.HomeController(self.view, self.parent)
|
Edraak/edx-platform | lms/envs/aws.py | Python | agpl-3.0 | 36,831 | 0.004236 | # -*- coding: utf-8 -*-
"""
This is the default template for our main set of AWS servers. This does NOT
cover the content machines, which use content.py
Common traits:
* Use memcached, and cache-backed sessions
* Use a MySQL 5.1 database
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
import datetime
import json
from .common import *
from openedx.core.lib.logsettings import get_logger_config
import os
from path import Path as path
from xmodule.modulestore.modulestore_settings import convert_module_store_setting_if_needed
from django.utils.translation import ugettext as _
# SERVICE_VARIANT specifies name of the variant used, which decides what JSON
# configuration files are read during startup.
SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
# CONFIG_ROOT specifies the directory where the JSON configuration
# files are expected to be found. If not specified, use the project
# directory.
CONFIG_ROOT = path(os.environ.get('CONFIG_ROOT', ENV_ROOT))
# CONFIG_PREFIX specifies the prefix of the JSON configuration files,
# based on the service variant. If no variant is use, don't use a
# prefix.
CONFIG_PREFIX = SERVICE_VARIANT + "." if SERVICE_VARIANT else ""
################################ ALWAYS THE SAME ##############################
DEBUG = False
DEFAULT_TEMPLATE_ENGINE['OPTIONS']['debug'] = False
EMAIL_BACKEND = 'django_ses.SESBackend'
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
# IMPORTANT: With this enabled, the server must always be behind a proxy that
# strips the header HTTP_X_FORWARDED_PROTO from client requests. Otherwise,
# a user can fool our server into thinking it was an https connection.
# See
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
# for other warnings.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
###################################### CELERY ################################
# Don't use a connection pool, since connections are dropped by ELB.
BROKER_POOL_LIMIT = 0
BROKER_CONNECTION_TIMEOUT = 1
# For the Result Store, use the django cache named 'celery'
CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend'
# When the broker is behind an ELB, use a heartbeat to refresh the
# connection and to detect if it has been dropped.
BROKER_HEARTBEAT = 10.0
BROKER_HEARTBEAT_CHECKRATE = 2
# Each worker should only fetch one message at a time
CELERYD_PREFETCH_MULTIPLIER = 1
# Rename the exchange and queues for each variant
QUEUE_VARIANT = CONFIG_PREFIX.lower()
CELERY_DEFAULT_EXCHANGE = 'edx.{0}core'.format(QUEUE_VARIANT)
HIGH_PRIORITY_QUEUE = 'edx.{0}core.high'.format(QUEUE_VARIANT)
DEFAULT_PRIORITY_QUEUE = 'edx.{0}core.default'.format(QUEUE_VARIANT)
LOW_PRIORITY_QUEUE = 'edx.{0}core.low'.format(QUEUE_VARIANT)
HIGH_MEM_QUEUE = 'edx.{0}core.high_mem'.format(QUEUE_VARIANT)
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {},
HIGH_MEM_QUEUE: {},
}
# If we're a worker on the high_mem queue, set ourselves to die after processing
# one request to avoid having memory leaks take down the worker server. This env
# var is set in /etc/init/edx-workers.conf -- this should probably be replaced
# with some celery API call to see what queue we started listening to, but I
# don't know what that call is or if it's active at this point in the code.
if os.environ.get('QUEUE') == 'high_mem':
CELERYD_MAX_TASKS_PER_CHILD = 1
CELERYBEAT_SCHEDULE = {} # For scheduling tasks, entries can be added to this dict
########################## NON-SECURE ENV CONFIG ##############################
# Things like server locations, ports, etc.
with open(CONFIG_ROOT / CONFIG_PREFIX + "env.json") as env_file:
ENV_TOKENS = json.load(env_file)
# STATIC_ROOT specifies the directory where static files are
# collected
STATIC_ROOT_BASE = ENV_TOKENS.get('STATIC_ROOT_BASE', None)
if STATIC_ROOT_BASE:
STATIC_ROOT = path(STATIC_ROOT_BASE)
# STATIC_URL_BASE specifies the base url to use for static files
STATIC_URL_BASE = ENV_TOKENS.get('STATIC_URL_BASE', None)
if STATIC_URL_BASE:
# collectstatic will fail if STATIC_URL is a unicode string
STATIC_URL = STATIC_URL_BASE.encode('ascii')
if not STATIC_URL.endswith("/"):
STATIC_URL += "/"
# DEFAULT_COURSE_ABOUT_IMAGE_URL specifies the default image to show for courses that don't provide one
DEFAULT_COURSE_ABOUT_IMAGE_URL = ENV_TOKENS.get('DEFAULT_COURSE_ABOUT_IMAGE_URL', DEFAULT_COURSE_ABOUT_IMAGE_URL)
# MEDIA_ROOT specifies the directory where user-uploaded files are stored.
MEDIA_ROOT = ENV_TOKENS.get('MEDIA_ROOT', MEDIA_ROOT)
MEDIA_URL = ENV_TOKENS.get('MEDIA_URL', MEDIA_ | URL)
# Hardcode to the Arabic name, even in English layout, check the previous commits to see other failed scripts
PLATFORM_NAME = u'إدراك'
# Fo | r displaying on the receipt. At Stanford PLATFORM_NAME != MERCHANT_NAME, but PLATFORM_NAME is a fine default
PLATFORM_TWITTER_ACCOUNT = ENV_TOKENS.get('PLATFORM_TWITTER_ACCOUNT', PLATFORM_TWITTER_ACCOUNT)
PLATFORM_FACEBOOK_ACCOUNT = ENV_TOKENS.get('PLATFORM_FACEBOOK_ACCOUNT', PLATFORM_FACEBOOK_ACCOUNT)
SOCIAL_SHARING_SETTINGS = ENV_TOKENS.get('SOCIAL_SHARING_SETTINGS', SOCIAL_SHARING_SETTINGS)
# Social media links for the page footer
SOCIAL_MEDIA_FOOTER_URLS = ENV_TOKENS.get('SOCIAL_MEDIA_FOOTER_URLS', SOCIAL_MEDIA_FOOTER_URLS)
CC_MERCHANT_NAME = ENV_TOKENS.get('CC_MERCHANT_NAME', PLATFORM_NAME)
EMAIL_BACKEND = ENV_TOKENS.get('EMAIL_BACKEND', EMAIL_BACKEND)
EMAIL_FILE_PATH = ENV_TOKENS.get('EMAIL_FILE_PATH', None)
EMAIL_HOST = ENV_TOKENS.get('EMAIL_HOST', 'localhost') # django default is localhost
EMAIL_PORT = ENV_TOKENS.get('EMAIL_PORT', 25) # django default is 25
EMAIL_USE_TLS = ENV_TOKENS.get('EMAIL_USE_TLS', False) # django default is False
SITE_NAME = ENV_TOKENS['SITE_NAME']
HTTPS = ENV_TOKENS.get('HTTPS', HTTPS)
SESSION_ENGINE = ENV_TOKENS.get('SESSION_ENGINE', SESSION_ENGINE)
SESSION_COOKIE_DOMAIN = ENV_TOKENS.get('SESSION_COOKIE_DOMAIN')
SESSION_COOKIE_HTTPONLY = ENV_TOKENS.get('SESSION_COOKIE_HTTPONLY', True)
SESSION_COOKIE_SECURE = ENV_TOKENS.get('SESSION_COOKIE_SECURE', SESSION_COOKIE_SECURE)
SESSION_SAVE_EVERY_REQUEST = ENV_TOKENS.get('SESSION_SAVE_EVERY_REQUEST', SESSION_SAVE_EVERY_REQUEST)
REGISTRATION_EXTRA_FIELDS = ENV_TOKENS.get('REGISTRATION_EXTRA_FIELDS', REGISTRATION_EXTRA_FIELDS)
CSRF_COOKIE_SECURE = ENV_TOKENS.get('CSRF_COOKIE_SECURE', CSRF_COOKIE_SECURE)
# Set the names of cookies shared with the marketing site
# These have the same cookie domain as the session, which in production
# usually includes subdomains.
EDXMKTG_LOGGED_IN_COOKIE_NAME = ENV_TOKENS.get('EDXMKTG_LOGGED_IN_COOKIE_NAME', EDXMKTG_LOGGED_IN_COOKIE_NAME)
EDXMKTG_USER_INFO_COOKIE_NAME = ENV_TOKENS.get('EDXMKTG_USER_INFO_COOKIE_NAME', EDXMKTG_USER_INFO_COOKIE_NAME)
ENV_FEATURES = ENV_TOKENS.get('FEATURES', {})
for feature, value in ENV_FEATURES.items():
FEATURES[feature] = value
CMS_BASE = ENV_TOKENS.get('CMS_BASE', 'studio.edx.org')
LMS_BASE = ENV_TOKENS.get('LMS_BASE', '')
ALLOWED_HOSTS = [
# TODO: bbeggs remove this before prod, temp fix to get load testing running
"*",
ENV_TOKENS.get('LMS_BASE'),
FEATURES['PREVIEW_LMS_BASE'],
]
# allow for environments to specify what cookie name our login subsystem should use
# this is to fix a bug regarding simultaneous logins between edx.org and edge.edx.org which can
# happen with some browsers (e.g. Firefox)
if ENV_TOKENS.get('SESSION_COOKIE_NAME', None):
# NOTE, there's a bug in Django (http://bugs.python.org/issue18012) which necessitates this being a str()
SESSION_COOKIE_NAME = str(ENV_TOKENS.get('SESSION_COOKIE_NAME'))
BOOK_URL = ENV_TOKENS['BOOK_URL']
|
ecino/compassion-modules | intervention_compassion/models/compassion_intervention.py | Python | agpl-3.0 | 31,310 | 0 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
import logging
import time
from odoo import models, fields, _, api
from odoo.exceptions import UserError
from odoo.addons.message_center_compassion.mappings import base_mapping \
as mapping
logger = logging.getLogger(__name__)
INTERVENTION_PORTAL_URL = "https://compassion.force.com/GlobalPartners/"
class CompassionIntervention(models.Model):
""" All interventions on hold or sponsored.
"""
_inherit = ['compassion.generic.intervention', 'mail.thread']
_name = 'compassion.intervention'
_description = 'Intervention'
##########################################################################
# FIELDS #
##########################################################################
# General Information
#####################
state = fields.Selection([
('on_hold', _('On Hold')),
('sla', _('SLA Negotiation')),
('committed', _('Committed')),
('active', _('Active')),
('close', _('Closed')),
('cancel', _('Cancelled')),
], default='on_hold', track_visibility='onchange')
type = fields.Selection(store=True, readonly=True)
parent_intervention_name = fields.Char(string='Parent Intervention',
readonly=True)
intervention_status = fields.Selection([
("Approved", _("Approved")),
("Cancelled", _("Cancelled")),
("Closed", _("Closed")),
("Draft", _("Draft")),
("Rejected", _("Rejected")),
("Submitted", _("Submitted")),
], readonly=True)
funding_global_partners = fields.Char(readonly=True)
cancel_reason = fields.Char(readonly=True)
fcp_ids = fields.Many2many(
'compassion.project', 'fcp_interventions', 'intervention_id', 'fcp_id',
string='FCPs', readonly=True,
)
product_template_id = fields.Many2one('product.template', 'Linked product')
# Multicompany
company_id = fields.Many2one(
'res.company',
'Company',
required=True,
index=True,
default=lambda self: self.env.user.company_id.id
)
# Schedule Information
######################
start_date = fields.Date(help='Actual start date', readonly=True)
actual_duration = fields.Integer(
help='Actual duration in months', readonly=True)
initial_planned_end_date = fields.Date(readonly=True)
planned_end_date = fields.Date(
readonly=True, track_visibility='onchange')
end_date = fields.Date(
help='Actual end date', readonly=True, track_visibility='onchange')
# Budget Information (all monetary fields are in US dollars)
####################
estimated_local_contribution = fields.Float(readonly=True)
impacted_beneficiaries = fields.Integer(
help='Actual number of impacted beneficiaries', readonly=True)
local_contribution = fields.Float(
readonly=True, help='Actual local contribution')
commitment_amount = fields.Float(
readonly=True, track_visibility='onchange')
commited_percentage = fields.Float(
readonly=True, track_visibility='onchange', default=100.0)
total_expense = fields.Char(
'Total expense', compute='_compute_move_line', readonly=True)
total_income = fields.Char(
'Total income', compute='_compute_move_line', readonly=True)
total_amendment = fields.Float()
total_actual_cost_local = fields.Float(
'Total cost (local currency)'
)
total_estimated_cost_local = fields.Float(
'Estimated costs (local currency)'
)
local_currency_id = fields.Many2one('res.currency',
related='field_office_id.country_id.'
'currency_id')
# Intervention Details Information
##################################
problem_statement = fields.Text(readonly=True)
background_information = fields.Text(readonly=True)
objectives = fields.Text(readonly=True)
success_factors = fields.Text(readonly=True)
solutions = fields.Text(readonly=True)
not_funded_implications = fields.Text(readonly=True)
implementation_risks = fields.Text(readonly=True)
# Service Level Negotiation
###########################
sla_negotiation_status = fields.Selection([
("--None--", _("None")),
("FO Costs Proposed", _("FO Costs Proposed")),
("GP Accepted Costs", _("GP Accepted Costs")),
("GP Preferences Submitted", _("GP Preferences Submitted")),
("GP Rejected Costs", _("GP Rejected Costs")),
("FO Rejected GP Preferences", _("FO Rejected GP Preferences"))
], readonly=True, track_visibility='onchange')
sla_comments = fields.Char(readonly=True)
fo_proposed_sla_costs = fields.Float(
readonly=True,
help='The costs proposed by the Field Office for the SLA')
approved_sla_costs = fields.Float(
readonly=True,
help='The final approved Service Level Agreement Cost'
)
deliverable_level_1_ids = fields.Many2many(
'compassion.intervention.deliverable',
'compassion_intervention_deliverable1_rel',
'intervention_id', 'deliverable_id',
string='Level 1 Deliverables',
compute='_compute_level1_deliverables'
)
deliverable_level_2_ids = fields.Many2many(
'compassion.intervention.deliverable',
'compassion_intervention_deliverable2_rel',
'intervention_id', 'deliverable_id',
string='Level 2 Deliverables', readonly=True
)
deliverable_level_3_ids = fields.Many2many(
'compassion.intervention.deliverable',
'compassion_intervention_deliverable3_rel',
'intervention_id', 'deliverable_id',
string='Level 3 Deliverables', readonly=True
)
sla_selection_complete = fields.Boolean()
# Hold information
##################
hold_id = fields.Char(readonly=True)
service_level = fields.Selection([
("Level 1", _("Level 1")),
| ("Level 2", _("Level 2")),
("Level 3", _("Level 3")),
], required=True, readonly=True, states={
| 'on_hold': [('readonly', False)],
'sla': [('readonly', False)],
})
hold_amount = fields.Float(readonly=True, states={
'on_hold': [('readonly', False)],
'sla': [('readonly', False)],
}, track_visibility='onchange')
expiration_date = fields.Date(readonly=True, states={
'on_hold': [('readonly', False)],
'sla': [('readonly', False)],
})
next_year_opt_in = fields.Boolean(readonly=True, states={
'on_hold': [('readonly', False)],
'sla': [('readonly', False)],
})
user_id = fields.Many2one(
'res.users', 'Primary owner',
domain=[('share', '=', False)], readonly=True, states={
'on_hold': [('readonly', False)],
'sla': [('readonly', False)],
},
track_visibility='onchange',
oldname='primary_owner'
)
secondary_owner = fields.Char(readonly=True, states={
'on_hold': [('readonly', False)],
'sla': [('readonly', False)],
})
# Survival Information
######################
survival_slots = fields.Integer(readonly=True)
launch_reason = fields.Char(readonly=True)
mother_children_challenges = fields.Char(
'Challenges for mother and children', readonly=True
)
community_benefits = fields.Char(readonly=True)
mother_average_age = fields.Integer(
'Avg age of first-time mother', readonly=True)
household_children_average = fields.Integer(
'Avg of children per household', readonly=True
)
under_five_population = fields.Char(
'% population under age 5', readonly=True
|
dufferzafar/critiquebrainz | critiquebrainz/frontend/profile/applications/forms.py | Python | gpl-2.0 | 1,318 | 0.00607 | from flask_wtf import Form
from flask_babel import gettext
from wtforms import StringField, validators
class ApplicationForm(Form):
name = StringField(gettext('Application name'), [
validators.DataRequired(message=gettext("Application name field is empty.")),
validators.Length(min=3, message=gettext("Applicati | on name needs to be at least 3 characters long.")),
validators.Length(max=64, message=gettext("Application name needs to be at most 64 characters long."))])
desc = StringField(gettext('Description'), [
validators.DataRequired(message=gettext("Client description field is empty.")),
validators.Length(min=3, message=gettext("Client description needs to be at least 3 ch | aracters long.")),
validators.Length(max=512, message=gettext("Client description needs to be at most 512 characters long."))])
website = StringField(gettext('Homepage'), [
validators.DataRequired(message=gettext("Homepage field is empty.")),
validators.URL(message=gettext("Homepage is not a valid URI."))])
redirect_uri = StringField(gettext('Authorization callback URL'), [
validators.DataRequired(message=gettext("Authorization callback URL field is empty.")),
validators.URL(message=gettext("Authorization callback URL is invalid."))])
|
SebastiaanPasterkamp/dnd-machine | app/views/monster.py | Python | gpl-3.0 | 2,730 | 0.001465 | # -*- coding: utf-8 -*-
from flask import request, abort
from views.baseapi import BaseApiBlueprint, BaseApiCallback
class MonsterBlueprint(BaseApiBlueprint):
@property
def datamapper(self):
return self.basemapper.monster
@property
def campaignmapper(self):
return self.basemapper.campaign
def get_allowed_campaign_ids(self, user_id):
return [None] + [
c.id for c in self.campaignmapper.getByDmUserId(user_id)
]
@BaseApiCallback('index')
@BaseApiCallback('overview')
@BaseApiCallback('show')
@BaseApiCallback('new')
@BaseApiCallback('edit')
@BaseApiCallback('api_list')
@BaseApiCallback('api_get')
@BaseApiCallback('api_copy')
@BaseApiCallback('api_post')
@BaseApiCallback('api_patch')
@BaseApiCallback('api_delete')
@BaseApiCallback('api_recompute')
def adminOrDmOnly(self, *args, **kwargs):
if not self.checkRole(['admin', 'dm']):
abort(403)
@BaseApiCallback('raw')
def adminOnly(self, *args, **kwargs):
if not self.checkRole(['admin']):
abort(403)
@BaseApiCallback('api_list.objects')
def adminOrGenericMultiple(self, objs):
if self.checkRole(['admin']):
return
campaign_ids = self.get_allowed_campaign_ids(
request.user.id
)
ob | js[:] = [
obj
for obj in objs
if obj.user_id == request.user.id \
| or obj.campaign_id in campaign_ids
]
@BaseApiCallback('api_copy.original')
@BaseApiCallback('api_get.object')
def adminOrGenericSingle(self, obj):
if self.checkRole(['admin']):
return
campaign_ids = self.get_allowed_campaign_ids(
request.user.id
)
if obj.user_id != request.user.id \
and obj.campaign_id not in campaign_ids:
abort(403)
@BaseApiCallback('api_patch.object')
@BaseApiCallback('api_delete.object')
def adminOrOwnedSingle(self, obj):
if self.checkRole(['admin']):
return
if obj.user_id is None:
obj.user_id = request.user.id
elif obj.user_id != request.user.id:
abort(403)
@BaseApiCallback('api_post.object')
@BaseApiCallback('api_copy.object')
def setOwner(self, obj):
obj.user_id = request.user.id
@BaseApiCallback('api_copy.object')
def changeName(self, obj, *args, **kwargs):
obj.name += " (Copy)"
def get_blueprint(basemapper, config):
return '/monster', MonsterBlueprint(
'monster',
__name__,
basemapper,
config,
template_folder='templates'
)
|
damdam-s/account-financial-tools | account_move_line_search_extension/__init__.py | Python | agpl-3.0 | 1,056 | 0 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open | Source Management Solution
#
# Copyright (c) 2013-2015 Noviat nv/sa (www.noviat.com).
| #
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import account
from . import res_partner
from . import ir_actions
|
xuanthuong/golfgame | models/hole.py | Python | mit | 1,532 | 0.013055 | # -*- coding: utf-8 -*-
# Description: gmf_hole table
# By Thuong.Tran
# Date: 31 Aug 2017
from sqlalchemy import create_engine, Table, Column, MetaData, Integer, Text, DateTime, Float
from sqlalchemy import select
class hole():
def __init__(self, db_url):
_engine = create_engine(db_url)
_connection = _engine.connect()
_metadata = MetaData()
_gmf_hole = Table("gmf_hole", _metadata,
Column("HOLE_ID", Integer, primary_key=True),
Column("PLER_ID", Integer),
Column("HOLE_TP", Text),
| Column("HOLE_DT", DateTime),
Column("WK_DY", Text),
Column("GRP_ | TP", Text),
Column("WRKR_1_ID", Integer),
Column("WRKR_2_ID", Integer),
Column("SCRE_NO", Float))
_metadata.create_all(_engine)
self.connection = _connection
self.gmf_hole = _gmf_hole
pass
def insert_to(self, data):
is_valid = True
if is_valid:
ins_query = self.gmf_hole.insert().values(data)
r = self.connection.execute(ins_query)
return r.lastrowid
def get_all(self):
s = select([self.gmf_hole]).order_by('HOLE_ID')
result = self.connection.execute(s)
return result
def get_hole_type_by_id(self, hole_id):
s = select([self.gmf_hole]).where(self.gmf_hole.c.HOLE_ID == hole_id)
result = self.connection.execute(s)
for res in result:
return res
|
PaulWay/insights-core | insights/parsers/messages.py | Python | apache-2.0 | 3,079 | 0.001949 | """
Messages - file ``/var/log/messages``
=====================================
Reads the ``/var/log/messages`` file as a standard LogFileOutput class parser.
The important function is ``get(s)``, which finds all lines with the string
**s** and parses them into dictionaries with the following keys:
* ``timestamp`` - the time the log line was written
* ``procname`` - the process or facility that wrote the line
* ``hostname`` - the host that generated the log line
* ``message`` - the rest of the message (after the process name)
* ``raw_message`` - the raw message before being split.
It is best to use filters and/or scanners with the messages log, to speed up
parsing. These work on the raw message, before being parsed.
Sample log lines::
May 18 15:13:34 lxc-rhel68-sat56 jabberd/sm[11057]: session started: jid=rhn-dispatcher-sat@lxc-rhel6-sat56.redhat.com/superclient
May 18 15:13:36 lxc-rhel68-sat56 wrapper[11375]: --> Wrapper Started as Daemon
May 18 15:13:36 lxc-rhel68-sat56 wrapper[11375]: Launching a JVM...
May 18 15:24:28 lxc-rhel68-sat56 yum[11597]: Installed: lynx-2.8.6-27.el6.x86_64
May 18 15:36:19 lxc-rhel68-sat56 yum[11954]: Updated: sos-3.2-40.el6.noarch
Examples:
>>> Messages.filters.append('wrapper')
>>> Messages.token_scan('daemon_start', 'Wrapper Started as Daemon')
>>> msgs = shared[Messages]
>>> len(msgs.lines)
>>> wrapper_msgs = msgs.get('wrapper') # Can only rely on lines filtered being present
>>> wrapper_msgs[0]
{'timestamp': 'May 18 15:13:36', 'hostname': 'lxc-rhel68-sat56',
'procname': wrapper[11375]', 'message': '--> Wrapper Started as Daemon',
'raw_message': 'May 18 15:13:36 lxc-rhel68-sat56 wrapper[11375]: --> Wrapper Started as Daemon'
}
>>> msgs.daemon_start # Token set if matching lines present in logs
True
"""
from .. import LogFileOutput, parser
@parser('messages')
class Messages(LogFileOutput):
"""
Read the ``/var/log/messages`` file using the LogFileOutput parser class.
"""
def get(self, s):
"""
Parameters:
s (str): String to search for
Returns:
([dicts]): all lines that contain 's' as a list of dictionaries
Examples::
[
{'timestamp':'May 18 14:24:14',
'procname': 'kernel',
'hostname':'lxc-rhel68-sat56',
'message': '...',
'raw_message': '...: ...'
}, ...
]
"""
r = []
for l in self.lines:
if s in l:
info, msg = [i.strip() for i in l.split(': ', 1)]
msg_info = {
'messag | e': msg,
'raw_message': l
}
info_splits = info.split()
if len(info_splits) == 5:
msg_info['timestamp'] = ' '.join(info_splits[:3])
msg_info['hostname'] = info_splits[3]
| msg_info['procname'] = info_splits[4]
r.append(msg_info)
return r
|
chienlieu2017/it_management | odoo/addons/payment_buckaroo/tests/test_buckaroo.py | Python | gpl-3.0 | 7,079 | 0.002684 | # -*- coding: utf-8 -*-
from lxml import objectify
import urlparse
import odoo
from odoo.addons.payment.models.payment_acquirer import ValidationError
from odoo.addons.payment.tests.common import PaymentAcquirerCommon
from odoo.addons.payment_buckaroo.controllers.main import BuckarooController
from odoo.tools import mute_logger
@odoo.tests.common.at_install(False)
@odoo.tests.common.post_install(False)
class BuckarooCommon(PaymentAcquirerCommon):
def setUp(self):
super(BuckarooCommon, self).setUp()
# get the buckaroo account
self.buckaroo = self.env.ref('payment.payment_acquirer_buckaroo')
@odoo.tests.common.at_install(False)
@odoo.tests.common.post_install(False)
class BuckarooForm(BuckarooCommon):
def test_10_Buckaroo_form_render(self):
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
# be sure not to do stupid things
self.assertEqual(self.buckaroo.environment, 'test', 'test without test environment')
# ----------------------------------------
# Test: button direct rendering
# ----------------------------------------
form_values = {
'add_returndata': None,
'Brq_websitekey': self.buckaroo.brq_websitekey,
'Brq_amount': '2240.0',
'Brq_currency': 'EUR',
'Brq_invoicenumber': 'SO004',
'Brq_signature': '1b8c10074c622d965272a91a9e88b5b3777d2474', # update me
'brq_test': 'True',
'Brq_return': '%s' % urlparse.urljoin(base_url, BuckarooController._return_url),
'Brq_returncancel': '%s' % urlparse.urljoin(base_url, BuckarooController._cancel_url),
'Brq_returnerror': '%s' % urlparse.urljoin(base_url, BuckarooController._exception_url),
'Brq_returnreject': '%s' % urlparse.urljoin(base_url, BuckarooController._reject_url),
'Brq_culture': 'en-US',
}
# render the button
res = self.buckaroo.render(
'SO004', 2240.0, self.currency_euro.id,
partner_id=None,
partner_values=self.buyer_values)
# check form result
tree = objectify.fromstring(res)
self.assertEqual(tree.get('action'), 'https://testcheckout.buckaroo.nl/html/', 'Buckaroo: wrong form POST url')
for form_input in tree.input:
if form_input.get('name') in ['submit']:
continue
self.assertEqual(
form_input.get('value'),
form_values[form_input.get('name')],
'Buckaroo: wrong value for input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
)
# ----------------------------------------
# Test2: button using tx + validation
# ----------------------------------------
# create a new draft tx
tx = self.env['payment.transaction'].create({
'amount': 2240.0,
'acquirer_id': self.buckaroo.id,
'currency_id': self.currency_euro.id,
'reference': 'SO004',
'partner_id': self.buyer_id,
})
# render the button
res = self.buckaroo_id.render(
'should_be_erased', 2240.0, self.currency_euro,
tx_id=tx.id,
partner_id=None,
partner_values=self.buyer_values)
# check form result
tree = objectify.fromstring(res)
self.assertEqual(tree.get('action'), 'https://testcheckout.buckaroo.nl/html/', 'Buckaroo: wrong form POST url')
for form_input in tree.input:
if form_input.get('name') in ['submit']:
continue
self.assertEqual(
form_input.get('value'),
form_values[form_input.get('name')],
'Buckaroo: wrong value for form input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
)
@mute_logger('odoo.addons.payment_buckaroo.models.payment', 'ValidationError')
def test_20_buckaroo_form_management(self):
# be sure not to do stupid thing
self.assertEqual(self.buckaroo.environment, 'test', 'test without test environment')
# typical data posted by buckaroo after client has successfully paid
buckaroo_post_data = {
'BRQ_RETURNDATA': u'',
'BRQ_AMOUNT': u'2240.00',
'BRQ_CURRENCY': u'EUR',
'BRQ_CUSTOMER_NAME': u'Jan de Tester',
'BRQ_INVOICENUMBER': u'SO004',
'brq_payment': u'573311D081B04069BD6336001611DBD4',
'BRQ_PAYMENT_METHOD': u'paypal',
'BRQ_SERVICE_PAYPAL_PAYERCOUNTRY': u'NL',
'BRQ_SERVICE_PAYPAL_PAYEREMAIL': u'fhe@odoo.com',
'BRQ_SERVICE_PAYPAL_PAYERFIRSTNAME': u'Jan',
'BRQ_SERVICE_PAYPAL_PAYERLASTNAME': u'Tester',
'BRQ_SERVICE_PAYPAL_PAYERMIDDLENAME': u'de',
'BRQ_SERVICE_PAYPAL_PAYERSTATUS': u'verified',
'Brq_signature': u'175d82dd53a02bad393fee32cb1eafa3b6fbbd91',
'BRQ_STATUSCODE': u'190',
'BRQ_STATUSCODE_DETAIL': u'S001',
'BRQ_STATUSMESSA | GE': u'Transaction successfully processed',
'BRQ_TEST': u'true',
'BRQ_TIMESTAMP': u'2014-05-08 12:41:21',
'BRQ_TRANSACTIONS': u'D6106678E1D54EEB8093F5B3AC42EA7B',
'BRQ_WEBSITEKEY': u'5xTGyGyPyl',
}
# should raise error about unknown tx
with self.assertRaises(ValidationE | rror):
self.env['payment.transaction'].form_feedback(buckaroo_post_data, 'buckaroo')
tx = self.env['payment.transaction'].create({
'amount': 2240.0,
'acquirer_id': self.buckaroo.id,
'currency_id': self.currency_euro.id,
'reference': 'SO004',
'partner_name': 'Norbert Buyer',
'partner_country_id': self.country_france.id})
# validate it
tx.form_feedback(buckaroo_post_data, 'buckaroo')
# check state
self.assertEqual(tx.state, 'done', 'Buckaroo: validation did not put tx into done state')
self.assertEqual(tx.acquirer_reference, buckaroo_post_data.get('BRQ_TRANSACTIONS'), 'Buckaroo: validation did not update tx payid')
# reset tx
tx.write({'state': 'draft', 'date_validate': False, 'acquirer_reference': False})
# now buckaroo post is ok: try to modify the SHASIGN
buckaroo_post_data['BRQ_SIGNATURE'] = '54d928810e343acf5fb0c3ee75fd747ff159ef7a'
with self.assertRaises(ValidationError):
tx.form_feedback(buckaroo_post_data, 'buckaroo')
# simulate an error
buckaroo_post_data['BRQ_STATUSCODE'] = 2
buckaroo_post_data['BRQ_SIGNATURE'] = '4164b52adb1e6a2221d3d8a39d8c3e18a9ecb90b'
tx.form_feedback(buckaroo_post_data, 'buckaroo')
# check state
self.assertEqual(tx.state, 'error', 'Buckaroo: erroneous validation did not put tx into error state')
|
Ruide/angr-dev | cle/cle/memory.py | Python | bsd-2-clause | 10,840 | 0.00369 | import bisect
import struct
import cffi
__all__ = ('Clemory',)
# TODO: Further optimization is possible now that the list of backers is sorted
class Clemory(object):
"""
An object representing a memory space. Uses "backers" and "updates" to separate the concepts of loaded and written
memory and make lookups more efficient.
Accesses can be made with [index] notation.
"""
def __init__(self, arch, root=False):
self._arch = arch
self._backers = [] # tuple of (start, str)
self._updates = {}
self._pointer = 0
self._root = root
self._cbackers = [ ] # tuple of (start, cdata<buffer>)
self._needs_flattening_personal = True
def add_backer(self, start, data):
"""
Adds a backer to the memory.
:param start: The address where the backer should be loaded.
:param data: The backer itself. Can be either a string or another :class:`Clemory`.
"""
if not isinstance(data, (str, Clemory)):
raise TypeError("Data must be a string or a Clemory")
if start in self:
raise ValueError("Address %#x is already backed!" % start)
if isinstance(data, Clemory) and data._root:
raise ValueError("Cannot add a root clemory as a backer!")
bisect.insort(self._backers, (start, data))
self._needs_flattening_personal = True
def update_backer(self, start, data):
if not isinstance(data, (str, Clemory)):
raise TypeError("Data must be a string or a Clemory")
for i, (oldstart, _) in enumerate(self._backers):
if oldstart == start:
self._backers[i] = (start, data)
self._needs_flattening_personal = True
break
else:
raise ValueError("Can't find backer to update")
def remove_backer(self, start):
for i, (oldstart, _) in enumerate(self._backers):
if oldstart == start:
self._backers.pop(i)
self._needs_flattening_personal = True
break
else:
raise ValueError("Can't find backer to remove")
def __iter__(self):
for start, string in self._backers:
if isinstance(string, str):
for x in xrange(len(string)):
yield start + x
else:
for x in string:
yield start + x
def __getitem__(self, k):
return self.get_byte(k)
def get_byte(self, k, orig=False):
if not orig and k in self._updates:
return self._updates[k]
else:
for start, data in self._backers:
if isinstance(data, str):
if 0 <= k - start < len(data):
return data[k - start]
elif isinstance(data, Clemory):
try:
return data.get_byte(k - start, orig=orig)
except KeyError:
pass
raise KeyError(k)
def __setitem__(self, k, v):
if k not in self:
raise IndexError(k)
self._updates[k] = v
self._needs_flattening_personal = True
def __contains__(self, k):
try:
self.__getitem__(k)
return True
except KeyError:
return False
def __getstate__(self):
self._cbackers = [ ]
self._needs_flattening_personal = True
return self.__dict__
def __setstate__(self, data):
self.__dict__.update(data)
def read_bytes(self, addr, n, orig=False):
"""
Read up to `n` bytes at address `addr` in memory and return an array of bytes.
Reading will stop at the beginning of the first unallocated region found, or when
`n` bytes have been read.
"""
b = []
try:
for i in range(addr, addr+n):
b.append(self.get_byte(i, orig=orig))
except KeyError:
pass
return b
def write_bytes(self, addr, data):
"""
Write bytes from `data` at address `addr`.
"""
for i, c in enumerate(data):
self[addr+i] = c
def write_bytes_to_backer(self, addr, data):
"""
Write bytes from `data` at address `addr` to backer instead of self._updates. This is only needed when writing a
huge amount of data.
"""
pos = addr
to_insert = [ ]
i = 0
|
while i < len(self._backers) and data:
start, backer_data = self._backers[i] # self._backers is always sorted
size = len(backer_data)
stop = start + size
if | pos >= start:
if pos < stop:
if pos + len(data) > stop:
new_backer_data = backer_data[ : pos - start] + data[ : stop - pos]
self._backers[i] = (start, new_backer_data)
# slicing data
data = data[ stop - pos : ]
pos = stop
else:
new_backer_data = backer_data[ : pos - start] + data + backer_data[pos - start + len(data) : ]
self._backers[i] = (start, new_backer_data)
# We are done
break
i += 1
else:
# Look forward and see if we should insert a new backer
if i < len(self._backers) - 1:
if pos + len(data) <= start:
to_insert.append((pos, data[ : start - pos]))
data = data[start - pos : ]
pos = start
else:
# we reach the end of our data to insert
to_insert.append((pos, data))
break
else:
# seems we reach the end of the list...
to_insert.append((pos, data))
break
# Insert the blocks that are needed to insert into self._backers
for seg_start, seg_data in to_insert:
bisect.insort(self._backers, (seg_start, seg_data))
# Set the flattening_needed flag
self._needs_flattening_personal = True
def read_addr_at(self, where, orig=False):
"""
Read addr stored in memory as a series of bytes starting at `where`.
"""
by = ''.join(self.read_bytes(where, self._arch.bytes, orig=orig))
return struct.unpack(self._arch.struct_fmt(), by)[0]
def write_addr_at(self, where, addr):
"""
Writes `addr` into a series of bytes in memory at `where`.
"""
by = struct.pack(self._arch.struct_fmt(), addr % (2**self._arch.bits))
self.write_bytes(where, by)
@property
def _stride_repr(self):
out = []
for start, data in self._backers:
if isinstance(data, str):
out.append((start, bytearray(data)))
else:
out += map(lambda (substart, subdata), start=start: (substart+start, subdata), data._stride_repr)
for key, val in self._updates.iteritems():
for start, data in out:
if start <= key < start + len(data):
data[key - start] = val
break
else:
raise ValueError('There was an update to a Clemory not on top of any backer')
return out
@property
def stride_repr(self):
"""
Returns a representation of memory in a list of (start, end, data) where data is a string.
"""
return map(lambda (start, bytearr): (start, start+len(bytearr), str(bytearr)), self._stride_repr)
def seek(self, value):
"""
The stream-like function that sets the "file's" current position. Use with :func:`read()`.
:param value: The position to seek to.
"""
self._pointer = value
def read(self, nbytes):
"""
The stream-like function that reads up to a num |
biddyweb/merchant | billing/gateways/stripe_gateway.py | Python | bsd-3-clause | 8,557 | 0.000467 | from billing import Gateway, GatewayNotConfigured
from billing.signals import transaction_was_successful, transaction_was_unsuccessful
from billing.utils.credit_card import InvalidCard, Visa, MasterCard, \
AmericanExpress, Discover, CreditCard
import stripe
from django.conf import settings
class StripeGateway(Gateway):
supported_cardtypes = [Visa, MasterCard, AmericanExpress, Discover]
supported_countries = ['US']
default_currency = "USD"
homepage_url = "https://stripe.com/"
display_name = "Stripe"
def __init__(self):
merchant_settings = getattr(settings, "MERCHANT_SETTINGS")
if not merchant_settings or not merchant_settings.get("stripe"):
raise GatewayNotConfigured("The '%s' gateway is not correctly "
"configured." % self.display_name)
stripe_settings = merchant_settings["stripe"]
stripe.api_key = stripe_settings['API_KEY']
self.stripe = stripe
def purchase(self, amount, credit_card, options=None):
card = credit_card
if isinstance(credit_card, CreditCard):
if not self.validate_card(credit_card):
raise InvalidCard("Invalid Card")
card = {
'number': credit_card.number,
'exp_month': credit_card.month,
'exp_year': credit_card.year,
'cvc': credit_card.verification_value
}
try:
response = self.stripe.Charge.create(
amount=int(amount * 100),
currency=self.default_currency.lower(),
card=card)
except self.stripe.CardError as error:
transaction_was_unsuccessful.send(sender=self,
type="purchase",
response=error)
return {'status': 'FAILURE', 'response': error}
transaction_was_successful.send(sender=self,
type="purchase",
response=response)
return {'status': 'SUCCESS', 'response': response}
def store(self, credit_card, options=None):
card = credit_card
if isinstance(credit_card, CreditCard):
if not self.validate_card(credit_card):
raise InvalidCard("Invalid Card")
card = {
'number': credit_card.number,
'exp_month': credit_card.month,
'exp_year': credit_card.year,
'cvc': credit_card.verification_value
}
try:
customer = self.stripe.Customer.create(card=card)
except (self.stripe.CardError, self.stripe.InvalidRequestError) as error:
transaction_was_unsuccessful.send(sender=self,
type="store",
response=error)
return {'status': 'FAILURE', 'response': error}
transaction_was_successful.send(sender=self,
type="store",
response=customer)
return {'status': 'SUCCESS', 'response': customer}
def recurring(self, credit_card, options=None):
| card = credit_card
if isinstance(credit_card, CreditCard):
if not self.validate_card(credit_card):
raise InvalidCard("Invalid Card")
card = {
'number': credit_card.number,
'exp_month': credit_card.month,
'exp_year': credit_card.year,
'cvc': credit_card.verification_value
| }
try:
plan_id = options['plan_id']
self.stripe.Plan.retrieve(options['plan_id'])
try:
response = self.stripe.Customer.create(
card=card,
plan=plan_id
)
transaction_was_successful.send(sender=self,
type="recurring",
response=response)
return {"status": "SUCCESS", "response": response}
except self.stripe.CardError as error:
transaction_was_unsuccessful.send(sender=self,
type="recurring",
response=error)
return {"status": "FAILURE", "response": error}
except self.stripe.InvalidRequestError as error:
transaction_was_unsuccessful.send(sender=self,
type="recurring",
response=error)
return {"status": "FAILURE", "response": error}
except TypeError as error:
transaction_was_unsuccessful.send(sender=self,
type="recurring",
response=error)
return {"status": "FAILURE", "response": "Missing Plan Id"}
def unstore(self, identification, options=None):
try:
customer = self.stripe.Customer.retrieve(identification)
response = customer.delete()
transaction_was_successful.send(sender=self,
type="unstore",
response=response)
return {"status": "SUCCESS", "response": response}
except self.stripe.InvalidRequestError as error:
transaction_was_unsuccessful.send(sender=self,
type="unstore",
response=error)
return {"status": "FAILURE", "response": error}
def credit(self, identification, money=None, options=None):
try:
charge = self.stripe.Charge.retrieve(identification)
response = charge.refund(amount=money)
transaction_was_successful.send(sender=self,
type="credit",
response=response)
return {"status": "SUCCESS", "response": response}
except self.stripe.InvalidRequestError as error:
transaction_was_unsuccessful.send(sender=self,
type="credit",
response=error)
return {"status": "FAILURE", "error": error}
def authorize(self, money, credit_card, options=None):
card = credit_card
if isinstance(credit_card, CreditCard):
if not self.validate_card(credit_card):
raise InvalidCard("Invalid Card")
card = {
'number': credit_card.number,
'exp_month': credit_card.month,
'exp_year': credit_card.year,
'cvc': credit_card.verification_value
}
try:
token = self.stripe.Token.create(
card=card,
amount=int(money * 100),
)
transaction_was_successful.send(sender=self,
type="authorize",
response=token)
return {'status': "SUCCESS", "response": token}
except self.stripe.InvalidRequestError as error:
transaction_was_unsuccessful.send(sender=self,
type="authorize",
response=error)
return {"status": "FAILURE", "response": error}
def capture(self, money, authorization, options=None):
try:
response = self.stripe.Charge.create(
amount=int(money * 100),
card=authorization,
currency=self.default_currency.lower()
)
transaction_was_successful.send(sender=self,
type="capture",
respo |
yeyanchao/calibre | src/calibre/library/server/browse.py | Python | gpl-3.0 | 35,184 | 0.005088 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import operator, os, json, re
from binascii import hexlify, unhexlify
from collections import OrderedDict
import cherrypy
from calibre.constants import filesystem_encoding
from calibre import isbytestring, force_unicode, fit_image, \
prepare_string_for_xml
from calibre.utils.filenames import ascii_filename
from calibre.utils.config import prefs
from calibre.utils.icu import sort_key
from calibre.utils.magick import Image
from calibre.library.comments import comments_to_html
from calibre.library.server import custom_fields_to_display
from calibre.library.field_metadata import category_icon_map
from calibre.library.server.utils import quote, unquote
def xml(*args, **kwargs):
ans = prepare_string_for_xml(*args, **kwargs)
return ans.replace(''', ''')
def render_book_list(ids, prefix, suffix=''): # {{{
pages = []
num = len(ids)
pos = 0
delta = 25
while ids:
| page = list(ids[:delta])
pages.append((page, pos))
ids = ids[delta:]
pos += len(page)
page_template = u'''\
<div class="page" id="page{0}">
<div class="load_data" title="{1}">
<span class="url" title="{prefix}/browse/booklist_page"></span>
<span cl | ass="start" title="{start}"></span>
<span class="end" title="{end}"></span>
</div>
<div class="loading"><img src="{prefix}/static/loading.gif" /> {2}</div>
<div class="loaded"></div>
</div>
'''
pagelist_template = u'''\
<div class="pagelist">
<ul>
{pages}
</ul>
</div>
'''
rpages, lpages = [], []
for i, x in enumerate(pages):
pg, pos = x
ld = xml(json.dumps(pg), True)
start, end = pos+1, pos+len(pg)
rpages.append(page_template.format(i, ld,
xml(_('Loading, please wait')) + '…',
start=start, end=end, prefix=prefix))
lpages.append(' '*20 + (u'<li><a href="#" title="Books {start} to {end}"'
' onclick="gp_internal(\'{id}\'); return false;"> '
'{start} to {end}</a></li>').format(start=start, end=end,
id='page%d'%i))
rpages = u'\n\n'.join(rpages)
lpages = u'\n'.join(lpages)
pagelist = pagelist_template.format(pages=lpages)
templ = u'''\
<h3>{0} {suffix}</h3>
<div id="booklist">
<div id="pagelist" title="{goto}">{pagelist}</div>
<div class="listnav topnav">
{navbar}
</div>
{pages}
<div class="listnav bottomnav">
{navbar}
</div>
</div>
'''
gp_start = gp_end = ''
if len(pages) > 1:
gp_start = '<a href="#" onclick="goto_page(); return false;" title="%s">' % \
(_('Go to') + '…')
gp_end = '</a>'
navbar = u'''\
<div class="navleft">
<a href="#" onclick="first_page(); return false;">{first}</a>
<a href="#" onclick="previous_page(); return false;">{previous}</a>
</div>
<div class="navmiddle">
{gp_start}
<span class="start">0</span> to <span class="end">0</span>
{gp_end}of {num}
</div>
<div class="navright">
<a href="#" onclick="next_page(); return false;">{next}</a>
<a href="#" onclick="last_page(); return false;">{last}</a>
</div>
'''.format(first=_('First'), last=_('Last'), previous=_('Previous'),
next=_('Next'), num=num, gp_start=gp_start, gp_end=gp_end)
return templ.format(_('Browsing %d books')%num, suffix=suffix,
pages=rpages, navbar=navbar, pagelist=pagelist,
goto=xml(_('Go to'), True) + '…')
# }}}
def utf8(x): # {{{
if isinstance(x, unicode):
x = x.encode('utf-8')
return x
# }}}
def render_rating(rating, url_prefix, container='span', prefix=None): # {{{
if rating < 0.1:
return '', ''
added = 0
if prefix is None:
prefix = _('Average rating')
rstring = xml(_('%(prefix)s: %(rating).1f stars')%dict(
prefix=prefix, rating=rating if rating else 0.0),
True)
ans = ['<%s class="rating">' % (container)]
for i in range(5):
n = rating - added
x = 'half'
if n <= 0.1:
x = 'off'
elif n >= 0.9:
x = 'on'
ans.append(
u'<img alt="{0}" title="{0}" src="{2}/static/star-{1}.png" />'.format(
rstring, x, url_prefix))
added += 1
ans.append('</%s>'%container)
return u''.join(ans), rstring
# }}}
def get_category_items(category, items, restriction, datatype, prefix): # {{{
if category == 'search':
items = [x for x in items if x.name != restriction]
def item(i):
templ = (u'<div title="{4}" class="category-item">'
'<div class="category-name">'
'<a href="{5}{3}" title="{4}">{0}</a></div>'
'<div>{1}</div>'
'<div>{2}</div></div>')
rating, rstring = render_rating(i.avg_rating, prefix)
if i.use_sort_as_name:
name = xml(i.sort)
else:
name = xml(i.name)
if datatype == 'rating':
name = xml(_('%d stars')%int(i.avg_rating))
id_ = i.id
if id_ is None:
id_ = hexlify(force_unicode(name).encode('utf-8'))
id_ = xml(str(id_))
desc = ''
if i.count > 0:
desc += '[' + _('%d books')%i.count + ']'
q = i.category
if not q:
q = category
href = '/browse/matches/%s/%s'%(quote(q), quote(id_))
return templ.format(xml(name), rating,
xml(desc), xml(href, True), rstring, prefix)
items = list(map(item, items))
return '\n'.join(['<div class="category-container">'] + items + ['</div>'])
# }}}
class Endpoint(object): # {{{
'Manage encoding, mime-type, last modified, cookies, etc.'
def __init__(self, mimetype='text/html; charset=utf-8', sort_type='category'):
self.mimetype = mimetype
self.sort_type = sort_type
self.sort_kwarg = sort_type + '_sort'
self.sort_cookie_name = 'calibre_browse_server_sort_'+self.sort_type
def __call__(eself, func):
def do(self, *args, **kwargs):
if 'json' not in eself.mimetype:
sort_val = None
cookie = cherrypy.request.cookie
if cookie.has_key(eself.sort_cookie_name):
sort_val = cookie[eself.sort_cookie_name].value
kwargs[eself.sort_kwarg] = sort_val
# Remove AJAX caching disabling jquery workaround arg
kwargs.pop('_', None)
ans = func(self, *args, **kwargs)
cherrypy.response.headers['Content-Type'] = eself.mimetype
updated = self.db.last_modified()
cherrypy.response.headers['Last-Modified'] = \
self.last_modified(max(updated, self.build_time))
ans = utf8(ans)
return ans
do.__name__ = func.__name__
return do
# }}}
class BrowseServer(object):
def add_routes(self, connect):
base_href = '/browse'
connect('browse', base_href, self.browse_catalog)
connect('browse_catalog', base_href+'/category/{category}',
self.browse_catalog)
connect('browse_category_group',
base_href+'/category_group/{category}/{group}',
self.browse_category_group)
connect('browse_matches',
base_href+'/matches/{category}/{cid}',
self.browse_matches)
connect('browse_booklist_page',
base_href+'/booklist_page',
self.browse_booklist_page)
connect('brow |
faircloth-lab/rhino | rhino/core.py | Python | bsd-3-clause | 2,759 | 0.006162 |
import os
import sys
import json
import time
import numpy
import dendropy
from collections import defaultdict
import pdb
def parse_site_rates(rate_file, correction = 1, test = False, count = 0):
"""Parse the site rate file returned from hyphy to a vector of rates"""
# for whatever reason, when run in a virtualenv (and perhaps in other
# cases, the file does not seem to be written quite before we try
# to read it. so, pause and try to re-read up to three-times.
try:
data = json.load(open(rate_file, 'r'))
except IOError as e:
if count <= 3:
count += 1
time.sleep(0.1)
parse_site_rates(rate_file, correction, test, count)
else:
raise IOError("Cannot open {0}: {1}".format(rate_file, e))
rates = numpy.array([line["rate"] for line in data["sites"]["rates"]])
corrected = rates/correction
if not test:
data["sites"]["corrected_rates"] = [{"site":k + 1,"rate":v} \
for k,v in enumerate(corrected)]
json.dump(data, open(rate_file,'w'), indent = 4)
return corrected
def correct_branch_lengths(tree_file, format, output_dir):
"""Scale branch lengths to values shorter than 100"""
tree = dendropy.Tree.get_from_path(tree_file, format)
depth = tree.seed_node.distance_from_tip()
mean_branch_length = tree.length()/(2 * len(tree.leaf_nodes()) - 3)
string_len = len(str(int(mean_branch_length + 0.5)))
if string_len > 1:
correction_factor = 10 ** string_len
else:
correction_factor = 1
for edge in tree.preorder_edge_iter():
if edge.length:
edge.length /= correction_factor
pth = os.path.join(output_dir, '{0}.corrected.newick'.format(
os.path.basename(tree_file)
))
tree.write_to_path(pth, 'new | ick')
return pth, correction_factor
def get_net_pi_for_periods(pi, times):
"""Sum across the PI values for the requested times"""
sums = numpy.nansum(pi, axis=1)[times]
return dict(zip(times, sums))
def get_informative_sites(alignment, threshold=4):
"""Returns a list, where True indicates a site which was over the threshold
for informativeness.
"""
taxa = dendropy.DnaCharacterMatrix.get_from_p | ath(alignment, 'nexus')
results = defaultdict(int)
for cells in taxa.vectors():
assert len(cells) == taxa.vector_size # should all have equal lengths
for idx, cell in enumerate(cells):
results[idx] += 1 if str(cell).upper() in "ATGC" else 0
return numpy.array([1 if results[x] >= threshold else numpy.nan for x in sorted(results)])
def cull_uninformative_rates(rates, inform):
"""Zeroes out rates which are uninformative"""
return rates * inform
|
sunfounder/SunFounder_PiSmart | pismart/SpeakPython/SpeakPythonJSGFLexer.py | Python | gpl-2.0 | 38,081 | 0.012815 | # $ANTLR 3.4 SpeakPythonJSGF.g 2015-09-25 20:19:04
import sys
from antlr3 import *
#from antlr3.compat import set, frozenset
from sets import Set as set, ImmutableSet as frozenset
# for convenience in actions
HIDDEN = BaseRecognizer.HIDDEN
# token types
EOF=-1
ARROW=4
AT=5
AT_GLOBAL_OPTIONS=6
AT_OPTIONS=7
AT_RESULTS=8
AT_TESTS=9
B_ARROW=10
COMMA=11
COMMENT=12
END_DQUOTE_STRING=13
END_SQUOTE_STRING=14
EQ=15
HASH_NAME=16
INSIDE_DQUOTE_STRING=17
INSIDE_SQUOTE_STRING=18
KLEENE=19
LA_BR=20
LC_BR=21
LR_BR=22
LS_BR=23
NEWLINE=24
NUM=25
OR=26
PLUS=27
QSTN=28
QUOTE_STRING=29
RA_BR=30
RC_BR=31
REGEX=32
REGEX_LABEL=33
RR_BR=34
RS_BR=35
SEMI=36
STAR=37
START_DQUOTE_STRING=38
START_SQUOTE_STRING=39
TILDE=40
UNDERSCORE_NUM=41
VAR_NAME=42
WHITE_SPACE=43
WORD=44
class SpeakPythonJSGFLexer(Lexer):
grammarFileName = "SpeakPythonJSGF.g"
api_version = 1
def __init__(self, input=None, state=None):
if state is None:
state = RecognizerSharedState()
super(SpeakPythonJSGFLexer, self).__init__(input, state)
self.delegates = []
commentMode = False;
multiCommentMode = False;
stringMode = False;
# $ANTLR start "COMMENT"
def mCOMMENT(self, ):
try:
_type = COMMENT
_channel = DEFAULT_CHANNEL
# SpeakPythonJSGF.g:215:8: ( '||' (~ ( '\\r' | '\\n' ) )* )
# SpeakPythonJSGF.g:215:10: '||' (~ ( '\\r' | '\\n' ) )*
pass
self.match("||")
# SpeakPythonJSGF.g:215:15: (~ ( '\\r' | '\\n' ) )*
while True: #loop1
alt1 = 2
LA1_0 = self.input.LA(1)
if ((0 <= LA1_0 <= 9) or (11 <= LA1_0 <= 12) or (14 <= LA1_0 <= 65535)) :
alt1 = 1
if alt1 == 1:
# SpeakPythonJSGF.g:
pass
if (0 <= self.input.LA(1) <= 9) or (11 <= self.input.LA(1) <= 12) or (14 <= self.input.LA(1) <= 65535):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
else:
break #loop1
#action start
_channel = HIDDEN;
#action end
self._state.type = _type
self._state.channel = _channel
finally:
pass
# $ANTLR end "COMMENT"
# $ANTLR start "START_SQUOTE_STRING"
def mSTART_SQUOTE_STRING(self, ):
try:
# SpeakPythonJSGF.g:224:29: ({...}? => '\\'' )
# SpeakPythonJSGF.g:224:30: {...}? => '\\''
pass
if not ((not (self.commentMode or self.multiCommentMode))):
raise FailedPredicateException(self.input, "START_SQUOTE_STRING", "not (self.commentMode or self.multiCommentMode)")
self.match(39)
#action start
self.stringMode = True;
#action end
finally:
pass
# $ANTLR end "START_SQUOTE_STRING"
# $ANTLR start "START_DQUOTE_STRING"
def mSTART_DQUOTE_STRING(self, ):
try:
# SpeakPythonJSGF.g:225:29: ({...}? => '\"' )
# SpeakPythonJSGF.g:225:30: {...}? => '\"'
pass
if not ((not (self.commentMode or self.multiCommentMode))):
raise FailedPredicateException(self.input, "START_DQUOTE_STRING", "not (self.commentMode or self.multiCommentMode)")
self.match(34)
#action start
self.stringMode = True;
#ac | tion end |
finally:
pass
# $ANTLR end "START_DQUOTE_STRING"
# $ANTLR start "INSIDE_SQUOTE_STRING"
def mINSIDE_SQUOTE_STRING(self, ):
try:
# SpeakPythonJSGF.g:227:30: ({...}? => (~ ( '\\'' ) )* )
# SpeakPythonJSGF.g:227:31: {...}? => (~ ( '\\'' ) )*
pass
if not ((self.stringMode)):
raise FailedPredicateException(self.input, "INSIDE_SQUOTE_STRING", "self.stringMode")
# SpeakPythonJSGF.g:227:52: (~ ( '\\'' ) )*
while True: #loop2
alt2 = 2
LA2_0 = self.input.LA(1)
if ((0 <= LA2_0 <= 38) or (40 <= LA2_0 <= 65535)) :
alt2 = 1
if alt2 == 1:
# SpeakPythonJSGF.g:
pass
if (0 <= self.input.LA(1) <= 38) or (40 <= self.input.LA(1) <= 65535):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
else:
break #loop2
finally:
pass
# $ANTLR end "INSIDE_SQUOTE_STRING"
# $ANTLR start "INSIDE_DQUOTE_STRING"
def mINSIDE_DQUOTE_STRING(self, ):
try:
# SpeakPythonJSGF.g:228:30: ({...}? => (~ ( '\"' ) )* )
# SpeakPythonJSGF.g:228:31: {...}? => (~ ( '\"' ) )*
pass
if not ((self.stringMode)):
raise FailedPredicateException(self.input, "INSIDE_DQUOTE_STRING", "self.stringMode")
# SpeakPythonJSGF.g:228:52: (~ ( '\"' ) )*
while True: #loop3
alt3 = 2
LA3_0 = self.input.LA(1)
if ((0 <= LA3_0 <= 33) or (35 <= LA3_0 <= 65535)) :
alt3 = 1
if alt3 == 1:
# SpeakPythonJSGF.g:
pass
if (0 <= self.input.LA(1) <= 33) or (35 <= self.input.LA(1) <= 65535):
self.input.consume()
else:
mse = MismatchedSetException(None, self.input)
self.recover(mse)
raise mse
else:
break #loop3
finally:
pass
# $ANTLR end "INSIDE_DQUOTE_STRING"
# $ANTLR start "END_SQUOTE_STRING"
def mEND_SQUOTE_STRING(self, ):
try:
# SpeakPythonJSGF.g:230:27: ({...}? => '\\'' )
# SpeakPythonJSGF.g:230:28: {...}? => '\\''
pass
if not ((self.stringMode)):
raise FailedPredicateException(self.input, "END_SQUOTE_STRING", "self.stringMode")
self.match(39)
#action start
self.stringMode = False;
#action end
finally:
pass
# $ANTLR end "END_SQUOTE_STRING"
# $ANTLR start "END_DQUOTE_STRING"
    def mEND_DQUOTE_STRING(self, ):
        """ANTLR-generated lexer rule: match the closing '"' of a string and
        leave string mode (clears ``self.stringMode``).

        Only valid while ``self.stringMode`` is set; otherwise raises
        FailedPredicateException.  Auto-generated from SpeakPythonJSGF.g.
        """
        try:
            # SpeakPythonJSGF.g:231:27: ({...}? => '\"' )
            # SpeakPythonJSGF.g:231:28: {...}? => '\"'
            pass
            if not ((self.stringMode)):
                raise FailedPredicateException(self.input, "END_DQUOTE_STRING", "self.stringMode")
            self.match(34)
            #action start
            self.stringMode = False;
            #action end
        finally:
            pass
# $ANTLR end "END_DQUOTE_STRING"
# $ANTLR start "QUOTE_STRING"
def mQUOTE_STRING(self, ):
try:
_type = QUOTE_STRING
_channel = DEFAULT_CHANNEL
# SpeakPythonJSGF.g:233:13: ( ( START_SQUOTE_STRING INSIDE_SQUOTE_STRING END_SQUOTE_STRING ) | ( START_DQUOTE_STRING INSIDE_DQUOTE_STRING END_DQUOTE_STRING ) )
alt4 = 2
LA4_0 = self.input.LA(1)
if (LA4_0 == 39) and ((not (self.commentMode or self.multiCommentMode))):
alt4 = 1
elif (LA4_0 == 34) and ((not (self.commentMode or self.multiCommentMode))):
alt4 = 2
else:
nvae = NoViableAltException("", 4, 0, self.input)
raise nvae
if alt4 == 1:
# SpeakPythonJSGF.g:233:15: ( START_SQUOTE_STRING INSIDE_SQUOTE_STRING END_SQUOTE_STRING )
pass
# SpeakPythonJSGF.g:233: |
mincoin-project/mincoin | qa/rpc-tests/disablewallet.py | Python | mit | 1,874 | 0.00587 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2015-2019 The Mincoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Exercise API with -disablewallet.
#
from test_framework.test_framework import MincoinTestFramework
from test_framework.util import *
class DisableWalletTest (MincoinTestFramework):
    """Exercise the node RPC API when the wallet is disabled (-disablewallet).

    Checks that address validation still works and that mining to an
    explicit address behaves correctly without a wallet.
    """

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 1

    def setup_network(self, split=False):
        # Single node started with the wallet RPC surface disabled.
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [['-disablewallet']])
        self.is_network_split = False
        self.sync_all()

    def run_test (self):
        # Check regression: https://github.com/bitcoin/bitcoin/issues/6963#issuecomment-154548880
        x = self.nodes[0].validateaddress('3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
        assert(x['isvalid'] == False)
        x = self.nodes[0].validateaddress('mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
        assert(x['isvalid'] == True)

        # Checking mining to an address without a wallet: a valid address
        # must not produce any of these failure messages.
        try:
            self.nodes[0].generatetoaddress(1, 'mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ')
        except JSONRPCException as e:
            assert("Invalid address" not in e.error['message'])
            assert("ProcessNewBlock, block not accepted" not in e.error['message'])
            assert("Couldn't create new block" not in e.error['message'])

        # Mining to an invalid address must raise "Invalid address".
        try:
            self.nodes[0].generatetoaddress(1, '3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy')
            raise AssertionError("Must not mine to invalid address!")
        except JSONRPCException as e:
            assert("Invalid address" in e.error['message'])
# Run the test directly when invoked as a script.
if __name__ == '__main__':
    DisableWalletTest().main()
|
zephyrplugins/zephyr | zephyr.plugin.jython/jython2.5.2rc3/Lib/encodings/johab.py | Python | epl-1.0 | 1,023 | 0.006843 | #
# johab.py: Python Unicode Codec for JOHAB
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_kr, codecs
import _multibytecodec as mbc
codec = _codecs_kr.getcodec('johab')
class Codec(codecs.Codec):
    # Stateless encode/decode entry points, delegated to the C johab codec.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # Chunk-at-a-time encoder backed by the C johab codec.
    codec = codec
# NOTE: the original text here was garbled by extraction ("IncrementalDec |
# oder", "| codec = codec"); reconstructed to match the standard CPython
# multibyte codec boilerplate used by the sibling classes in this file.
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # Chunk-at-a-time decoder backed by the C johab codec.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # File-like reading wrapper around the johab codec.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # File-like writing wrapper around the johab codec.
    codec = codec
def getregentry():
    """Return the CodecInfo record used to register the 'johab' codec."""
    stateless = Codec()
    return codecs.CodecInfo(
        name='johab',
        encode=stateless.encode,
        decode=stateless.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
mdecourse/2016fallcadp | data/pyIGES/docs/examples/benchmarks/__init__.py | Python | agpl-3.0 | 1,103 | 0.00816 | #!python3.3
# -*- coding: utf-8 -*-
"""
.. module:: examples.benchmarks
:platform: Agnostic, Windows
:synopsis: Full suite of benchmarks
Created on 10/08/2013
"""
def standard_iges_setup(system, filename):
    """Populate *system* (a pyIGES IGES object) with the Start and Global
    section values shared by all benchmark examples.

    The product identification fields are derived from *filename* (the part
    before the first '.'); the file name itself is recorded verbatim.
    Raises ValueError (via str.index) if *filename* contains no dot.
    """
    system.StartSection.Prolog = " "

    # Numeric environment of the sending system (IGES Global section).
    system.GlobalSection.IntegerBits = int(32)
    system.GlobalSection.SPMagnitude = int(38)
    system.GlobalSection.SPSignificance = int(6)
    system.GlobalSection.DPMagnitude = int(38)
    system.GlobalSection.DPSignificance = int(15)

    system.GlobalSection.MaxNumberLineWeightGrads = int(8)
    system.GlobalSection.WidthMaxLineWeightUnits = float(0.016)
    system.GlobalSection.MaxCoordValue = float(71)

    # Identify the product by the file name minus its extension.
    index_dot = filename.index('.')
    system.GlobalSection.ProductIdentificationFromSender = filename[:index_dot]
    system.GlobalSection.FileName = filename
    system.GlobalSection.ProductIdentificationForReceiver = \
        system.GlobalSection.ProductIdentificationFromSender

    system.GlobalSection.AuthorOrg = "Queensland Uni. of Tech."
    system.GlobalSection.NameOfAuthor = "Rodney Persky"
|
statX/nz-houses | analysis/listing_text.py | Python | bsd-2-clause | 1,824 | 0.061952 |
import cPickle
import nz_houses_dao as dao
from nltk.corpus import stopwords
import re
import gzip
class Residential:
    """Placeholder for a residential-listing record.

    NOTE(review): the original source here was syntactically invalid (a
    stray 'distionary' token, a bare 'def', and __init__ lacking 'self');
    reconstructed as a minimal valid skeleton with the same public name.
    """

    def __init__(self):
        pass
def generate_word_lists(descriptions):
    """Tokenise listing descriptions into lists of lowercase words.

    Strips digits and the area markers 'sqm'/'m2', then drops English
    stopwords.  *descriptions* maps listingId -> description text; returns
    a dict mapping listingId -> [word, ...].
    """
    # Build the stopword set once: the original called
    # stopwords.words('english') inside the filter lambda, re-loading the
    # NLTK corpus for every single word (O(words * corpus)).
    stop_words = set(stopwords.words('english'))
    descriptions_features = {}
    i = 0
    for listingId in descriptions.keys():
        a = descriptions[listingId].lower()
        a = re.sub(r'\d+', r'', a)
        a = re.sub(r'sqm', r'', a)
        a = re.sub(r'm2', r'', a)
        a_words = re.findall(r'\w+', a)  # ,flags = re.UNICODE | re.LOCALE)
        a_words = [w for w in a_words if w not in stop_words]
        descriptions_features[listingId] = a_words
        # Progress indicator: one line per 50 listings processed.
        if i % 50 == 0:
            print(i)
        i += 1
    return descriptions_features
def print_words(a_words):
    """Print the words of *a_words* on one line, separated by spaces.

    Output matches the original implementation exactly, including the
    trailing space after the last word.  Uses str.join instead of the
    original quadratic ``b = b + word + ' '`` loop, and the print()
    function form so the code runs on both Python 2 and 3.
    """
    print(''.join(str(word) + ' ' for word in a_words))
if __name__ == '__main__':
    dbFilePath = '/Users/james/development/code_personal/nz-houses/db/prod1.db'
    #df_1 = read_listing_table(dbFilePath)
    #df_1 = df_sqlite[df_sqlite['ListingId'] > 641386568]
    #print df_1.describe()
    #a = df_1[-1:]['Body']
    #print a.to_string()
    query_result = dao.read_listing_table(dbFilePath)

    # pickle_flag == 0: tokenise every listing description and cache the
    # result; pickle_flag == 1: load the cached tokens and show a sample.
    pickle_flag = 1
    if pickle_flag == 0:
        descriptions = {}
        for row in query_result:
            descriptions[row[0]] = row[1]
        descriptions_features = generate_word_lists(descriptions)
        with gzip.open('/Users/james/development/code_personal/nz-houses/db/descriptions_features.pkl.gz', 'wb') as f:
            cPickle.dump(descriptions_features, f, protocol=2)
    if pickle_flag == 1:
        with gzip.open('/Users/james/development/code_personal/nz-houses/db/descriptions_features.pkl.gz', 'rb') as f:
            descriptions_features = cPickle.load(f)
        # Show the word lists for the last ten listing ids.
        i = 0
        for listingId in reversed(descriptions_features.keys()):
            print(listingId)
            print_words(descriptions_features[listingId])
            print('-----------')
            i += 1
            if i == 10:
                break
|
eduNEXT/edunext-ecommerce | ecommerce/core/management/commands/tests/test_sync_hubspot.py | Python | agpl-3.0 | 9,618 | 0.002911 | """
Test the sync_hubspot management command
"""
from datetime import datetime, timedelta
from io import StringIO
from django.core.management import call_command
from django.core.management.base import CommandError
from factory.django import get_model
from mock import patch
from slumber.exceptions import HttpClientError
from ecommerce.core.management.commands.sync_hubspot import Command as sync_command
from ecommerce.extensions.test.factories import create_basket, create_order
from ecommerce.tests.factories import SiteConfigurationFactory, UserFactory
from ecommerce.tests.testcases import TestCase
SiteConfiguration = get_model('core', 'SiteConfiguration')
Basket = get_model('basket', 'Basket')
DEFAULT_INITIAL_DAYS = 1
class TestSyncHubspotCommand(TestCase):
"""
Test sync_hubspot management command.
"""
order = None
hubspot_site_configuration = None
    def setUp(self):
        # Build one Hubspot-enabled site plus an unsubmitted basket and a
        # submitted order, so every test has both kinds of data to sync.
        super(TestSyncHubspotCommand, self).setUp()
        self.hubspot_site_configuration = SiteConfigurationFactory.create(
            hubspot_secret_key='test_key',
        )
        self._create_basket(self.hubspot_site_configuration.site)
        self._create_order('1122', self.hubspot_site_configuration.site)
def _get_date(self, days=1):
return datetime.now() - timedelta(days=days)
    def _create_basket(self, site):
        """
        Creates the basket with given site
        """
        # Backdate the basket to yesterday so it falls inside the command's
        # --initial-sync-day window of unsynced carts.
        basket = create_basket(site=site)
        basket.date_created = self._get_date()
        basket.save()
        # Give the basket's product recognisable title/description text.
        basket_line = basket.lines.first()
        product = basket_line.product
        product.title = "product-title-dummy"
        product.description = "product-description-dummy"
        product.save()
    def _create_order(self, order_number, site):
        """
        Creates the order with given order_number and
        site also update the order's product's title.
        """
        order = create_order(
            number="order-{order_number}".format(order_number=order_number),
            site=site,
            user=UserFactory()
        )
        order_line = order.lines.first()
        product = order_line.product
        product.title = "product-title-{order_number}".format(order_number=order_number)
        product.save()
        # Backdate the basket: created two days ago, submitted yesterday, so
        # the order counts as checked out within the sync window.
        basket = order.basket
        basket.date_created = self._get_date(days=2)
        basket.date_submitted = self._get_date(days=1)
        basket.save()
def _mocked_sync_errors_messages_endpoint(self):
"""
Returns mocked sync_errors_messages_endpoint's response
"""
return {'results': [
{'objectType': 'DEAL', 'integratorObjectId': '1234', 'details': 'dummy-details-deal'},
{'objectType': 'PRODUCT', 'integratorObjectId': '4321', 'details': 'dummy-details-product'},
]}
def _get_command_output(self, is_stderr=False):
"""
Runs the command and returns the stdout or stderr output of command.
"""
out = StringIO()
initial_sync_days_param = '--initial-sync-day=' + str(DEFAULT_INITIAL_DAYS)
if is_stderr:
call_command('sync_hubspot', initial_sync_days_param, stderr=out)
else:
call_command('sync_hubspot', initial_sync_days_param, stdout=out)
return out.getvalue()
    @patch.object(sync_command, '_hubspot_endpoint')
    def test_with_no_hubspot_secret_keys(self, mocked_hubspot):
        """
        Test with SiteConfiguration having NOT any hubspot_secret_key.
        """
        # removing keys
        SiteConfiguration.objects.update(hubspot_secret_key='')
        # making sure there are still SiteConfigurations exists
        self.assertTrue(SiteConfiguration.objects.count() > 0)
        output = self._get_command_output()
        self.assertIn('No Hubspot enabled SiteConfiguration Found.', output)
        # With no Hubspot-enabled sites the endpoint must never be called.
        self.assertEqual(mocked_hubspot.call_count, 0)
    @patch.object(sync_command, '_hubspot_endpoint')
    def test_without_unsynced_carts(self, mocked_hubspot):
        """
        Test with SiteConfiguration having hubspot_secret_key and last_synced_order doesn't exit.
        1. Install Bridge
        2. Define settings
        3. Sync-error
        """
        with patch.object(sync_command, '_get_unsynced_carts', return_value=None):
            output = self._get_command_output()
            self.assertIn(
                'No data found to sync for site {site}'.format(site=self.hubspot_site_configuration.site.domain),
                output
            )
            # Exactly the three endpoint calls listed in the docstring; no
            # upserts happen when there are no unsynced carts.
            self.assertEqual(mocked_hubspot.call_count, 3)
@patch.object(sync_command, '_hubspot_endpoint')
def test_upsert_hubspot_objects(self, mocked_hubspot):
"""
Test when _upsert_hubspot_objects function raises an exception.
"""
with patch.object(sync_ | command, '_install_hubspot_ecommerce_bridge', return_value=True), \
patch.object(sync_command, '_define_hubspot_ecommerce_settings', return_value=True):
# if _upsert_hubspot_objects raises an exception
mocked_hubspot.side_effect = HttpClientError
output = self._get_command_output(is_stderr=True)
self.assertIn('An error occurred while upserting', output)
| @patch.object(sync_command, '_hubspot_endpoint')
def test_install_hubspot_ecommerce_bridge(self, mocked_hubspot):
"""
Test _install_hubspot_ecommerce_bridge function.
"""
with patch.object(sync_command, '_define_hubspot_ecommerce_settings', return_value=False):
output = self._get_command_output()
self.assertIn('Successfully installed hubspot ecommerce bridge', output)
# if _install_hubspot_ecommerce_bridge raises an exception
mocked_hubspot.side_effect = HttpClientError
output = self._get_command_output(is_stderr=True)
self.assertIn('An error occurred while installing hubspot ecommerce bridge', output)
    @patch.object(sync_command, '_hubspot_endpoint')
    def test_define_hubspot_ecommerce_settings(self, mocked_hubspot):
        """
        Test _define_hubspot_ecommerce_settings function.
        """
        with patch.object(sync_command, '_install_hubspot_ecommerce_bridge', return_value=True):
            output = self._get_command_output()
            self.assertIn('Successfully defined the hubspot ecommerce settings', output)
            # if _define_hubspot_ecommerce_settings raises an exception
            # (second run: the failing endpoint must surface on stderr)
            mocked_hubspot.side_effect = HttpClientError
            output = self._get_command_output(is_stderr=True)
            self.assertIn('An error occurred while defining hubspot ecommerce settings', output)
@patch.object(sync_command, '_hubspot_endpoint')
def test_sync_errors_messages_endpoint(self, mocked_hubspot):
"""
Test _call_sync_errors_messages_endpoint function.
"""
with patch.object(sync_command, '_install_hubspot_ecommerce_bridge', return_value=True), \
patch.object(sync_command, '_define_hubspot_ecommerce_settings', return_value=True), \
patch.object(sync_command, '_sync_data'):
mocked_response = self._mocked_sync_errors_messages_endpoint()
mocked_hubspot.return_value = mocked_response
output = self._get_command_output()
self.assertIn(
'sync-error endpoint: for {object_type} with id {id} for site {site}: {message}'.format(
object_type=mocked_response.get('results')[0].get('objectType'),
id=mocked_response.get('results')[0].get('integratorObjectId'),
site=self.hubspot_site_configuration.site.domain,
message=mocked_response.get('results')[0].get('details')
),
output
)
# if _call_sync_errors_messages_endpoint raises an exception
mocked_hubspot.side_effect = HttpClientError
output = self._get_command_output(is_stderr=True)
self.assertIn(
'An error occurred while getting the error syncing message',
output
|
wez3/domoboard | modules/api.py | Python | gpl-3.0 | 2,175 | 0.006437 | #!/usr/bin/python
# Provides all the API functionality callable through "/api"
from flask import request
from flaskext.auth import login_required
import json, os, sys
import security, charts, plugins, webconfig, domoticz
apiDict = {}
modules = {}
def init():
    # Discover and load all plugin modules once at startup; plugins can then
    # register extra API endpoints via addToApi().
    global modules
    modules = plugins.loadPlugins()
    return
def addToApi(custom, module, function):
    # Register <module>.<function> to handle requests with ?custom=<custom>.
    apiDict[custom] = [module, function]
@login_required()
def gateway():
    """Single /api entry point.

    Dispatches on the ?custom= query parameter: built-in handlers first
    (charts, config, plugin/webconfig indexing, upgrade), then handlers
    registered by plugins via addToApi(), and finally falls through to the
    Domoticz proxy.  Returns sanitized JSON, or a plain message when the
    handler produced nothing usable.
    """
    requestedUrl = request.url.split("/api")
    custom = request.args.get('custom', '')
    if custom == "bar_chart":
        result = charts.barChart()
    elif custom == "donut_chart":
        result = charts.donutChart()
    elif custom == "modify_config":
        # Pure side-effect branch: writes the config and produces no result.
        idx = request.args.get('idx', '')
        page = request.args.get('page', '')
        component = request.args.get('component', '')
        description = request.args.get('description', '')
        extra = request.args.get('extra', '')
        webconfig.writeToConfig(idx, page, component, description, extra)
    elif custom == 'indexPlugins':
        result = json.dumps(plugins.indexPlugins(request.args))
    elif custom == 'indexWebConfig':
        result = json.dumps(webconfig.indexWebConfig(request.args))
    elif custom == "performUpgrade":
        result = json.dumps(webconfig.performUpgrade())
    elif custom in apiDict:
        # Plugin-registered endpoint: look up its [module, function] pair.
        module = apiDict.get(custom)[0]
        function = apiDict.get(custom)[1]
        call = getattr(modules[module], function)
        result = call(request.args)
    else:
        # Unknown custom action: proxy the raw request to Domoticz.
        result = domoticz.queryDomoticz(requestedUrl[1])
    try:
        if not isJson(result):
            result = json.dumps(result)
        return security.sanitizeJSON(json.loads(result))
    except:
        # NOTE(review): the bare except also hides the NameError raised when
        # a branch (e.g. modify_config) leaves 'result' unbound; kept to
        # preserve the original "No results returned" behaviour.
        return "No results returned"
def setConfig(cfg, orig_cfg):
    # Store both the active (possibly modified) config and the pristine
    # original, served later by getConfig()/getOriginalConfig().
    global config
    global originalCfg
    config = cfg
    originalCfg = orig_cfg
def setModules(modulesList):
    # Inject an externally-built plugin module map (alternative to init()).
    global modules
    modules = modulesList
def getConfig():
    # Accessor for the active configuration stored via setConfig().
    return config
def getOriginalConfig():
    # Accessor for the pristine configuration stored via setConfig().
    return originalCfg
def isJson(myjson):
    """Return True if *myjson* parses as JSON, False otherwise.

    json.loads raises ValueError (json.JSONDecodeError subclasses it) on
    malformed input.  The original used the Python-2-only
    ``except ValueError, e`` syntax and bound unused names; this form runs
    on both Python 2 and 3.
    """
    try:
        json.loads(myjson)
    except ValueError:
        return False
    return True
|
heromod/migrid | mig/cgi-bin/mv.py | Python | gpl-2.0 | 1,092 | 0.002747 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# mv - [insert a few words of module description on this line]
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free So | ftware Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fift | h Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import cgi
import cgitb
# Show detailed tracebacks in the browser when this CGI script fails.
cgitb.enable()
from shared.functionality.mv import main
from shared.cgiscriptstub import run_cgi_script
# Delegate the actual 'mv' functionality to the shared implementation,
# wrapped in the common CGI entry-point handling.
run_cgi_script(main)
|
agry/NGECore2 | scripts/object/tangible/painting/painting_bestine_lucky_despot.py | Python | lgpl-3.0 | 86 | 0.081395 | import sys |
def setup(core, object):
return
#Painting of the | Lucky Despot Wreckage |
previtus/MGR-Project-Code | Downloader/VisualizeHistory.py | Python | mit | 7,469 | 0.006293 | import matplotlib, os, errno
# IF WE ARE ON SERVER WITH NO DISPLAY, then we use Agg:
#print matplotlib.get_backend()
if not('DISPLAY' in os.environ):
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
def visualize_history(hi, show=True, save=False, save_path='', show_also='', custom_title=None):
    """Plot training/validation loss curves from a Keras history dict.

    hi: history dictionary (e.g. ``model.fit(...).history`` or the result
    of ``loadHistory``).
    show_also: optional name of an extra metric plotted dotted alongside
    the loss ('train-<name>'/'test-<name>' legend entries).
    save/save_path: when save is True, write save_path (image) plus a
    '.pdf' twin, creating the target directory if needed.
    Returns the matplotlib.pyplot module for further customisation.

    Example:
        hi = model.fit(...)
        saveHistory(hi.history, 'tmp_saved_history.npy')
        visualize_history(loadHistory('tmp_saved_history.npy'))
    """
    # list all data in history
    print(hi.keys())
    # summarize history for loss
    plt.plot(hi['loss'])
    plt.plot(hi['val_loss'])
    # '!=' replaces the Python-2-only '<>' operator of the original.
    if show_also != '':
        plt.plot(hi[show_also], linestyle='dotted')
        plt.plot(hi['val_' + show_also], linestyle='dotted')

    if custom_title is None:
        plt.title('model loss')
    else:
        plt.title(custom_title)
    plt.ylabel('loss')
    plt.xlabel('epoch')
    if show_also == '':
        plt.legend(['train', 'test'], loc='upper left')
    else:
        plt.legend(['train', 'test', 'train-' + show_also, 'test-' + show_also], loc='upper left')

    if save:
        filename = save_path  # +'loss.png'
        # Create the target directory, guarding against a creation race.
        if not os.path.exists(os.path.dirname(filename)):
            try:
                os.makedirs(os.path.dirname(filename))
            except OSError as exc:
                if exc.errno != errno.EEXIST:
                    raise
        plt.savefig(filename)
        plt.savefig(filename + '.pdf', format='pdf')
        print("Saved image to " + filename)

    if show:
        plt.show()

    plt.clf()
    return plt
def visualize_histories(histories, names, plotvalues='loss', show=True, save=False, save_path='', custom_title=None, just_val=False):
    '''
    Visualize multiple histories.
    Example usage:
    h1 = loadHistory('history1.npy')
    h2 = loadHistory('history2.npy')
    visualize_histories([h1, h2], ['history1', 'history2'])
    '''
    import matplotlib.pyplot as plt
    if custom_title is None:
        custom_title = 'model ' + plotvalues
    if just_val:
        custom_title = custom_title + ' (just validation results)'
    i = 0
    # leg collects legend labels: '<name>' for the train curve (unless
    # just_val) and '<name>_val' for the validation curve, per history.
    leg = []
    for hi in histories:
        n = names[i]
        # list all data in history
        print(hi.keys())
        # summarize history for loss
        if not just_val:
            plt.plot(hi[plotvalues])
        plt.plot(hi['val_'+plotvalues])
        plt.title(custom_title)
        plt.ylabel('loss')
        plt.xlabel('epoch')
        if not just_val:
            leg.append(n + '')
        leg.append(n + '_val')
        i += 1
    #plt.legend(leg, loc='lower left')
    plt.legend(leg, loc='best')
    if save:
        # Writes both the raster image at save_path and a '.pdf' twin.
        plt.savefig(save_path) #+plotvalues+'.png')
        plt.savefig(save_path+'.pdf', format='pdf')
    if show:
        plt.show()
    plt.clf()
    return plt
def visualize_special_histories(histories, plotvalues='loss', show=True, save=False, save_path='', custom_title=None, just_val=False):
    '''
    We are visualizing results of a k-fold crossvalidation training. In <histories> we have the individual runs of the experiment.
    '''
    # Individual runs are drawn dashed (grey train / blue val); per-epoch
    # means are drawn solid (red / green) and re-plotted last so they sit
    # on top of the dashed run curves.
    train_color = 'grey'
    val_color = 'blue'
    avg_train_color = 'red'
    avg_val_color = 'green'
    avg_train = []
    avg_val = []
    # count the averages
    epochs = len(histories[0][plotvalues])
    for epoch in range(0, epochs):
        trains = []
        vals = []
        for hi in histories:
            train = hi[plotvalues][epoch]
            val = hi['val_'+plotvalues][epoch]
            trains.append(train)
            vals.append(val)
        avg_train.append( np.mean(trains) )
        avg_val.append( np.mean(vals) )
    import matplotlib.pyplot as plt
    plt.figure()
    if custom_title is None:
        custom_title = 'model ' + plotvalues
    if just_val:
        custom_title = custom_title + ' (just validation results)'
    i = 0
    leg = []
    if not just_val:
        leg.append('average training')
    leg.append('average validation')
    if not just_val:
        leg.append('training errors')
    leg.append('validation errors')
    # now averages:
    if not just_val:
        plt.plot(avg_train, color=avg_train_color)
    plt.plot(avg_val, color=avg_val_color)
    for hi in histories:
        # list all data in history
        print(hi.keys())
        # summarize history for loss
        if not just_val:
            plt.plot(hi[plotvalues], linestyle='dashed', color=train_color)
        plt.plot(hi['val_'+plotvalues], linestyle='dashed', color=val_color)
        i += 1
    # OK, but we also want these on top...:
    if not just_val:
        plt.plot(avg_train, color=avg_train_color)
    plt.plot(avg_val, color=avg_val_color)
    plt.title(custom_title)
    plt.ylabel('loss')
    plt.xlabel('epoch')
    #plt.legend(leg, loc='lower left')
    plt.legend(leg, loc='best')
    if save:
        plt.savefig(save_path) #+plotvalues+'.png')
        plt.savefig(save_path+'.pdf', format='pdf')
    if show:
        plt.show()
    plt.clf()
    return plt
def visualize_whiskered_boxed(whiskered_boxes_data, names, show=True, save=False, save_path='', custom_title=''):
    '''
    We are visualizing results of a k-fold crossvalidation training.
    In <whiskered_boxes_data> we have data for whiskered box plots.
    '''
    from DatasetHandler.DatasetVizualizators import zoomOutY
    plt.close()
    plt.figure(figsize=(5, 8))
    legend_on = True
    if custom_title == '':
        custom_title = ','.join(names)
    # mark values
    save_path += custom_title + '.png'
    # Fixed y-range [0, 1]; the commented block below computed it from the
    # data instead.
    y_max = 1.0
    y_min = 0.0
    #y_max = -100.0
    #y_min = 100.0
    #for i in whiskered_boxes_data:
    # y_max = max(max(i),y_max)
    # y_min = min(min(i),y_min)
    axes = plt.axes()
    import matplotlib.ticker as ticker
    axes.yaxis.set_major_locator(ticker.MultipleLocator(np.abs(y_max - y_min) / 10.0))
    axes.yaxis.set_minor_locator(ticker.MultipleLocator(np.abs(y_max - y_min) / 100.0))
    #meanpointprops = dict(linewidth=0.0)
    meanpointprops = dict(linewidth=1.0)
    boxplot = plt.boxplot(whiskered_boxes_data, notch=False, showmeans=True, meanprops=meanpointprops)
    #plt.xticks(names)
    if (legend_on):
        boxplot['medians'][0].set_label('median')
        boxplot['means'][0].set_label('mean')
        boxplot['fliers'][0].set_label('outlayers')
        # boxplot['boxes'][0].set_label('boxes')
        # boxplot['whiskers'][0].set_label('whiskers')
        boxplot['caps'][0].set_label('caps')
    #axes.set_xlim([0.7, 1.7])
    plt.legend(numpoints = 1)
    axes.set_title(custom_title)
    axes.set_xticklabels(names)
    zoomOutY(axes, [0.0, 1.0], 0.1)
    ## save
    if save:
        plt.savefig(save_path) #+plotvalues+'.png')
        plt.savefig(save_path + '.pdf', format='pdf')
    if show:
        plt.show()
    plt.clf()
    return plt
def saveHistory(history_dict, filename):
    """Save a Keras history dict (or dict of histories) to *filename*.

    The dict is wrapped as {'S': history_dict} so loadHistory can unwrap
    it.  The parent directory is created on demand.
    """
    if not os.path.exists(os.path.dirname(filename)):
        try:
            os.makedirs(os.path.dirname(filename))
        except OSError as exc:  # Guard against race condition
            if exc.errno != errno.EEXIST:
                raise
    # np.save writes binary data: the file must be opened in 'wb', not 'w'
    # (text mode fails on Python 3 and can corrupt data on Windows).  The
    # original also leaked the file handle; 'with' closes it.
    with open(filename, 'wb') as f:
        np.save(f, {'S': history_dict})
def loadHistory(filename):
    """Load a history dict previously written by saveHistory."""
    # allow_pickle=True is required on NumPy >= 1.16.3 because the file
    # stores a pickled dict, not a plain array; 'rb' (the original used
    # text mode) is required for np.load on Python 3.  'with' closes the
    # handle the original leaked.
    with open(filename, 'rb') as f:
        loaded = np.load(f, allow_pickle=True)
    return loaded[()]['S']
|
2947721120/thumbor | vows/point_vows.py | Python | mit | 4,575 | 0.000656 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
from pyvows import Vows, expect
from thumbor.point import FocalPoint
@Vows.batch
class FocalPointVows(Vows.Context):
    """PyVows specs for thumbor's FocalPoint: default alignment percentages,
    construction, dict (de)serialisation and the from_square /
    from_alignment factories.  (Two extraction-garbled lines in the
    BottomRight context were reconstructed.)"""
    class DefaultAlignmentPercentages(Vows.Context):
        def topic(self):
            return FocalPoint.ALIGNMENT_PERCENTAGES
        def should_have_left_alignment_of_0(self, topic):
            expect(topic['left']).to_equal(0.0)
        def should_have_center_alignment_of_half(self, topic):
            expect(topic['center']).to_equal(0.5)
        def should_have_right_alignment_of_one(self, topic):
            expect(topic['right']).to_equal(1.0)
        def should_have_top_alignment_of_0(self, topic):
            expect(topic['top']).to_equal(0.0)
        def should_have_middle_alignment_of_half(self, topic):
            expect(topic['middle']).to_equal(0.5)
        def should_have_bottom_alignment_of_one(self, topic):
            expect(topic['bottom']).to_equal(1.0)
    class NewPoint(Vows.Context):
        class DefaultWeight(Vows.Context):
            def topic(self):
                return FocalPoint(10, 20)
            def should_have_x_coord_of_10(self, topic):
                expect(topic.x).to_equal(10)
            def should_have_y_coord_of_20(self, topic):
                expect(topic.y).to_equal(20)
        class Weighted(Vows.Context):
            def topic(self):
                return FocalPoint(x=10, y=20, height=1.0, width=3.0, weight=3.0)
            def should_have_weight_of_three(self, topic):
                expect(topic.weight).to_equal(3.0)
            def should_have_proper_representation(self, topic):
                expect(str(topic)).to_equal('FocalPoint(x: 10, y: 20, width: 3, height: 1, weight: 3, origin: alignment)')
    class FromDict(Vows.Context):
        def topic(self):
            return FocalPoint.from_dict({'x': 10.1, 'y': 20.1, 'z': 5.1})
        def should_have_x_coord_of_10_1(self, topic):
            expect(topic.x).to_equal(10.1)
        def should_have_y_coord_of_20_1(self, topic):
            expect(topic.y).to_equal(20.1)
        def should_have_weight_of_5_1(self, topic):
            expect(topic.weight).to_equal(5.1)
        class ToDict(Vows.Context):
            def topic(self, prev_topic):
                return prev_topic.to_dict()
            def should_create_the_original_dictionary(self, topic):
                expect(topic).to_be_like({'x': 10.1, 'y': 20.1, 'z': 5.1, 'origin': 'alignment', 'width': 1.0, 'height': 1.0})
    class SquarePoint(Vows.Context):
        def topic(self):
            return FocalPoint.from_square(x=350, y=50, width=110, height=110)
        def should_have_x_of_450(self, topic):
            expect(topic.x).to_equal(405)
        def should_have_x_of_150(self, topic):
            expect(topic.y).to_equal(105)
        def should_have_weight_of_12100(self, topic):
            expect(topic.weight).to_equal(12100)
    class AlignedPoint(Vows.Context):
        class CenterMiddle(Vows.Context):
            def topic(self):
                return FocalPoint.from_alignment('center', 'middle', 300, 200)
            def should_have_x_of_150(self, topic):
                expect(topic.x).to_equal(150)
            def should_have_y_of_100(self, topic):
                expect(topic.y).to_equal(100)
            def should_have_weight_of_1(self, topic):
                expect(topic.weight).to_equal(1.0)
        class TopLeft(Vows.Context):
            def topic(self):
                return FocalPoint.from_alignment('left', 'top', 300, 200)
            def should_have_x_of_0(self, topic):
                expect(topic.x).to_equal(0)
            def should_have_y_of_0(self, topic):
                expect(topic.y).to_equal(0)
            def should_have_weight_of_1(self, topic):
                expect(topic.weight).to_equal(1.0)
        class BottomRight(Vows.Context):
            def topic(self):
                return FocalPoint.from_alignment('right', 'bottom', 300, 200)
            def should_have_x_of_300(self, topic):
                expect(topic.x).to_equal(300)
            def should_have_y_of_200(self, topic):
                expect(topic.y).to_equal(200)
            def should_have_weight_of_1(self, topic):
                expect(topic.weight).to_equal(1.0)
|
kianby/stacosys | stacosys/db/dao.py | Python | gpl-3.0 | 1,576 | 0.001269 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
from datetime import datetime
from stacosys.model.comment import Comment
TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
def find_comment_by_id(id):
    # Fetch a single comment by primary key.
    return Comment.get_by_id(id)
def notify_comment(comment: Comment) -> None:
    # Mark the comment as notified by stamping the current local time.
    comment.notified = datetime.now().strftime(TIME_FORMAT)
    comment.save()
def publish_comment(comment: Comment) -> None:
    # Mark the comment as published by stamping the current local time.
    comment.published = datetime.now().strftime(TIME_FORMAT)
    comment.save()
def delete_comment(comment: Comment) -> None:
    # Permanently remove the comment row from the database.
    comment.delete_instance()
def find_not_notified_comments():
    # Comments whose notification timestamp has not been set yet.
    return Comment.select().where(Comment.notified.is_null())
def find_not_published_comments():
    # Comments still awaiting moderation/publication.
    return Comment.select().where(Comment.published.is_null())
def find_published_comments_by_url(url):
    # All published comments for a page, oldest publication first.
    return Comment.select(Comment).where((Comment.url == url) & (Comment.published.is_null(False))).order_by(
        +Comment.published)
def count_published_comments(url):
    """Count published comments, restricted to *url* when one is given.

    A falsy url (None/'') counts published comments site-wide.  Replaces
    the original's nested conditional expression (which was also garbled
    by extraction) with an explicit, chained query; successive where()
    calls AND together in peewee.
    """
    published = Comment.select(Comment).where(Comment.published.is_null(False))
    if url:
        published = published.where(Comment.url == url)
    return published.count()
def create_comment(url, author_name, author_site, author_gravatar, message):
    """Create, persist and return a new (unpublished, unnotified) comment."""
    # Use the module-wide TIME_FORMAT so 'created' matches the timestamp
    # format written by notify_comment/publish_comment (the original
    # repeated the format string literally).
    created = datetime.now().strftime(TIME_FORMAT)
    comment = Comment(
        url=url,
        author_name=author_name,
        author_site=author_site,
        author_gravatar=author_gravatar,
        content=message,
        created=created,
        notified=None,
        published=None,
    )
    comment.save()
    return comment
|
chainside/btcpy | btcpy/structs/hd.py | Python | lgpl-3.0 | 10,848 | 0.002397 | # Copyright (C) 2017 chainside srl
#
# This file is part of the btcpy package.
#
# It is subject to the license terms in the LICENSE.md file found in the top-level
# directory of this distribution.
#
# No part of btcpy, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE.md file.
import hmac
from hashlib import sha512
from ..lib.base58 import b58decode_check, b58encode_check
from ecdsa import VerifyingKey
from ecdsa.ellipticcurve import INFINITY
from ecdsa.curves import SECP256k1
from ecdsa.ecdsa import generator_secp256k1
from abc import ABCMeta, abstractmethod
from ..lib.types import HexSerializable
from ..lib.parsing import Stream, Parser
from ..setup import is_mainnet
from .crypto import PrivateKey, PublicKey
from ..constants import Constants
from ..setup import strictness
class ExtendedKey(HexSerializable, metaclass=ABCMeta):
    """Base class for BIP32 extended keys: base58check (de)serialisation and
    hierarchical child derivation.  Concrete subclasses are
    ExtendedPrivateKey ('xprv...') and ExtendedPublicKey ('xpub...')."""
    # Parent fingerprint used by master (depth-0) keys.
    master_parent_fingerprint = bytearray([0]*4)
    # Child indexes >= 2**31 denote hardened derivation.
    first_hardened_index = 1 << 31
    curve_order = SECP256k1.order
    @classmethod
    def master(cls, key, chaincode):
        """Build a master (depth-0, index-0) extended key."""
        return cls(key, chaincode, 0, cls.master_parent_fingerprint, 0, hardened=True)
    @classmethod
    @strictness
    def decode(cls, string, strict=None):
        """Decode a base58check-encoded xprv/xpub string into an instance of
        the matching subclass.  Raises ValueError on an unrecognised prefix
        or, when strict, on a mainnet/testnet environment mismatch."""
        if string[0] == Constants.get('xkeys.prefixes')['mainnet']:
            mainnet = True
        elif string[0] == Constants.get('xkeys.prefixes')['testnet']:
            mainnet = False
        else:
            raise ValueError('Encoded key not recognised: {}'.format(string))
        if strict and mainnet != is_mainnet():
            raise ValueError('Trying to decode {}mainnet key '
                             'in {}mainnet environment'.format('' if mainnet else 'non-',
                                                               'non-' if mainnet else ''))
        cls._check_decode(string)
        decoded = b58decode_check(string)
        parser = Parser(bytearray(decoded))
        parser >> 4  # skip the 4 version bytes (already identified above)
        depth = int.from_bytes(parser >> 1, 'big')
        fingerprint = parser >> 4
        index = int.from_bytes(parser >> 4, 'big')
        # Indexes above 2**31 encode hardened derivation; normalise.
        if index >= cls.first_hardened_index:
            index -= cls.first_hardened_index
            hardened = True
        else:
            hardened = False
        chaincode = parser >> 32
        keydata = parser >> 33
        # 'xprv...' vs 'xpub...' selects the concrete subclass.
        if string[1:4] == 'prv':
            subclass = ExtendedPrivateKey
        elif string[1:4] == 'pub':
            subclass = ExtendedPublicKey
        else:
            raise ValueError('Encoded key not recognised: {}'.format(string))
        key = subclass.decode_key(keydata)
        return subclass(key, chaincode, depth, fingerprint, index, hardened)
    @staticmethod
    @abstractmethod
    def decode_key(keydata):
        # Subclasses turn the 33 raw key bytes into a key object.
        raise NotImplemented
    @staticmethod
    def _check_decode(string):
        # Hook for subclasses to validate the encoded string; no-op here.
        pass
    @staticmethod
    @abstractmethod
    def get_version(mainnet=None):
        # Subclasses return the 4 version bytes for serialisation.
        raise NotImplemented
    def __init__(self, key, chaincode, depth, pfing, index, hardened=False):
        # depth is serialised as a single byte, hence the 0..255 bound.
        if not 0 <= depth <= 255:
            raise ValueError('Depth must be between 0 and 255')
        self.key = key
        self.chaincode = chaincode
        self.depth = depth
        self.parent_fingerprint = pfing
        self.index = index
        self.hardened = hardened
    def derive(self, path):
        """
        :param path: a path like "m/44'/0'/1'/0/10" if deriving from a master key,
        or a relative path like "./0/10"
        :return: the derived ExtendedPublicKey if deriving from an ExtendedPublicKey,
        the derived ExtendedPrivateKey if deriving from an ExtendedPrivateKey
        """
        steps = path.split('/')
        if steps[0] not in {'m', '.'}:
            raise ValueError('Invalid derivation path: {}'.format(path))
        if steps[0] == 'm' and not self.is_master():
            raise ValueError('Trying to derive absolute path from non-master key')
        current = self
        for step in steps[1:]:
            # A trailing apostrophe marks a hardened step (e.g. "44'").
            hardened = False
            if step[-1] == "'":
                hardened = True
                step = step[:-1]
            index = int(step)
            current = current.get_child(index, hardened)
        return current
    @abstractmethod
    def get_child(self, index, hardened=False):
        # Subclasses implement one BIP32 child-derivation step.
        raise NotImplemented
    @abstractmethod
    def _serialized_public(self):
        # Subclasses return the serialised public key bytes.
        raise NotImplemented
    @abstractmethod
    def _serialize_key(self):
        # Subclasses return the 33-byte key payload for serialisation.
        raise NotImplemented
    def get_hash(self, index, hardened=False):
        """Return the (left-int, right-bytes) halves of the BIP32 HMAC-SHA512
        over chaincode and key material for child *index*."""
        cls = self.__class__
        if hardened:
            data = self._serialize_key() + (index + cls.first_hardened_index).to_bytes(4, 'big')
        else:
            data = self._serialized_public() + index.to_bytes(4, 'big')
        h = bytearray(hmac.new(bytes(self.chaincode), bytes(data), sha512).digest())
        left, right = int.from_bytes(h[:32], 'big'), h[32:]
        if left > cls.curve_order:
            raise ValueError('Left side of hmac generated number bigger than SECP256k1 curve order')
        return left, right
    def is_master(self):
        # Master keys: depth 0, zero parent fingerprint, index 0.
        return all([self.depth == 0,
                    self.parent_fingerprint == ExtendedKey.master_parent_fingerprint,
                    self.index == 0])
    def encode(self, mainnet=None):
        """Return the base58check string form of this key."""
        return b58encode_check(bytes(self.serialize(mainnet)))
    def serialize(self, mainnet=None):
        """Return the raw 78-byte serialisation: version, depth, parent
        fingerprint, (possibly hardened-offset) index, chaincode, key."""
        cls = self.__class__
        result = Stream()
        result << cls.get_version(mainnet)
        result << self.depth.to_bytes(1, 'big')
        result << self.parent_fingerprint
        if self.hardened:
            result << (self.index + cls.first_hardened_index).to_bytes(4, 'big')
        else:
            result << self.index.to_bytes(4, 'big')
        result << self.chaincode
        result << self._serialize_key()
        return result.serialize()
    def __str__(self):
        return 'version: {}\ndepth: {}\nparent fp: {}\n' \
               'index: {}\nchaincode: {}\nkey: {}\nhardened: {}'.format(self.__class__.get_version(),
                                                                        self.depth,
                                                                        self.parent_fingerprint,
                                                                        self.index,
                                                                        self.chaincode,
                                                                        self.key,
                                                                        self.hardened)
    def __eq__(self, other):
        # Field-wise equality; note depth/index/hardened all participate.
        return all([self.key == other.key,
                    self.chaincode == other.chaincode,
                    self.depth == other.depth,
                    self.parent_fingerprint == other.parent_fingerprint,
                    self.index == other.index,
                    self.hardened == other.hardened])
class ExtendedPrivateKey(ExtendedKey):
@staticmethod
def get_version(mainnet=None):
if mainnet is None:
mainnet = is_mainnet()
| # using net_name here would ignore the mainnet=None flag
return Constants.get('xprv.version')['mainnet' if mainnet else 'testnet']
@staticmethod
def decode_key(keydata):
return PrivateKey(keydata[1:])
@staticmethod
def _check_decode(string):
if string[:4] not in (Constants.get('xprv.prefix').values()):
raise ValueError | ('Non matching prefix: {}'.format(string[:4]))
def __init__(self, key, chaincode, depth, pfing, index, hardened=False):
if not isinstance(key, PrivateKey):
raise TypeError('ExtendedPrivateKey expects a PrivateKey')
super().__init__(key, chaincode, depth, pfing, index, hardened)
def __int__(self):
return int.from_bytes(self.key.key, 'big')
def get_child(self, index, hardened=False):
left, right = self.get_hash(index, hardened)
k = (int(self) + left) % self.__class__.curve_order
if k == 0:
raise ValueError('Got 0 as k')
return ExtendedPrivateKey(PrivateKey(k.to_b |
idrogeno/IdroMips | lib/python/Screens/About.py | Python | gpl-2.0 | 21,987 | 0.031109 | from Screen import Screen
from Components.config import config
from Components.ActionMap import ActionMap
from Components.Sources.StaticText import StaticText
from Components.Harddisk import harddiskmanager
from Components.NimManager import nimmanager
from Components.About import about
from Components.ScrollLabel import ScrollLabel
from Components.Button import Button
from Components.config import config
from Components.Pixmap import MultiPixmap
from Components.Network import iNetwork
from Components.Label import Label
from Components.ProgressBar import ProgressBar
from os import popen
from Tools.StbHardware import getFPVersion
from boxbranding import getBoxType
boxtype = getBoxType()
from enigma import eTimer, eLabel, eConsoleAppContainer
from Components.HTMLComponent import HTMLComponent
from Components.GUIComponent import GUIComponent
import skin
class About(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self.setTitle(_("About"))
hddsplit, = skin.parameters.get("AboutHddSplit", (0,))
#AboutHddSplit = 0
#try:
# hddsplit = skin.parameters.get("AboutHddSplit",(0))[0]
#except:
# hddsplit = AboutHddSplit
if boxtype == 'gb800solo':
BoxName = "GigaBlue HD 800SOLO"
elif boxtype == 'gb800se':
BoxName = "GigaBlue HD 800SE"
elif boxtype == 'gb800ue':
BoxName = "GigaBlue HD 800UE"
elif boxtype == 'gbquad':
BoxName = "GigaBlue HD Quad"
elif boxtype == 'gbuhdquad':
BoxName = "GigaBlue UHD Quad"
elif boxtype == 'gbquadplus':
BoxName = "GigaBlue HD Quadplus"
elif boxtype == 'gb800seplus':
BoxName = "GigaBlue HD 800SEplus"
elif boxtype == 'gb800ueplus':
BoxName = "GigaBlue HD 800UEplus"
elif boxtype == 'gbipbox':
BoxName = "GigaBlue IP Box"
elif boxtype == 'gbultra':
BoxName = "GigaBlue HD Ultra"
elif boxtype == 'gbultraue':
BoxName = "GigaBlue HD Ultra UE"
elif boxtype == 'gbultrase':
BoxName = "GigaBlue HD Ultra SE"
elif boxtype == 'gbx1':
BoxName = "GigaBlue X1"
elif boxtype == 'gbx3':
BoxName = "GigaBlue X3"
elif boxtype == 'spycat':
BoxName = "XCORE Spycat"
elif boxtype == 'quadbox2400':
BoxName = "AX Quadbox HD2400"
else:
BoxName = about.getHardwareTypeString()
ImageType = about.getImageTypeString()
self["ImageType"] = StaticText(ImageType)
Boxserial = popen('cat /proc/stb/info/sn').read()
AboutHeader = ImageType + " - " + BoxName + " - Serial: " + Boxserial
self["AboutHeader"] = StaticText(AboutHeader)
AboutText = AboutHeader + "\n"
#AboutText += _("Hardware: ") + about.getHardwareTypeString() + "\n"
#AboutText += _("CPU: ") + about.getCPUInfoString() + "\n"
#AboutText += _("Installed: ") + about.getFlashDateString() + "\n"
#AboutText += _("Image: ") + about.getImageTypeString() + "\n"
CPUinfo = _("CPU: ") + about.getCPUInfoString() + "\n"
self["CPUinfo"] = StaticText(CPUinfo)
AboutText += CPUinfo + "\n"
CPUspeed = _("Speed: ") + about.getCPUSpeedString() + "\n"
self["CPUspeed"] = StaticText(CPUspeed)
AboutText += CPUspeed + "\n"
ChipsetInfo = _("Chipset: ") + about.getChipSetString() + "\n"
self["ChipsetInfo"] = StaticText(ChipsetInfo)
AboutText += ChipsetInfo + "\n"
KernelVersion = _("Kernel version: ") + about.getKernelVersionString() + "\n"
self["KernelVersion"] = StaticText(KernelVersion)
AboutText += KernelVersion + "\n"
EnigmaVersion = _("GUI Build: ") + about.getEnigmaVersionString()
self["EnigmaVersion"] = StaticText(EnigmaVersion)
AboutText += EnigmaVersion + "\n"
AboutText += _("Enigma (re)starts: %d\n") % config.misc.startCounter.value
EnigmaSkin = _("Skin: ") + config.skin.primary_skin.value[0:-9]
self["EnigmaSkin"] = StaticText(EnigmaSkin)
AboutText += EnigmaSkin + "\n"
GStreamerVersion = _("GStreamer: ") + about.getGStreamerVersionString().replace("GStreamer","")
self["GStreamerVersion"] = StaticText(GStreamerVersion)
AboutText += GStreamerVersion + "\n"
FlashDate = _("Flashed: ") + about.getFlashDateString()
self["FlashDate"] = StaticText(FlashDate)
AboutText += FlashDate + "\n"
ImageVersion = _("Last upgrade: ") + about.getImageVersionString()
self["ImageVersion"] = StaticText(ImageVersion)
AboutText += ImageVersion + "\n"
AboutText += _("DVB drivers: ") + about.getDriverInstalledDate() + "\n"
AboutText += _("Python version: ") + about.getPythonVersionString() + "\n"
fp_version = getFPVersion()
if fp_version is None:
fp_version = ""
else:
fp_version = _("Frontprocessor version: %d") % fp_version
AboutText += fp_version + "\n"
self["FPVersion"] = StaticText(fp_version)
self["TunerHeader"] = StaticText(_("Detected NIMs:"))
AboutText += "\n" + _("Detected NIMs:") + "\n"
nims = nimmanager.nimList(showFBCTuners=False)
for count in range(len(nims)):
if count < 4:
self["Tuner" + str(count)] = StaticText(nims[count])
else:
self["Tuner" + str(count)] = StaticText("")
AboutText += nims[count] + "\n"
self["HDDHeader"] = StaticText(_("Detected HDD:"))
AboutText += "\n" + _("Detected HDD:") + "\n"
hddlist = harddiskmanager.HDDList()
hddinfo = ""
if hddlist:
formatstring = hddsplit and "%s:%s, %.1f %sB %s" or "%s\n(%s, %.1f %sB %s)"
for count in range(len(hddlist)):
if hddinfo:
hddinfo += "\n"
hdd = hddlist[count][1]
if int(hdd.free()) > 1024:
hddinfo += formatstring % (hdd.model(), hdd.capacity(), hdd.free()/1024.0, "G", _("free"))
else:
hddinfo += formatstring % (hdd.model(), hdd.capacity(), hdd.free(), "M", _("free"))
else:
hddinfo = _("none")
self["hddA"] = StaticText(hddinfo)
AboutText += hddinfo
self["AboutScrollLabel"] = ScrollLabel(AboutText)
self["key_green"] = Button(_("Translations"))
self["key_red"] = Button(_("Latest Commits"))
self["key_blue"] = Button(_("Memory Info"))
self["actions"] = ActionMap(["ColorActions", "SetupActions", "DirectionActions"],
{
"cancel": self.close,
"ok": self.close,
"red": self.showCommits,
"green": self.showTranslationInfo,
"blue": self.showMemoryInfo,
"up": self["AboutScrollLabel"].pageUp,
"down": self["AboutScrollLabel"].pageDown
})
def showTranslationInfo(self):
self.session.open(TranslationInfo)
def showCommits(self):
self.session.open(CommitInfo)
def showMemoryInfo(self):
self.session.open(MemoryInfo)
class TranslationInfo(Screen):
def __init__(self, session):
Screen.__init__(self, session)
# don't remove the string out of the _(), or it can't be "translated" anymore.
# TRANSLATORS: Add here whatever should be shown in the "translator" about screen, up to 6 lines (use \n for newline)
info = _("TRANSLATOR_INFO")
if info == "TRANSLATOR_INFO":
info = "(N/A)"
infolines = _("").spli | t("\n")
infomap = {}
for x in infolines:
l = x.split(': ')
if len(l) != 2:
continue
(type, value) = l
infomap[type] = value
print infomap
self["key_ | red"] = Button(_("Cancel"))
self["TranslationInfo"] = StaticText(info)
translator_name = infomap.get("Language-Team", "none")
if translator_name == "none":
translator_name = infomap.get("Last-Translator", "")
self["TranslatorName"] = StaticText(translator_name)
self["actions"] = ActionMap(["SetupActions"],
{
"cancel": self.close,
"ok": self.close,
})
class CommitInfo(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self.skinName = ["CommitInfo", "About"]
self["AboutScrollLabel"] = ScrollLabel(_("Please wait"))
self["actions"] = ActionMap(["SetupActions", "DirectionActions"],
{
"cancel": self.close,
"ok": self.close,
"up": self["AboutScrollLabel"].pageUp,
"down": self["AboutScrollLabel"].pageDown,
"left": self.left,
"right": self.right,
"deleteBackward": self.left,
"deleteForward": self.right
})
self["key_red"] = Button(_("Cancel"))
self.project = 0
self.projects = [
#("organisation", "repository", "readable name", "branch"),
("openmips", "stbgui", "openMips Enigma2", "master"),
("openmips", "skin-pax", "openMips Skin GigaBlue Pax", "master"),
( |
Greymerk/python-rpg | src/abilities/explosion.py | Python | gpl-3.0 | 3,489 | 0.048438 | from random import randint
from projectiles import Star
from math import sqrt
from pygame.color import THECOLORS
class Explosion(object):
range = 6
radius = 4
damage = 2, 5
heal = False
name = "Explosion"
icon = "firebolt"
cooldown = 7
def __init__(self, caster, location, item):
self.item = item
self.range = Explosion.range
self.caster = caster
self.target = location
self.range = self.__class__.range
self.damage = self.__class__.damage
self.color = THECOLORS['orange']
casterName = self.caster.getName()
self.entityHit = self.caster.world.getEntityFromLocation(self.target)
if not self.entityHit is None:
targetName = self.entityHit.getName()
self.caster.world.log.append(casterName + ' cast ' + self.__class__.__name__ + ' at ' + targetName)
else:
self.caster.world.log.append(casterName + ' cast ' + self.__class__.__name__ + ' at nothing!')
self.projectile = Star(caster.position, location, self.color, self.fire, self.impact)
self.explosion = []
self.done = False
def update(self):
if not self.projectile.done:
self.projectile.update()
if self.projectile.done:
#explosion start
targets = Explosion.getNearbyTiles(self.target, Explosion.radius)
if not targets:
self.done = True
return True
for pos in targets:
newStar = Star(self.target, pos, self.color, None, None)
self.explosion.append(newStar)
return False
if not self.explosion:
ret | urn True
done = True
for star in self.explosion:
if star.done:
continue
star.update()
if star.done:
e = self.caster.world.getEntityFromLocation(star.end)
if e is not None:
e.inflict(self.caster, randint(self.damage[0], self.damage[1]))
else:
done = False
i | f done:
self.done = True
return done
def draw(self, surface, position, visible):
if not self.done:
if not self.projectile.done:
self.projectile.draw(surface, position, visible)
if self.explosion:
for star in self.explosion:
star.draw(surface, position, visible)
@staticmethod
def getNearbyTiles(pos, r):
targets = []
xP = pos[0] - (r - 1)
yP = pos[1] - (r - 1)
for x in xrange(r * 2 - 1):
for y in xrange(r * 2 - 1):
toHit = (xP + x, yP + y)
if Explosion.canHit(pos, toHit, r):
targets.append(toHit)
return targets
@staticmethod
def canHit(origin, position, r):
relx = abs(float(origin[0]) - float(position[0]))
rely = abs(float(origin[1]) - float(position[1]))
distance = sqrt(relx**2 + rely**2)
if(distance <= r - 1):
return True
return False
@classmethod
def validTarget(cls, actor, target):
if not target in actor.getEnemies():
return False
if not target.isAlive():
return False
if not actor.partyCanSee(target.position):
return False
nearbyTiles = cls.getNearbyTiles(target.position, Explosion.radius)
for pos in nearbyTiles:
e = actor.world.getEntityFromLocation(pos)
if e is None:
continue
if not target.isAlive():
continue
if e is actor:
return False
if e in actor.getFriends():
return False
return True
def fire(self):
self.caster.world.sounds.get("fireball.wav").play()
def impact(self):
self.caster.world.sounds.get("explosion.wav").play()
def getAllyInRange(self, entity, radius):
for e in entity.getFriends():
if e is entity:
continue
if not e.isAlive():
continue
if e.distance(entity.position) < radius:
return e |
furlongm/patchman | arch/admin.py | Python | gpl-3.0 | 882 | 0 | # Copyright 2012 VPAC, http://www.vpac.org
# Copyright 2013-2021 Marcus Furlong <furlongm@gmail.com>
#
# This file is part of Patchman.
#
# Patchman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 only.
#
# Patchman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Patchman. If not, see | <http://www.gnu.org/licenses/>
| from django.contrib import admin
from arch.models import PackageArchitecture, MachineArchitecture
admin.site.register(PackageArchitecture)
admin.site.register(MachineArchitecture)
|
tcstewar/2015-Embodied_Benchmarks | code/benchmark.py | Python | gpl-2.0 | 5,094 | 0.001767 | import argparse
import importlib
import inspect
import logging
import os
import shelve
import time
import matplotlib.pyplot
import numpy as np
import nengo
def find_offset(a, b):
assert len(a) == len(b)
corr = np.correlate(a, b, 'full')
index = np.argmax(corr[len(a):])
return index
class Benchmark(object):
def __init__(self):
self.parser = argparse.ArgumentParser(
description='Nengo benchmark: %s' %
self.__class__.__name__)
self.param_names = []
self.hidden_params = []
self.params()
self.fixed_params()
def default(self, description, **kwarg):
if len(kwarg) != 1:
raise ValueException('Must specify exactly one parameter')
k, v = kwarg.items()[0]
if k in self.param_names:
raise ValueException('Cannot redefine parameter "%s"' % k)
if v is False:
self.parser.add_argument('--%s' % k, action='store_true',
help=description)
else:
self.parser.add_argument('--%s' % k, type=type(v), default=v,
help=description)
self.param_names.append(k)
def fixed_params(self):
self.default('backend to use', backend='nengo')
self.default('time step', dt=0.001)
self.default('random number seed', seed=1)
self.default('data directory', data_dir='data')
self.default('display figures', show_figs=False)
self.default('do not generate figures', no_figs=False)
self.default('enable debug messages', debug=False)
self.hidden_params.extend(['data_dir', 'show_figs',
'no_figs', 'debug'])
def process_args(self, allow_cmdline=True, **kwargs):
if len(kwargs) == 0 and allow_cmdline:
args = self.parser.parse_args()
else:
args = argparse.Namespace()
for k in self.param_names:
v = kwargs.get(k, self.parser.get_default(k))
setattr(args, k, v)
name = self.__class__.__name__
text = []
self.args_text = []
for k in self.param_names:
if k not in self.hidden_params:
text.append('%s=%s' % (k, getattr(args, k)))
self.args_text.append('_%s = %r' % (k, getattr(args, k)))
filename = name + '#' + ','.join(text)
uid = np.random.randint(0x7FFFFFFF)
filename = name + '#' + time.strftime('%Y%m%d-%H%M%S')+('-%08x' % uid)
return args, filename
def make_model(self, **kwargs):
p, fn = self.process_args(allow_cmdline=False, **kwargs)
np.random.seed(p.seed)
model = self.model(p)
return model
def record_speed(self, t):
now = time.time()
self.sim_speed = t / (now - self.start_time)
def run(self, **kwargs):
p, fn = self.process_args(**kwargs)
if p.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.ERROR)
print('running %s' % fn)
np.random.seed(p.seed)
model = self.model(p)
module = importlib.import_module(p.backend)
Simulator = module.Simulator
if p.backend == 'nengo_spinnaker':
import nengo_spinnaker
nengo_spinnaker.add_spinnaker_params(model.config)
for node in model.all_nodes:
if (node.size_in == 0 and
node.size_out > 0 and
callable(node.output)):
model.config[node].function_of_time = True
if not p.no_figs or p.show_figs:
plt = matplotlib.pyplot
else:
plt = None
sim = Simulator(model, dt=p.dt)
self.start_time = time.time()
self.sim_speed = None
result = self.evaluate(p, sim, plt)
if p.backend == 'nengo_spinnaker':
sim.close()
if self.sim_speed is not None and 'sim_speed' not in result:
result['sim_speed'] = self.sim_speed
text = []
for k, v in sorted(result.items()):
text.append('%s = %s' % (k, repr(v)))
if plt is not None:
plt.suptitle(fn.replace('#', '\n') +'\n' + '\n'.join(text),
fontsize=8)
plt.figtext(0.12,0.12,'\n'.join(self.args_text))
text = self.args_text + text
text = '\n'.join(text)
if not os.path.exists(p.data_dir):
os.mkdir(p.data_dir)
fn = os.path.join(p.data_dir, fn)
if not p.no_figs:
plt.savefig(fn + '.png', dpi=300)
with open(fn + '.txt', 'w') as f | :
f.write(text)
print(text)
db = shelve.open(fn + '.db')
db['trange'] = sim.trange()
for k, v i | n inspect.getmembers(self):
if isinstance(v, nengo.Probe):
db[k] = sim.data[v]
db.close()
if p.show_figs:
plt.show()
return result
|
mulkieran/pyudev | pyudev/monitor.py | Python | lgpl-2.1 | 20,793 | 0.000192 | # -*- coding: utf-8 -*-
# Copyright (C) 2010, 2011, 2012, 2013 Sebastian Wiesner <lunaryorn@gmail.com>
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2.1 of the License, or (at your
# option) any later version.
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
pyudev.monitor
==============
Monitor implementation.
.. moduleauthor:: Sebastian Wiesner <lunaryorn@gmail.com>
"""
from __future__ import (print_function, division, unicode_literals,
absolute_import)
import os
import errno
from threading import Thread
from functools import partial
from pyudev._util import ensure_byte_string
from pyudev.core import Device
from pyudev.os import Pipe, Poll, set_fd_status_flag
__all__ = ['Monitor', 'MonitorObserver']
class Monitor(object):
"""
A synchronous device event monitor.
A :class:`Monitor` objects connects to the udev daemon and listens for
changes to the device list. A monitor is created by connecting to the
kernel daemon through netlink (see :meth:`from_netlink`):
>>> from pyudev import Context, Monitor
>>> context = Context()
>>> monitor = Monitor.from_netlink(context)
Once the monitor is created, you can add a filter using :meth:`filter_by()`
or :meth:`filter_by_tag()` to drop incoming events in subsystems, which are
not of interest to the application:
>>> monitor.filter_by('input')
When the monitor is eventually set up, you can either poll for events
synchronously:
>>> device = monitor.poll(timeout=3)
>>> if device:
... print('{0.action}: {0}'.format(device))
...
Or you can monitor events asynchronously with :class:`MonitorObserver`.
To integrate into various event processing frameworks, the monitor provides
a :func:`selectable <select.select>` file description by :meth:`fileno()`.
However, do *not* read or write directly on this file descriptor.
Instances of this class can directly be given as ``udev_monitor *`` to
functions wrapped through :mod:`ctypes`.
.. versionchanged:: 0.16
Remove :meth:`from_socket()` which is deprecated, and even removed in
recent udev versions.
"""
def __init__(self, context, monitor_p):
self.context = context
self._as_parameter_ = monitor_p
self._libudev = context._libudev
self._started = False
def __del__(self):
self._libudev.udev_monitor_unref(self)
@classmethod
def from_netlink(cls, context, source='udev'):
"""
Create a monitor by connecting to the kernel daemon through netlink.
``context`` is the :class:`Context` to use. ``source`` is a string,
describing the event source. Two sources are available:
``'udev'`` (the default)
Events emitted after udev as registered and configured the device.
This is the absolutely recommended source for applications.
``'kernel'``
Events emitted directly after the kernel has seen the device. The
device has not yet been configured by udev and might not be usable
at all. **Never** use this, unless you know what you are doing.
Return a new :class:`Monitor` object, which is connected to the
given source. Raise :exc:`~exceptions.ValueError`, if an invalid
source has been specified. Raise
:exc:`~exceptions.EnvironmentError`, if the creation of the monitor
failed.
"""
if source not in ('kernel', 'udev'):
raise ValueError('Invalid source: {0!r}. Must be one of "udev" '
'or "kernel"'.format(source))
monitor = context._libudev.udev_monitor_new_from_netlink(
context, ensure_byte_string(source))
if not monitor:
raise EnvironmentError('Could not create udev monitor')
return cls(context, monitor)
@property
def started(self):
"""
``True``, if this monitor was started, ``False`` otherwise. Readonly.
.. seealso:: :meth:`start()`
.. versionadded:: 0.16
"""
return self._started
def fileno(self):
# pylint: disable=anomalous-backslash-in-string
"""
Return the file description associated with this monitor as integer.
This is really a real file descriptor ;), which can be watched and
:func:`select.select`\ ed.
"""
return self._libudev.udev_monitor_get_fd(self)
def filter_by(self, subsystem, device_type=None):
"""
Filter incoming events.
``subsystem`` is a byte or unicode string with the name of a
subsystem (e.g. ``'input'``). Only events originating from the
given subsystem pass the filter and are handed to the caller.
If given, ``device_type`` is a byte or unicode string specifying the
device type. Only devices with the given device type are propagated
to the caller. If ``device_type`` is not given, no additional
filter for a specific device type is installed.
These filters are executed inside the kernel, and client processes
will usually not be woken up for device, that do not match these
filters.
.. versionchanged:: 0.15
This method can also be after :meth:`start()` now.
"""
subsystem = ensure_byte_string(subsystem)
if device_type:
device_type = ensure_byte_string(device_type)
self._libudev.udev_monitor_filter_add_match_subsystem_devtype(
self, subsystem, device_type)
self._libudev.udev_monitor_filter_update(self)
def filter_by_tag(self, tag):
"""
Filter incoming events by the given ``tag``.
``tag`` is a byte or unicode string with the name of a tag. Only
events for devices which have this tag attached pass the filter and are
handed to the caller.
Like with :meth:`filter_by` this filter is also executed inside the
kernel, so that client processes are usually not woken up for devices
without the given ``tag``.
.. udevversion:: 154
.. versionadded:: 0.9
.. versionchanged:: 0.15
This method can also be after :meth:`start()` now.
"""
self._libudev.udev_monitor_filter_add_match_tag(
| self, ensure_byte_string(tag))
self._libudev.udev_monitor_filter_update(self)
def remove_filter(self):
"""
Remove any filters installed with :meth:`filter_by()` or
:meth:`filter_by_tag()` from this monitor.
.. warning::
Up to udev 181 (and possibly even later versions) the underlying
``udev_monitor_filter_remove()`` seems to be broken. If used with
affected versions this method alwa | ys raises
:exc:`~exceptions.ValueError`.
Raise :exc:`~exceptions.EnvironmentError` if removal of installed
filters failed.
.. versionadded:: 0.15
"""
self._libudev.udev_monitor_filter_remove(self)
self._libudev.udev_monitor_filter_update(self)
def enable_receiving(self):
"""
Switch the monitor into listing mode.
Connect to the event source and receive incoming events. Only after
calling this method, the monitor listens for incoming events.
.. note::
This method is implicitly called by :meth:`__iter__`. You don't
need to call it explicitly, if you are iterating over the
monitor.
.. deprecated:: 0.16
Will be |
plotly/python-api | packages/python/plotly/plotly/validators/histogram/_ycalendar.py | Python | mit | 1,058 | 0.000945 | import _plotly_utils.basevalidators
class YcalendarValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="ycalendar", parent_name="histogram", **kwargs):
super(YcalendarValidator, self).__init__(
plotly_name=plo | tly_name,
| parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
values=kwargs.pop(
"values",
[
"gregorian",
"chinese",
"coptic",
"discworld",
"ethiopian",
"hebrew",
"islamic",
"julian",
"mayan",
"nanakshahi",
"nepali",
"persian",
"jalali",
"taiwan",
"thai",
"ummalqura",
],
),
**kwargs
)
|
CDSP/zephserver | docs/conf.py | Python | gpl-3.0 | 9,276 | 0.006037 | # -*- coding: utf-8 -*-
#
# zephserver documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 8 10:50:38 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# sou | rce_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'zephserver'
copyright = u'2015, cdsp'
author = u'cdsp'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short | X.Y version.
version = '0.1.25'
# The full version, including alpha/beta/rc tags.
release = '0.1.25'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
html_theme = 'default'
else:
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'zephserverdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'zephserver.tex', u'zephserver Documentation',
u'cdsp', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Opti |
crypt3lx2k/Imageboard-Web-Interface | defaults.py | Python | mit | 363 | 0.019284 | """
This file holds the default values for the various programs.
"""
import sys
__all__ = ['defaults']
defaults = {
# filenames
'cache_file' : 'bin/cache.bin',
'log_file' : sys.stderr,
# values
'num_threads' : 16,
# flags
'debug' : False,
| 'https' | : False,
'offline' : False,
'quiet' : False
}
|
sacharya/nova | nova/tests/virt/hyperv/db_fakes.py | Python | apache-2.0 | 5,743 | 0.001219 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Stubouts, mocks and fixtures for the test suite
"""
import uuid
from nova.compute import task_states
from nova.compute import vm_states
from nova import db
from nova import utils
def get_fake_instance_data(name, project_id, user_id):
return {'name': name,
'id': 1,
'uuid': str(uuid.uuid4()),
'project_id': project_id,
'user_id': user_id,
'image_ref': "1",
'kernel_id': "1",
'ramdisk_id': "1",
'mac_address': "de:ad:be:ef:be:ef",
'instance_type':
{'name': 'm1.tiny',
'memory_mb': 512,
'vcpus': 1,
'root_gb': 1024,
'flavorid': 1,
'rxtx_factor': 1}
}
def get_fake_image_data(project_id, user_id):
return {'name': 'image1',
'id': 1,
'project_id': project_id,
'user_id': user_id,
'image_ref': "1",
'kernel_id': "1",
'ramdisk_id': "1",
'mac_address': "de:ad:be:ef:be:ef",
'instance_type': 'm1.tiny',
}
def get_fake_volume_info_data(target_portal, volume_id):
return {
'driver_volume_type': 'iscsi',
'data': {
'volume_id': 1,
'target_iqn': 'iqn.2010-10.org.openstack:volume-' + volume_id,
'target_portal': target_portal,
'target_lun': 1,
'auth_method': 'CHAP',
}
}
def get_fake_block_device_info(target_portal, volume_id):
return {'block_device_mapping': [{'connection_info': {
'driver_volume_type': 'iscsi',
'data': {'target_lun': 1,
'volume_id': volume_id,
'target_iqn':
'iqn.2010-10.org.openstack:volume-' +
volume_id,
'target_portal': target_portal,
'target_discovered': False}},
'mount_device': 'vda',
'delete_on_termination': False}],
'root_device_name': None,
'ephemerals': [],
'swap': None
}
def stub_out_db_instance_api(stubs):
"""Stubs out the db API for creating Instances."""
INSTANCE_TYPES = {
'm1.tiny': dict(memory_mb=512, vcpus=1, root_gb=0, flavorid=1),
'm1.small': dict(memory_mb=2048, vcpus=1, root_gb=20, flavorid=2),
'm1.medium': dict(memory_mb=4096, vcpus=2, root_gb=40, flavorid=3),
'm1.large': dict(memory_mb=8192, vcpus=4, root_gb=80, flavorid=4),
'm1.xlarge': dict(memory_mb=16384, vcpus=8, root_gb=160, flavorid=5)}
class FakeModel(object):
"""Stubs out for model."""
def __init__(self, values):
self.values = values
def get(self, key, default=None):
if key in self.values:
return self.values[key]
else:
return default
def __getattr__(self, name):
return self.values[name]
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
self.values[key] = value
def __str__(self):
return str(self.values)
def fake_instance_create(context, values):
"""Stubs out the db.instance_create method."""
if 'instance_type' not in values:
return
instance_type = values['instance_type']
base_options = {
'name': values['name'],
'id': values['id'],
'uuid' | : str(uuid.uuid4()),
'reservation_id': utils.generate_uid('r'),
'image_ref': values['image_ref'],
'kernel_id': values['kernel_id'],
'ramdisk_id': values['ramdisk_id'],
'vm_state': vm_states.BUILDING,
'task_state': task_states.SCHEDULING,
| 'user_id': values['user_id'],
'project_id': values['project_id'],
'instance_type': instance_type,
'memory_mb': instance_type['memory_mb'],
'vcpus': instance_type['vcpus'],
'mac_addresses': [{'address': values['mac_address']}],
'root_gb': instance_type['root_gb'],
}
return FakeModel(base_options)
def fake_instance_type_get_all(context, inactive=0, filters=None):
return INSTANCE_TYPES.values()
def fake_instance_type_get_by_name(context, name):
return INSTANCE_TYPES[name]
def fake_block_device_mapping_get_all_by_instance(context, instance_uuid):
return {}
stubs.Set(db, 'instance_create', fake_instance_create)
stubs.Set(db, 'flavor_get_all', fake_instance_type_get_all)
stubs.Set(db, 'flavor_get_by_name', fake_instance_type_get_by_name)
stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_block_device_mapping_get_all_by_instance)
|
stephanie-wang/ray | python/ray/tune/examples/logging_example.py | Python | apache-2.0 | 1,989 | 0 | #!/usr/bin/env python
import argparse
import json
import os
import random
import numpy as np
from ray import tune
from ray.tune import Trainable, run
class TestLogger(tune.logger.Logger):
def on_result(self, result):
print("TestLogger", result)
def trial_str_creator(trial):
return "{}_{}_123".format(trial.trainable_name, trial.trial_id)
class MyTrainableClass(Trainable):
"""Example agent whose learning curve is a random sigmoid.
The dummy hyperparameters "width" and "height" determine the slope and
maximum reward value reached.
"""
def _setup(self, config):
self.timestep = 0
def _train(self):
self.timestep += 1
v = np.tanh(float(self.timestep) / self.config.get("width", 1))
v *= self.config.get("height", 1)
# Here we use `episode_reward_mean`, but you can also report other
# objectives such as loss or accuracy.
return {"episode_reward_mean": v}
def _save(self, checkpoint_dir):
path = os.path.join(checkpoint_dir, "checkpoint")
with open(path, "w" | ) as f:
f.write(json.dumps({"timestep": self.timestep}))
return path
def _restore(self, checkpoint_path):
with open(checkpoint_path) as f:
self.timestep = json.loads(f.read())["timestep"]
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
" | --smoke-test", action="store_true", help="Finish quickly for testing")
args, _ = parser.parse_known_args()
trials = run(
MyTrainableClass,
name="hyperband_test",
num_samples=5,
trial_name_creator=trial_str_creator,
loggers=[TestLogger],
stop={"training_iteration": 1 if args.smoke_test else 99999},
config={
"width": tune.sample_from(
lambda spec: 10 + int(90 * random.random())),
"height": tune.sample_from(lambda spec: int(100 * random.random()))
})
|
stutivarshney/Bal-Aveksha | WebServer/manage.py | Python | gpl-3.0 | 807 | 0 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "WebServer.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking o | ther
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
" | forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
Mariatta/clone_ndb_entity | clone_ndb_entity.py | Python | mit | 1,677 | 0.005963 | """
clone an ndb entity
"""
from google.appengine.ext import ndb
def construct_clone_keys_from_entity_key(entity_key, clone_kind):
"""
:param entity_key: the entity key (ndb.Key)
:param clone_kind: the kind of the clone model
:return: the key for the entity clone model
"""
keys = [(clone_kind, key_id) for _, key_id in entity_key.pairs()]
return ndb.Key(pairs=keys)
def clone_entity_properties(entity_to_clone, to_class, clone_key=False, **extra_args):
"""Clones the properties of an entity into another entity, adding or overriding constructor attributes.
The cloned entity will have exactly the same property values as the original
entity, except where overridden. By default it will have no parent entity or key name, unless supplied.
Args:
:param entity_to_clone: The entity to clone
:param to_class: the new Entity type
:param clone_key: whether to clone the entity key or not
:param extra_args: Keyword arguments to override from the cloned entity and pass
to the constructor.
:returns:
A cloned, possibly modified, copy of entity e that has the same p | roperties as e
"""
entity_dict = entity_to_clone.to_dict()
new_props = {}
for k, v in entity_to_clone.__class__.__dict__.iteritems():
if isinstance(v, ndb.Property) and not isinstance(v, ndb.ComputedProperty): # can't clone ComputedProperty
new_props[k] = entity_dict[k]
new_props.update(extra_args)
new_entity = to_class(**new_props)
if clone_key:
key = construct_clone_keys_from_entity_key(entity_to_clone.key, to_class)
new_en | tity.key = key
return new_entity
|
felipenaselva/repo.felipe | plugin.video.specto/resources/lib/sources/disabled/directdl_tv.py | Python | gpl-2.0 | 4,247 | 0.011773 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,json,base64
from resources.lib.libraries import control
from resources.lib.libraries import cleantitle
from resources.lib.libraries import client
from resources.lib import resolvers
class source:
def __init__(self):
self.base_link = 'http://directdownload.tv'
self.search_link = 'L2FwaT9rZXk9NEIwQkI4NjJGMjRDOEEyOSZxdWFsaXR5W109SERUViZxdWFsaXR5W109RFZEUklQJnF1YWxpdHlbXT03MjBQJnF1YWxpdHlbXT1XRUJETCZxdWFsaXR5W109V0VCREwxMDgwUCZsaW1pdD0yMCZrZXl3b3JkPQ=='
def get_show(self, imdb, tvdb, tvshowtitle, year):
try:
url = tvshowtitle
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
try:
if url == None: return
url = '%s S%02dE%02d' % (url, int(season), int(episode))
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_sources(self, url, hosthdDict, hostDict, locDict):
try:
sources = []
if url == None: return sources
if (control.sett | ing('realdedrid_user') == '' and control.setting('premiumize_u | ser') == ''): raise Exception()
query = base64.urlsafe_b64decode(self.search_link) + urllib.quote_plus(url)
query = urlparse.urljoin(self.base_link, query)
result = client.request(query)
result = json.loads(result)
title, hdlr = re.compile('(.+?) (S\d*E\d*)$').findall(url)[0]
title = cleantitle.tv(title)
hdlr = [hdlr]
links = []
for i in result:
try:
t = i['showName']
t = client.replaceHTMLCodes(t)
t = cleantitle.tv(t)
if not t == title: raise Exception()
y = i['release']
y = re.compile('[\.|\(|\[|\s](\d{4}|S\d*E\d*)[\.|\)|\]|\s]').findall(y)[-1]
y = y.upper()
if not any(x == y for x in hdlr): raise Exception()
quality = i['quality']
if quality == 'WEBDL1080P': quality = '1080p'
elif quality in ['720P', 'WEBDL']: quality = 'HD'
else: quality = 'SD'
size = i['size']
size = float(size)/1024
info = '%.2f GB' % size
url = i['links']
for x in url.keys(): links.append({'url': url[x], 'quality': quality, 'info': info})
except:
pass
for i in links:
try:
url = i['url']
if len(url) > 1: raise Exception()
url = url[0]
host = (urlparse.urlparse(url).netloc).replace('www.', '').rsplit('.', 1)[0].lower()
if not host in hosthdDict: raise Exception()
sources.append({'source': host, 'quality': i['quality'], 'provider': 'DirectDL', 'url': url, 'info': i['info']})
except:
pass
return sources
except:
return sources
def resolve(self, url):
try:
url = resolvers.request(url)
return url
except:
return
|
will-Do/tp-libvirt_v2v | libvirt/tests/src/virsh_cmd/network/virsh_net_list.py | Python | gpl-2.0 | 6,096 | 0.000164 | import re
import os
from autotest.client.shared import error
from virttest import virsh
from provider import libvirt_version
def run(test, params, env):
"""
Test command: virsh net-list.
The command returns list of networks.
1.Get all parameters from configuration.
2.Get current network's status(State, Autostart).
3.Do some prepare works for testing.
4.Perform virsh net-list operation.
5.Recover network status.
6.Confirm the result.
"""
option = params.get("net_list_option", "")
extra = params.get("net_list_extra", "")
status_error = params.get("status_error", "no")
net_name = params.get("net_list_name", "default")
persistent = params.get("net_list_persistent", "yes")
net_status = params.get("net_list_error", "active")
tmp_xml = os.path.join(test.tmpdir, "tmp.xml")
net_current_status = "active"
autostart_status = "yes"
if not virsh.net_state_dict()[net_name]['active']:
net_current_status = "inactive"
if not virsh.net_state_dict()[net_name]['autostart']:
autostart_status = "no"
# acl polkit params
uri = params.get("virsh_uri")
unprivileged_user = params.get('unprivileged_user')
if unprivileged_user:
if unprivileged_user.count('EXAMPLE'):
unprivileged_user = 'testacl'
if not libvirt_version.version_compare(1, 1, 1):
if params.get('setup_libvirt_polkit') == 'yes':
raise error.TestNAError("API acl test not supported in current"
" libv | irt version.")
# Create a transient network.
try:
if persistent == "no":
virsh.net_dumpxml(net_name, to_file=tmp_xml, ignore_status=False)
if net_current_status == "inactive":
virsh.net_destroy(net_name, ignore_status=False)
virsh.net_undefine(net_name | , ignore_status=False)
virsh.net_create(tmp_xml, ignore_status=False)
except error.CmdError:
raise error.TestFail("Transient network test failed!")
# Prepare network's status for testing.
if net_status == "active":
try:
if not virsh.net_state_dict()[net_name]['active']:
virsh.net_start(net_name, ignore_status=False)
except error.CmdError:
raise error.TestFail("Active network test failed!")
else:
try:
if virsh.net_state_dict()[net_name]['active']:
virsh.net_destroy(net_name, ignore_status=False)
except error.CmdError:
raise error.TestFail("Inactive network test failed!")
virsh_dargs = {'ignore_status': True}
if params.get('setup_libvirt_polkit') == 'yes':
virsh_dargs['unprivileged_user'] = unprivileged_user
virsh_dargs['uri'] = uri
result = virsh.net_list(option, extra, **virsh_dargs)
status = result.exit_status
output = result.stdout.strip()
# Recover network
try:
if persistent == "no":
virsh.net_destroy(net_name, ignore_status=False)
virsh.net_define(tmp_xml, ignore_status=False)
if net_current_status == "active":
virsh.net_start(net_name, ignore_status=False)
if autostart_status == "yes":
virsh.net_autostart(net_name, ignore_status=False)
else:
if net_current_status == "active" and net_status == "inactive":
virsh.net_start(net_name, ignore_status=False)
elif net_current_status == "inactive" and net_status == "active":
virsh.net_destroy(net_name, ignore_status=False)
except error.CmdError:
raise error.TestFail("Recover network failed!")
# check result
if status_error == "yes":
if status == 0:
raise error.TestFail("Run successfully with wrong command!")
elif status_error == "no":
if status != 0:
raise error.TestFail("Run failed with right command")
if option == "--inactive":
if net_status == "active":
if re.search(net_name, output):
raise error.TestFail("Found an active network with"
" --inactive option")
else:
if persistent == "yes":
if not re.search(net_name, output):
raise error.TestFail("Found no inactive networks with"
" --inactive option")
else:
# If network is transient, after net-destroy it,
# it will disapear.
if re.search(net_name, output):
raise error.TestFail("Found transient inactive networks"
" with --inactive option")
elif option == "":
if net_status == "active":
if not re.search(net_name, output):
raise error.TestFail("Can't find active network with no"
" option")
else:
if re.search(net_name, output):
raise error.TestFail("Found inactive network with"
" no option")
elif option == "--all":
if net_status == "active":
if not re.search(net_name, output):
raise error.TestFail("Can't find active network with"
" --all option")
else:
if persistent == "yes":
if not re.search(net_name, output):
raise error.TestFail("Can't find inactive network with"
" --all option")
else:
# If network is transient, after net-destroy it,
# it will disapear.
if re.search(net_name, output):
raise error.TestFail("Found transient inactive network"
" with --all option")
|
wolfy1339/Python-IRC-Bot | utils/irc.py | Python | mit | 2,432 | 0.002467 | import time
from typing import List, Optional
from utils import tasks
from zirc.event import Event
from utils.database import Database
from zirc.wrappers import connection_wrapper
def chunks(l: List, n: int):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def set_mode(irc: connection_wrapper, channel: str, users: List[str], mode: str):
for block in chunks(users, 4):
modes = "".join(mode[1:]) * len(block)
irc.mode(channel, " ".join(block), mode[0] + modes)
def get_users(args: str):
if args.find(",") != -1:
pos = args.find(",")
users_str = args[pos:].strip()
if args[pos + 1] != " ":
users = users_str[1:].split(",")
else:
users = users_str[2:].split(", ")
args = args[:pos].strip().split(" ")
users.append(args[-1])
else:
args_list = args.split(" | ")
if len(args_list) == 1:
| users = args_list[0]
elif len(args_list) >= 2:
users = args_list[:-1]
return users
def get_user_host(userdb: Database, channel: str, nick: str):
return userdb.get_user_host(channel, nick)
def get_info_tuple(event: Event, args: List[str], userdb: Optional[Database]=None):
if args[0].startswith("#"):
channel = args[0]
str_args = " ".join(args[1:])
del args[0]
else:
channel = event.target
str_args = " ".join(args)
if str_args.find(",") != -1:
users = get_users(str_args)
else:
users = args[-1:]
if " ".join(args[:-len(users)]) != '':
message = " ".join(args[:-len(users)])
else:
message = f"{event.source.nick}"
for (i, v) in enumerate(users):
if not v.find("!") != -1 and userdb is not None:
users[i] = get_user_host(userdb, event.target, v)
return channel, users, message
def unban_after_duration(irc: connection_wrapper, users: List[str], chan: str, duration: int):
duration += int(time.time())
def func(irc: connection_wrapper, users: List[str], chan: str):
for i in users:
irc.unban(chan, i)
tasks.run_at(duration, func, (irc, users, chan))
def strip_colours(s: str):
import re
ccodes = ['\x0f', '\x16', '\x1d', '\x1f', '\x02',
'\x03([1-9][0-6]?)?,?([1-9][0-6]?)?']
for cc in ccodes:
s = re.sub(cc, '', s)
return s
|
ericremoreynolds/reynolds.mappings | Reynolds.Mappings/Mapping.py | Python | mit | 22,598 | 0.03297 | #----
MAX_N = 6
#----
import sys
from CodeGen import *
codegen_begin("Mapping.cs")
stmt("using System")
stmt("using System.Collections")
stmt("using System.Collections.Generic")
stmt("using System.Threading")
stmt("using System.Linq")
with block("namespace Reynolds.Mappings"):
for n in range(1, MAX_N):
if n == 1:
subs = {
'Tuple<TKeys>': "TKey",
'TKeys': "TKey",
'TKeys keys': "TKey key",
'keytuple': "key",
'keys': 'key',
'IEqualityComparer<TKeys>' : "IEqualityComparer<TKey>",
'IEqualityComparer<TKeys> comparers' : "IEqualityComparer<TKey> comparer"
}
else:
subs = {
'TKeys': ", ".join(["TKey%i" % (i+1) for i in range(n)]),
'TKeys keys': ", ".join(["TKey%i key%i" % (i+1, i+1) for i in range(n)]),
'Tuple<TKeys>': "Tuple<" + ", ".join(["TKey%i" % (i+1) for i in range(n)]) + ">",
'keytuple': "new Tuple<" + ", ".join(["TKey%i" % (i+1) for i in range(n)]) + ">(" + ", ".join(["key%i" % (i+1) for i in range(n)]) + ")",
'keys': ", ".join(['key' + str(i+1) for i in range(n)]),
'IEqualityComparer<TKeys>' : ", ".join(["IEqualityComparer<TKey%i>" % (i+1) for i in range(n)]),
'IEqualityComparer<TKeys> comparers' : ", ".join(["IEqualityComparer<TKey%i> comparer%i" % (i+1, i+1) for i in range(n)])
}
with placeholders(**subs):
# --------------- IDomain -------------
with block("public interface IDomain<$TKeys$> : IEnumerable<$Tuple<TKeys>$>"):
stmt("bool Contains($TKeys keys$)")
with block("int Count"):
stmt("get")
with block("bool IsFinite"):
stmt("get")
with block("bool IsNumerable"):
stmt("get")
# --------------- KeyValueTuple -------------
with block("public interface IKeyValueTuple<$TKeys$, out TValue>"):
if n>1:
for i in range(1, n+1):
with block("TKey%i Key%i" % (i, i)):
stmt("get")
else:
with block("TKey Key"):
stmt("get")
with block("TValue Value"):
stmt("get")
with block("public struct KeyValueTuple<$TKeys$, TValue> : IKeyValueTuple<$TKeys$, TValue>"):
stmt("KeyValuePair<$Tuple<TKeys>$, TValue> inner")
with block("public KeyValueTuple(KeyValuePair<$Tuple<TKeys>$, TValue> inner)"):
stmt("this.inner = inner")
if n>1:
for i in range(1, n+1):
with block("public TKey%i Key%i" % (i, i)):
with block("get"):
stmt("return inner.Key.Item%i" % i)
else:
with block("public TKey Key"):
with block("get"):
stmt("return inner.Key")
with block("public TValue Value"):
with block("get"):
stmt("return inner.Value")
# --------------- IMapping ----------------
with block("public interface IMapping<$TKeys$, out TValue> : IEnumerable<IKeyValueTuple<$TKeys$, TValue>>, IDomain<$TKeys$>"):
with block("TValue this[$TKeys keys$]"):
stmt("get")
stmt("IEnumerator<IKeyValueTuple<$TKeys$, TValue>> GetEnumerator()")
# --------------- Mapping ----------
with block("public class Mapping<$TKeys$, TValue> : IMapping<$TKeys$, TValue>"):
stmt("public delegate TValue GetDelegate($TKeys keys$)")
stmt("GetDelegate _getter")
with block("public Mapping(GetDelegate getter)"):
stmt("_getter = getter")
with block("public TValue this[$TKeys keys$]"):
with block("get"):
stmt("return _getter($keys$)")
with block("public bool IsFinite"):
with block("get"):
stmt("return false")
with block("public bool IsNumerable"):
with block("get"):
stmt("return false")
with block("public bool Contains($TKeys keys$)"):
stmt("return true")
with block("public int Count"):
with block("get"):
stmt('throw new Exception("Domain is not finite")')
with block("public IEnumerator<IKeyValueTuple<$TKeys$, TValue>> GetEnumerator()"):
stmt('throw new Exception("Domain is non-numerable")')
with block("IEnumerator<$Tuple<TKeys>$> IEnumerable<$Tuple<TKeys>$>.GetEnumerator()"):
stmt('throw new Exception("Domain is non-numerable")')
with block("IEnumerator IEnumerable.GetEnumerator()"):
stmt('throw new Exception("Domain is non-numerable")')
# --------------- DictionaryMapping ----------------
with block("public class DictionaryMapping<$TKeys$, TValue> : Dictionary<$Tuple<TKeys>$, TValue>, IMapping<$TKeys$, TValue>"):
if n > 1:
with block("protected class EqualityComparer : IEqualityComparer<$Tuple<TKeys>$>"):
for i in range(n):
stmt("IEqualityComparer<TKey%i> comparer%i" % (i+1, i+1))
with block("public EqualityComparer($IEqualityComparer<TKeys> comparers$)"):
for i in range(n):
stmt("this.comparer%i = (comparer%i == null ? EqualityComparer<TKey%i>.Default : comparer%i)" % (i+1, i+1, i+1, i+1))
with block("public bool Equals($Tuple<TKeys>$ a, $Tuple<TKeys>$ b)"):
stmt ("return " + " && ".join(("comparer%i.Equals(a.Item%i, b.Item%i)" % (i+1, i+1, i+1)) for i in range(n)))
with block("public int GetHashCode($Tuple<TKeys>$ obj)"):
stmt("int result = %i" % n)
with block("unchecked"):
for i in range(n):
stmt("result = result * 23 + comparer%i.GetHashCode(obj.Item%i)" % (i+1, i+1))
stmt("return result")
with block("public bool Contains($TKeys keys$)"):
stmt("return this.ContainsKey($keytuple$)")
with block("public bool IsFinite"):
with block("get"):
stmt("return true")
with block("public bool IsNumerable"):
with block("get"):
stmt("return true")
with block("public DictionaryMapping() : base()"):
pass
with block("public DictionaryMapping($IEqualityComparer<TKeys> comparers$) : base (" + ("comparer" if n == 1 else "new EqualityComparer(" + ", ".join(("comparer%i" % (i+1) for i in range(n))) + ")") + ")"):
pass
with block("public new IEnumerator<IKeyValueTuple<$TKeys$, TValue>> GetEnumerator()"):
with block("for(var e = base.GetEnumerator(); e.MoveNext(); )"):
stmt("yield return new KeyValueTuple<$TKeys$, TValue>(e.Current)")
with block("IEnumerator<$Tuple<TKeys>$> IEnumerable<$Tuple<TKeys>$>.GetEnumerator()"):
stmt("return this.Keys.GetEnumerator()")
with block("IEnumerator IEnumerable.GetEnumerator()"):
stmt("return this.Keys.GetEnumerator()")
if n > 1:
with block("public bool ContainsKey($TKeys keys$)"):
stmt("return base.ContainsKey($keytuple$)")
with block("public bool Remove($TKeys keys$)"):
stmt("return base.Remove($keytuple$)")
with block("public void Add($TKeys keys$, TValue value)"):
stmt("base.Add($keytuple$, value)")
with block("public bool TryGetValue($TKeys keys$, out TValue value)"):
stmt("return base.TryGetValue($keytuple$, out value)")
with block("public TValue this[$TKeys keys$]"):
with block("get"):
stmt("return base[$keytuple$]")
with block("set"):
stmt("base[$keytuple$] = value")
# --------------- LazyMapping ----------------
with block("public class LazyMapping<$TKeys$, TValue> : IMapping<$TKeys$, TValue>"):
stmt("public delegate TValue InstantiateDelegate($TKeys keys$)")
stmt("public delegate bool ContainsDelegate($TKeys keys$)")
stmt("protected InstantiateDelegate _instantiator")
stmt("protected ContainsDelegate _contains")
stmt("protected DictionaryMapping<$TKeys$, TValue> _in | ner")
with block("public LazyMapping(InstantiateDelegate instantiator, ContainsDelegate contains = null)"):
stmt("_inner = new DictionaryMapping<$TKeys$, TValue>()")
stmt("_instantiator = instantiator")
stmt("_contains = contains")
| with block("public LazyMapping(InstantiateDelegate instantiator, ContainsDelegate contains, $IEqualityComparer<TKeys> comparers$)"):
stmt("_inner = new DictionaryMapping<$TKeys$, TValue>(" + ("comparer" if n == 1 else ", ".join(("comparer%i" % (i+1) for i in range(n)))) + ")")
stmt("_instantiator = instantiator")
stmt("_contains = contains")
with block("public bool Contains($TKeys keys$)"):
with block("if(_contains == null)"):
stmt("return true")
with block("else"):
stmt("return _contains($keys$)")
with block("public |
QISKit/qiskit-sdk-py | qiskit/pulse/pulse_lib/__init__.py | Python | apache-2.0 | 570 | 0 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# c | opyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Module fo | r builtin pulse_lib."""
from .discrete import *
|
6112/project-euler | problems/066.py | Python | mit | 2,117 | 0.002867 | # encoding=utf-8
## SOLVED 2014/11 | /29
## 661
# Consider quadratic Diophantine equations of the form:
# x^2 – Dy^2 = 1
# For example, when D=13, the minimal solution in x is 649^2 – 13×180^2 = 1.
# It can be assumed that there are no solutions in positive integers when D is
# square.
# By finding minimal solutions in x for D = {2, 3, 5, 6, 7}, we obtain the
# followin | g:
# 3^2 – 2×2^2 = 1
# 2^2 – 3×1^2 = 1
# 9^2 – 5×4^2 = 1
# 5^2 – 6×2^2 = 1
# 8^2 – 7×3^2 = 1
# Hence, by considering minimal solutions in x for D ≤ 7, the largest x is
# obtained when D=5.
# Find the value of D ≤ 1000 in minimal solutions of x for which the largest
# value of x is obtained.
from math import sqrt
LIMIT = 1000
# see http://mathworld.wolfram.com/PellEquation.html
def euler():
highest_x = 0
highest_D = 0
for D in range(2, LIMIT + 1):
fract = continued_fraction(D)
if fract[1]: # if not a square
# length of the repeating partials
r = len(fract[1])
# list for int(sqrt(D)), followed by partials
a = [fract[0]] + fract[1]
# p[n]/q[n] is the nth convergent fraction for sqrt(D)
ps = [a[0], a[0] * a[1] + 1]
qs = [1, a[1]]
for n in range(1, 2 * r + 2):
a_n = a[1 + (n % r)]
ps.append(a_n * ps[n] + ps[n-1])
qs.append(a_n * qs[n] + qs[n-1])
# the solution x/y will always be a convergent for sqrt(D)
x = ps[n + 1]
y = qs[n + 1]
if x * x - D * y * y == 1:
if x > highest_x:
highest_x = x
highest_D = D
break
return highest_D
def continued_fraction(S):
if is_square(S):
return (int(sqrt(S)), [])
xs = []
m = m0 = 0
d = d0 = 1
a = a0 = int(sqrt(S))
while a != 2 * a0:
m = d * a - m
d = (S - m * m) / d
a = int((a0 + m) / d)
xs.append(a)
return (a0, xs)
def is_square(n):
return int(sqrt(n)) == sqrt(n)
|
JNeiger/robocup-software | soccer/gameplay/plays/restarts/our_corner_kick.py | Python | apache-2.0 | 3,158 | 0.0019 | import standard_play
import behavior
import skills
import tactics
import robocup
import constants
import main
import enum
class OurCornerKick(standard_play.StandardPlay):
    """Corner-kick restart play: one robot chips the ball from the corner
    toward a short target segment by the far goal post, while two support
    robots wait in front of the goal to receive it.

    Fixed: removed stray ' | ' split markers that broke a comment and the
    `self.add_subbehavior(...)` call in on_enter_running().
    """
    # Chipper tuning constants (distances in field units).
    MinChipRange = 0.3
    MaxChipRange = 3.0
    ChipperPower = 0.5
    TargetSegmentWidth = 1.5
    MaxKickSpeed = 0.5
    MaxKickAccel = 0.5
    def __init__(self):
        super().__init__(continuous=True)
        self.add_transition(behavior.Behavior.State.start,
                            behavior.Behavior.State.running, lambda: True,
                            'immediately')
        self.kicker = skills.line_kick.LineKick()
        self.add_transition(behavior.Behavior.State.running,
                            behavior.Behavior.State.completed,
                            self.kicker.is_done_running, 'kicker is done')
    @classmethod
    def score(cls):
        """Eligible (score 1) only for our direct free kicks taken within
        1.0 of the opponent's end line; otherwise inf (never chosen)."""
        gs = main.game_state()
        if gs.is_ready_state() and gs.is_our_direct() and main.ball().pos.y > (
                constants.Field.Length - 1.0):
            return 1
        else:
            return float("inf")
    @classmethod
    def is_restart(cls):
        return True
    def on_enter_running(self):
        # Fresh kicker configured to chip toward the far post.
        self.kicker = skills.line_kick.LineKick()
        self.kicker.use_chipper = True
        self.kicker.chip_power = OurCornerKick.ChipperPower  # TODO: base this on the target dist from the bot
        self.kicker.min_chip_range = OurCornerKick.MinChipRange
        self.kicker.max_chip_range = OurCornerKick.MaxChipRange
        self.kicker.max_speed = OurCornerKick.MaxKickSpeed
        self.kicker.max_accel = OurCornerKick.MaxKickAccel
        self.add_subbehavior(self.kicker, 'kicker', required=True, priority=5)
        # larger avoid ball radius for line kick setup so we don't run over
        # the ball backwards
        self.kicker.setup_ball_avoid = constants.Field.CenterRadius - constants.Robot.Radius
        self.kicker.drive_around_dist = constants.Field.CenterRadius - constants.Robot.Radius
        # Two optional receivers staged in front of the goal.
        self.center1 = skills.move.Move()
        self.add_subbehavior(self.center1,
                             'center1',
                             required=False,
                             priority=4)
        self.center2 = skills.move.Move()
        self.add_subbehavior(self.center2,
                             'center2',
                             required=False,
                             priority=3)
    def execute_running(self):
        # setup the kicker target: a TargetSegmentWidth-long vertical segment
        # hugging the post on the opposite side of the field from the ball
        goal_x = constants.Field.GoalWidth * (1 if main.ball().pos.x < 0 else
                                              -1)
        target = robocup.Segment(
            robocup.Point(goal_x, constants.Field.Length), robocup.Point(
                goal_x,
                constants.Field.Length - OurCornerKick.TargetSegmentWidth))
        self.kicker.target = target
        # set centers' positions just outside the goal mouth, level with the
        # middle of the target segment
        center_x_mag = constants.Field.GoalWidth / 2.0 + 0.5
        center_y = constants.Field.Length - OurCornerKick.TargetSegmentWidth / 2.0
        self.center1.target = robocup.Point(center_x_mag, center_y)
        self.center2.target = robocup.Point(-center_x_mag, center_y)
|
laudney/tinyrpc | tinyrpc/transports/tcp.py | Python | mit | 4,970 | 0.00161 | import logging
log = logging.getLogger('StreamTransport')
import Queue
import gevent
from gevent import socket
from . import ServerTransport, ClientTransport
class StreamServerTransport(ServerTransport):
    """TCP socket transport.
    This transport has a few peculiarities: It must be run in a thread,
    greenlet or some other form of concurrent execution primitive.
    This is due to
    :py:func:`~tinyrpc.transports.socket.StreamServerTransport.handle` blocking
    while waiting for a call to
    :py:func:`~tinyrpc.transports.socket.StreamServerTransport.send_reply`.
    The parameter ``queue_class`` must be used to supply a proper queue class
    for the chosen concurrency mechanism (i.e. when using :py:mod:`gevent`,
    set it to :py:class:`gevent.queue.Queue`).
    :param queue_class: The Queue class to use.
    """
    def __init__(self, queue_class=Queue.Queue):
        self._config_buffer = 4096    # recv() chunk size in bytes
        self._config_timeout = 90     # per-recv timeout, seconds
        self._socket_error = False
        self._queue_class = queue_class
        self.messages = queue_class()
    def receive_message(self):
        """Block until a (context, message) pair arrives from some client."""
        return self.messages.get()
    def send_reply(self, context, reply):
        """Hand *reply* to the handler blocked on *context*."""
        if not isinstance(reply, basestring):
            raise TypeError('string expected')
        context.put(reply)
    def _get_data(self, sock, address):
        """ Retrieves a data chunk from the socket. """
        sock_error = False
        try:
            data = sock.recv(self._config_buffer)
        except socket.timeout:
            sock_error = True
            data = None
            log.debug('StreamServerTransport:socket timeout from %s', address)
        except socket.error:
            sock_error = True
            data = None
            log.debug('StreamServerTransport:socket error from %s', address)
        return data, sock_error
    def _get_msg(self, sock, address):
        """Accumulate chunks into one message; a short read ends the message."""
        sock_error = False
        chunks = []
        while True:
            data, sock_error = self._get_data(sock, address)
            if not data:
                break
            chunks.append(data)
            if len(data) < self._config_buffer:
                # short read: assume the client finished sending
                break
        msg = ''.join(chunks)
        return msg, sock_error
    def handle(self, sock, address):
        """StreamServer handler function.
        The transport will serve a request by reading the message and putting
        it into an internal buffer. It will then block until another
        concurrently running function sends a reply using
        :py:func:`~tinyrpc.transports.socket.StreamServerTransport.send_reply`.
        The reply will then be sent to the client being handled and handle will
        return.
        """
        sock.settimeout(self._config_timeout)
        while True:
            msg, sock_error = self._get_msg(sock, address)
            if msg and len(msg):
                log.debug('StreamServerTransport:%s', msg)
                # create new context
                context = self._queue_class()
                self.messages.put((context, msg))
                # ...and send the reply
                response = context.get()
                sock.send(response)
            if sock_error:
                # fixed: the original logged an empty string here
                log.debug('StreamServerTransport:closing connection to %s',
                          address)
                sock.close()
                break
class StreamClientTransport(ClientTransport):
    """TCP socket based client transport.

    Opens a TCP connection to ``endpoint`` in the constructor (the protocol
    is connection oriented); close it explicitly with :py:meth:`close`.
    Messages are written to the socket as raw strings and replies are read
    back in buffer-sized chunks until a short read or timeout.

    NOTE(review): the previous docstring described a websocket transport
    (``websocket-python``); this class actually uses plain gevent TCP
    sockets -- confirm against the module history.

    :param endpoint: address tuple passed to ``gevent.socket.create_connection``.
    :param kwargs: Additional parameters for ``gevent.socket.create_connection``.
    """
    def __init__(self, endpoint, **kwargs):
        self._config_timeout = 5      # recv timeout, seconds
        self._config_buffer = 4096    # recv chunk size, bytes
        self.endpoint = endpoint
        self.request_kwargs = kwargs
        self.sock = gevent.socket.create_connection(self.endpoint, **kwargs)
        self.sock.settimeout(self._config_timeout)
    def send_message(self, message, expect_reply=True):
        # Send the request; optionally block for the reply.
        if not isinstance(message, basestring):
            raise TypeError('str expected')
        self.sock.send(message)
        if expect_reply:
            chunks = []
            while True:
                try:
                    data = self.sock.recv(self._config_buffer)
                except socket.timeout:
                    log.debug('StreamClientTransport:socket timeout from server')
                    break
                if not data:
                    break
                chunks.append(data)
                # A short read marks the end of the reply.
                if len(data) < self._config_buffer:
                    break
            response = ''.join(chunks)
            return response
    def close(self):
        # Idempotent teardown of the underlying socket.
        if self.sock is not None:
            self.sock.close()
|
anhstudios/swganh | data/scripts/templates/object/tangible/medicine/crafted/shared_medpack_damage_e.py | Python | mit | 466 | 0.04721 | #### NOTICE: T | HIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the Tangible template for the crafted damage-E medpack.

    Fixed: removed a stray ' | ' split marker between the BEGIN/END
    modification comment lines.
    """
    result = Tangible()
    result.template = "object/tangible/medicine/crafted/shared_medpack_damage_e.iff"
    result.attribute_template_id = 7
    result.stfName("medicine_name","medpack_damage_e")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return result
vmartinezf/ptavi-p2 | calc.py | Python | gpl-2.0 | 980 | 0.00102 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Vamos a programar una calculadora
import sys
def numerical_float(operando):
    """Convert an operand (int, float or numeric string) to float.

    Raises ValueError for non-numeric input.
    """
    return float(operando)
def suma(sum1, sum2):
    """Return the sum of the two operands."""
    return sum1 + sum2
def resta(rest1, rest2):
    """Return the difference of the two operands."""
    return rest1 - rest2
def resultado(operacion, num1, num2):
    """Dispatch *operacion* ("suma" or "resta") over num1 and num2.

    Exits the process with an error message for any other operation name.
    Fixed: removed stray ' | ' split markers that broke the parameter name
    and the first `if` condition.
    """
    if operacion == "suma":
        result = suma(num1, num2)
    elif operacion == "resta":
        result = resta(num1, num2)
    else:
        sys.exit("Error: the operation is incorrect")
    return result
if __name__ == "__main__":
    # CLI usage: calc.py <number1> <operation> <number2>
    try:
        numero1 = sys.argv[1]
        operacion = sys.argv[2]
        numero2 = sys.argv[3]
    except IndexError:
        sys.exit("Error: The number of parameters is incorrect")
    # Both operands must parse as floats.
    try:
        num1 = numerical_float(numero1)
        num2 = numerical_float(numero2)
    except ValueError:
        sys.exit("Error: Non numerical parameters")
    # resultado() itself exits with an error for unknown operations.
    print (resultado(operacion, num1, num2))
|
eaglgenes101/universalSmashSystem | engine/subactions/control/nextFrame.py | Python | gpl-3.0 | 339 | 0.00885 | from engine.subaction import *
# Go to the next frame in the action
class nextFrame(SubAction):
    """Subaction that advances the owning action by a single frame.

    Fixed: removed stray ' | ' split markers that broke the `SubAction`
    base-class call and the `return` statement.
    """
    subact_group = 'Control'
    fields = []
    def execute(self, _action, _actor):
        """Run base bookkeeping, then bump the action's frame counter."""
        SubAction.execute(self, _action, _actor)
        _action.frame += 1
    def getDisplayName(self):
        """Human-readable name shown in subaction listings."""
        return 'Next Frame'
|
anhstudios/swganh | data/scripts/templates/object/mobile/shared_jabba_the_hutt.py | Python | mit | 445 | 0.047191 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the Creature template for the Jabba the Hutt theme-park NPC.

    Fixed: removed a stray ' | ' split marker between the BEGIN/END
    modification comment lines.
    """
    result = Creature()
    result.template = "object/mobile/shared_jabba_the_hutt.iff"
    result.attribute_template_id = 9
    result.stfName("theme_park_name","jabba_the_hutt")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return result
EdDev/vdsm | tests/network/func_net_basic_test.py | Python | gpl-2.0 | 11,637 | 0 | #
# Copyright 2016-2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import os
import six
from nose.plugins.attrib import attr
from vdsm.network import errors as ne
from vdsm.network.link import iface as link_iface
from .netfunctestlib import NetFuncTestCase, NOCHK, SetupNetworksError
from .nettestlib import dummy_device, dummy_devices
from .nmnettestlib import iface_name, nm_connections, is_networkmanager_running
NETWORK_NAME = 'test-network'
NET_1 = NETWORK_NAME + '1'
NET_2 = NETWORK_NAME + '2'
VLANID = 100
class NetworkBasicTemplate(NetFuncTestCase):
__test__ = False
def test_add_net_based_on_nic(self):
with dummy_device() as nic:
NETCREATE = {NETWORK_NAME: {'nic': nic, 'switch': self.switch}}
with self.setupNetworks(NETCREATE, {}, NOCHK):
self.assertNetwork(NETWORK_NAME, NETCREATE[NETWORK_NAME])
def test_remove_net_based_on_nic(self):
with dummy_device() as nic:
NETCREATE = {NETWORK_NAME: {'nic': nic, 'switch': self.switch}}
NETREMOVE = {NETWORK_NAME: {'remove': True}}
with self.setupNetworks(NETCREATE, {}, NOCHK):
self.setupNetworks(NETREMOVE, {}, NOCHK)
self.assertNoNetwork(NETWORK_NAME)
def test_add_bridged_net_twice(self):
self._test_add_net_twice(bridged=True)
def test_add_bridgeless_net_twice(self):
self._test_add_net_twice(bridged=False)
def test_add_bridgeless_net_missing_nic_fails(self):
self._test_add_net_missing_nic_fails(bridged=False)
def test_add_bridged_net_missing_nic_fails(self):
self._test_add_net_missing_nic_fails(bridged=True)
def test_remove_missing_net_fails(self):
| NETREMOVE = {NETWORK_NAME: {'remove': True}}
with self.assertRaises(SetupNetworksError) as cm:
with self.setupNetworks(NETREMOVE, {}, NOCHK):
pass
self.assertEqual(cm.exception.status, ne.ERR_BAD_BRIDGE)
def test_add_net_based_on_vlan(self):
with dummy_device() as nic:
NETCREATE = {NETWORK_NAME: {'nic': nic, 'vlan': VLANID,
'switch': self.switch}}
| with self.setupNetworks(NETCREATE, {}, NOCHK):
self.assertNetwork(NETWORK_NAME, NETCREATE[NETWORK_NAME])
def test_remove_net_based_on_vlan(self):
with dummy_device() as nic:
NETCREATE = {NETWORK_NAME: {'nic': nic, 'vlan': VLANID,
'switch': self.switch}}
NETREMOVE = {NETWORK_NAME: {'remove': True}}
with self.setupNetworks(NETCREATE, {}, NOCHK):
self.setupNetworks(NETREMOVE, {}, NOCHK)
self.assertNoNetwork(NETWORK_NAME)
self.assertNoVlan(nic, VLANID)
def test_add_bridged_net_with_multiple_vlans_over_a_nic(self):
self._test_add_net_with_multiple_vlans_over_a_nic(bridged=True)
def test_add_bridgeless_net_with_multiple_vlans_over_a_nic(self):
self._test_add_net_with_multiple_vlans_over_a_nic(bridged=False)
def test_add_bridged_vlaned_and_non_vlaned_nets_same_nic(self):
self._test_add_vlaned_and_non_vlaned_nets_same_nic(bridged=True)
def test_add_bridgeless_vlaned_and_non_vlaned_nets_same_nic(self):
self._test_add_vlaned_and_non_vlaned_nets_same_nic(bridged=False)
def test_add_multiple_bridged_nets_on_the_same_nic_fails(self):
self._test_add_multiple_nets_fails(bridged=True)
def test_add_multiple_bridgeless_nets_on_the_same_nic_fails(self):
self._test_add_multiple_nets_fails(bridged=False)
def test_add_identical_vlan_id_bridged_nets_same_nic_fails(self):
self._test_add_multiple_nets_fails(bridged=True, vlan_id=VLANID)
def test_add_identical_vlan_id_bridgeless_nets_same_nic_fails(self):
self._test_add_multiple_nets_fails(bridged=False, vlan_id=VLANID)
def test_add_identical_vlan_id_bridged_nets_with_two_nics(self):
self._test_add_identical_vlan_id_nets_with_two_nics(bridged=True)
def test_add_identical_vlan_id_bridgeless_nets_with_two_nics(self):
self._test_add_identical_vlan_id_nets_with_two_nics(bridged=False)
def _test_add_net_with_multiple_vlans_over_a_nic(self, bridged):
VLAN_COUNT = 3
with dummy_device() as nic:
netsetup = {}
for tag in range(VLAN_COUNT):
netname = '{}{}'.format(NETWORK_NAME, tag)
netsetup[netname] = {'vlan': tag,
'nic': nic,
'switch': self.switch,
'bridged': bridged}
with self.setupNetworks(netsetup, {}, NOCHK):
for netname, netattrs in six.viewitems(netsetup):
self.assertNetwork(netname, netattrs)
def _test_add_vlaned_and_non_vlaned_nets_same_nic(self, bridged):
with dummy_device() as nic:
net_1_attrs = self._create_net_attrs(nic, bridged)
net_2_attrs = self._create_net_attrs(nic, bridged, VLANID)
self._assert_nets(net_1_attrs, net_2_attrs)
def _test_add_multiple_nets_fails(self, bridged, vlan_id=None):
with dummy_device() as nic:
net_1_attrs = net_2_attrs = self._create_net_attrs(
nic, bridged, vlan_id)
with self.setupNetworks({NET_1: net_1_attrs}, {}, NOCHK):
with self.assertRaises(SetupNetworksError) as cm:
with self.setupNetworks({NET_2: net_2_attrs}, {}, NOCHK):
pass
self.assertEqual(cm.exception.status, ne.ERR_BAD_PARAMS)
def _test_add_identical_vlan_id_nets_with_two_nics(self, bridged):
with dummy_devices(2) as (nic_1, nic_2):
net_1_attrs = self._create_net_attrs(nic_1, bridged, VLANID)
net_2_attrs = self._create_net_attrs(nic_2, bridged, VLANID)
self._assert_nets(net_1_attrs, net_2_attrs)
def _test_add_net_twice(self, bridged):
with dummy_device() as nic:
NETCREATE = {NETWORK_NAME: {'nic': nic,
'bridged': bridged,
'switch': self.switch}}
with self.setupNetworks(NETCREATE, {}, NOCHK):
self.setupNetworks(NETCREATE, {}, NOCHK)
self.assertNetwork(NETWORK_NAME, NETCREATE[NETWORK_NAME])
def _test_add_net_missing_nic_fails(self, bridged):
NETCREATE = {NETWORK_NAME: {'nic': 'missing_nic',
'bridged': bridged,
'switch': self.switch}}
with self.assertRaises(SetupNetworksError) as cm:
with self.setupNetworks(NETCREATE, {}, NOCHK):
pass
self.assertEqual(cm.exception.status, ne.ERR_BAD_NIC)
def _assert_nets(self, net_1_attrs, net_2_attrs):
with self.setupNetworks({NET_1: net_1_attrs}, {}, NOCHK):
with self.setupNetworks({NET_2: net_2_attrs}, {}, NOCHK):
self.assertNetwork(NET_1, net_1_attrs)
self.assertNetwork(NET_2, net_2_attrs)
def _create_net_attrs(self, nic, bridged, vlan_id=None):
attrs = {'nic': nic,
'bridged': bridged,
'switch': self.switch}
if vlan_id is no |
Calzzetta/Simple-Django-REST-Framework-Demo | simpledrf/urls.py | Python | mit | 825 | 0 | """simpledrf URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin

# Route the admin site and delegate /employee/ to the app's own URLconf.
# Fixed: removed stray ' | ' split markers that broke `django.contrib`
# and `admin.site.urls`.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^employee/', include('employee.urls')),
]
|
JH72/Embedded-Programming | Capstone2/med_simulator/device.py | Python | mit | 2,581 | 0.046881 | #device.py
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient as AWS #To connect to AWS IOT
#Initializes AWS IOT client
myAWS = AWS("asus-simon")
#Sets the url that is receiving the data
host = "a3f84gpf4vy7nu.iot.us-west-2.amazonaws.com"
portAWS = 8883
myAWS.configureEndpoint(host, portAWS)
#Configures credentials using paths of files
myAWS.configureCredentials("pem/root-CA.crt", "pem/asus-simon.private.key", "pem/asus-simon.cert.pem")
#Starts connection to AWS
myAWS.connect()
def customCallback(client, userdata, message):
    """MQTT callback: print the payload's "message" field and clear the
    module-level `state` flag so the main loop sends the next window."""
    global state
    payload = json.loads(message.payload)
    print(payload["message"])
    state = False
import time #To get current time
import json #To manipulate JSON
import numpy
import wfdb
def intify(record):
    """Return the samples of *record* truncated to plain Python ints
    (JSON cannot serialize numpy scalar types)."""
    return [int(sample) for sample in record]
def getWave(t0, dt):
    """Read *dt* seconds of MIT-BIH record 100 starting at *t0* (seconds).

    Returns {"d_signals": [raw, filtered]} with plain-int samples, or None
    (after setting the module-level `stop` flag) when the record is
    exhausted.  Fixed: removed stray ' | ' split markers that broke
    `factor` and the first `wfdb.rdsamp` call.
    """
    global stop
    # MIT-BIH records are sampled at 360 Hz: seconds -> sample indices.
    factor = 360.0
    tf = int((t0 + dt) * factor)
    t0 = int(t0 * factor)
    fileName = "database/mitdb/100"
    record = None
    try:
        record = wfdb.rdsamp(fileName, sampfrom=t0, sampto=tf, channels=[0, 1], physical=False)
    except ValueError:
        print("ValueError1")
        # The window may run past the end of the record; retry open-ended.
        try:
            record = wfdb.rdsamp(fileName, sampfrom=t0, channels=[0, 1], physical=False)
        except ValueError:
            print("ValueError2")
            stop = True
            return None
    x = record.d_signals[:, 0]
    print("record = {}".format(record.d_signals[0, 0]))
    print(x[0])
    if x is None:
        # NOTE(review): effectively unreachable -- slicing never yields None;
        # kept for parity with the original debug output.
        print("None error")
    x_filtered = record.d_signals[:, 1]
    x = intify(x)
    x_filtered = intify(x_filtered)
    return {"d_signals": [x, x_filtered]}
def initSample():
    """Publish the first 3600 samples of MIT-BIH record 100 plus its
    metadata to AWS IoT on "wfdb/init/start", subscribe the control
    topics, and initialise the module-level state/stop flags."""
    fileName = "database/mitdb/100"
    t0 = 0
    tf = 3600
    record = wfdb.rdsamp(fileName, sampfrom=t0, sampto=tf, channels=[0,1], physical=False)
    # intify(): JSON cannot encode numpy scalar types.
    data = {"d_signals": [intify(record.d_signals[:,0]),intify(record.d_signals[:,1])],
            "fs": record.fs,
            "adcgain": record.adcgain,
            "adczero": record.adczero,
            "signame": record.signame,
            "units": record.units,
            "baseline": record.baseline
            }
    dataJSON = json.dumps(data)
    dataJSONString = str(dataJSON)
    myAWS.publish("wfdb/init/start", dataJSONString, 0)
    # customCallback sets state = False when either topic fires.
    myAWS.subscribe("wfdb/down", 0, customCallback)
    myAWS.subscribe("wfdb/init/stop", 0, customCallback)
    global state
    global stop
    stop = False
    state = True
t0 = 10
dt = 30
# Main publish loop: whenever the remote side clears `state` (via
# customCallback), read the next dt-second window and publish it.
initSample()
while True:
    print(t0)
    if not state:
        if stop:
            # record exhausted on a previous pass; nothing left to send
            continue
        testDict = getWave(t0, dt)
        if stop or testDict is None:
            # Fixed: getWave() returns None (and sets `stop`) at end of
            # record; the original iterated testDict before this check,
            # raising TypeError on None.
            continue
        for key in testDict:
            print(key)
        t0 += dt
        dataJSON = json.dumps(testDict)
        dataJSONString = str(dataJSON)
        myAWS.publish("wfdb/up", dataJSONString, 0)
        state = True
    time.sleep(1)
JesseLivezey/plankton | pylearn2/datasets/semi_plankton.py | Python | bsd-3-clause | 3,794 | 0.006589 | """
Plankton dataset wrapper for semi-supervised networks.
"""
__authors__ = "Jesse Livezey"
import numpy as N
np = N
import cPickle, h5py, os
from theano.compat.six.moves import xrange
from pylearn2.datasets import semi_supervised, dense_design_matrix
from pylearn2.utils.rng import make_np_rng
from pylearn2.utils import serial
class SemiPlankton(semi_supervised.SemiSupervised):
    """
    The National Data Science Bowl dataset, wrapped for semi-supervised
    training: labeled images from train.h5 plus unlabeled images from
    test.h5.

    Fixes: removed stray ' | ' split markers around the cPickle.load call
    and the `self.ids` assignment, and corrected `unlabeled = 1.-unlabeled`
    (a NameError: the attribute is `self.unlabeled`).

    Parameters
    ----------
    folder : str
        Folder which contains data files.
    which_set : str
        'train', 'valid', or 'test'
    seed : int
        Seed for the RNG that shuffles the labeled examples.
    """
    def __init__(self, folder, which_set, seed):
        self.args = locals()
        rng = np.random.RandomState(seed)
        self.rng = make_np_rng(rng, which_method=['permutation'])
        if which_set not in ['train', 'valid', 'test']:
            raise ValueError(
                'Unrecognized which_set value "%s".' % (which_set,) +
                '". Valid values are ["train","valid","test"].')
        folder = serial.preprocess(folder)
        with open(os.path.join(folder, 'label_mapping.pkl'), 'r') as f:
            self.label_mapping = cPickle.load(f)
        # Labeled data: scale pixels to [0, 1] and add a channel axis.
        with h5py.File(os.path.join(folder, 'train.h5'), 'r') as f:
            topo_view = f['X'].value[..., np.newaxis]/255.
            y = f['y'].value
            self.ids = f['id'].value
        n_examples = topo_view.shape[0]
        # Shuffle once so the train/valid/test split is random but
        # reproducible for a given seed.
        perm = rng.permutation(n_examples)
        topo_view = topo_view[perm]
        y = y[perm]
        # Unlabeled data comes from the (label-free) test file.
        with h5py.File(os.path.join(folder, 'test.h5'), 'r') as f:
            self.unlabeled = f['X'].value[..., np.newaxis]/255.
            self.ids_unlabeled = f['id'].value
        split = {'train': .8,
                 'valid': .1,
                 'test': .1}
        assert np.all(np.array(split.values()) > 0.)
        assert np.allclose(np.sum(split.values()), 1.)
        n_test = int(split['test']*n_examples)
        n_valid = int(split['valid']*n_examples)
        n_train = n_examples-n_test-n_valid
        train_topo_view = topo_view[:n_train]
        if which_set == 'train':
            topo_view = train_topo_view
            y = y[:n_train]
        elif which_set == 'valid':
            topo_view = topo_view[n_train:n_train+n_valid]
            y = y[n_train:n_train+n_valid]
        else:
            topo_view = topo_view[n_train+n_valid:]
            y = y[n_train+n_valid:]
        y = y[..., np.newaxis]
        # Invert so background is 0.
        topo_view = 1.-topo_view
        self.unlabeled = 1.-self.unlabeled
        # This does not work with data augmentation
        #self.feature_mean = train_topo_view.mean(0)
        #topo_view -= self.feature_mean
        y_labels = max(self.label_mapping.values())+1
        axes = ['b', 0, 1, 'c']
        view_converter = dense_design_matrix.DefaultViewConverter(topo_view.shape[1:], axes=axes)
        # L holds only the labeled design matrix; X additionally stacks the
        # unlabeled examples for the valid/test sets.
        L = topo_view.reshape(-1, np.prod(topo_view.shape[1:]))
        unlabeled = self.unlabeled.reshape(-1, np.prod(self.unlabeled.shape[1:]))
        if which_set == 'train':
            X = L
        else:
            X = np.vstack((L, unlabeled))
        super(SemiPlankton, self).__init__(X=X, V=X, L=L, y=y,
                                           view_converter=view_converter,
                                           y_labels=y_labels)
    def get_test_set(self):
        """Return a SemiPlankton instance built with which_set='test'."""
        args = {}
        args.update(self.args)
        del args['self']
        args['which_set'] = 'test'
        return SemiPlankton(**args)
    def get_valid_set(self):
        """Return a SemiPlankton instance built with which_set='valid'."""
        args = {}
        args.update(self.args)
        del args['self']
        args['which_set'] = 'valid'
        return SemiPlankton(**args)
lucienfostier/gaffer | python/GafferTest/StringInOutNode.py | Python | bsd-3-clause | 2,820 | 0.026241 | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
class StringInOutNode( Gaffer.ComputeNode ) :
	"""Test utility node that copies its "in" string plug to its "out" plug.
	It counts how many times Gaffer invokes hash() and compute(), so unit
	tests can assert on caching behaviour.
	"""
	def __init__( self, name="StringInOutNode", defaultValue="", substitutions = IECore.StringAlgo.Substitutions.AllSubstitutions ) :
		Gaffer.ComputeNode.__init__( self, name )
		self.addChild( Gaffer.StringPlug( "in", Gaffer.Plug.Direction.In, defaultValue, substitutions = substitutions ) )
		self.addChild( Gaffer.StringPlug( "out", Gaffer.Plug.Direction.Out ) )
		# Incremented by hash()/compute(); read directly by tests.
		self.numHashCalls = 0
		self.numComputeCalls = 0
	def affects( self, input ) :
		"""Declare that "out" is dirtied whenever "in" changes."""
		outputs = Gaffer.ComputeNode.affects( self, input )
		if input.isSame( self["in"] ) :
			outputs.append( self["out"] )
		return outputs
	def hash( self, output, context, h ) :
		"""Fold the input value into the hash for "out"; count every call."""
		if output.isSame( self["out"] ) :
			self["in"].hash( h )
		self.numHashCalls += 1
	def compute( self, plug, context ) :
		"""Copy the input string to "out"; count every call."""
		if plug.isSame( self["out"] ) :
			plug.setValue( self["in"].getValue() )
		self.numComputeCalls += 1
IECore.registerRunTimeTyped( StringInOutNode, typeName = "GafferTest::StringInOutNode" )
|
shanemikel/beets | beetsplug/scrub.py | Python | mit | 5,220 | 0 | # This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Cleans extraneous metadata from files' tags via a command or
automatically whenever tags are written.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from beets.plugins import BeetsPlugin
from beets import ui
from beets import util
from beets import config
from beets import mediafile
_MUTAGEN_FORMATS = {
b'asf': b'ASF',
b'apev2': b'APEv2File',
b'flac': b'FLAC',
b'id3': b'ID3FileType',
b'mp3': b'MP3',
b'mp4': b'MP4',
b'oggflac': b'OggFLAC',
b'oggspeex': b'OggSpeex',
b'oggtheora': b'OggTheora',
b'oggvorbis': b'OggVorbis',
b'oggopus': b'OggOpus',
b'trueaudio': b'TrueAudio',
b'wavpack': b'WavPack',
b'monkeysaudio': b'MonkeysAudio',
b'optimfrog': b'OptimFROG',
}
scrubbing = False
class ScrubPlugin(BeetsPlugin):
    """Removes extraneous metadata from files' tags.

    Fixes: removed stray ' | ' split markers in the _scrub() comment block,
    and made scrub_func skip items whose file cannot be opened (previously
    `mf` was left unbound after the IOError, so `mf.art` raised NameError).
    """
    def __init__(self):
        super(ScrubPlugin, self).__init__()
        self.config.add({
            'auto': True,
        })
        self.register_listener("write", self.write_item)
    def commands(self):
        def scrub_func(lib, opts, args):
            # This is a little bit hacky, but we set a global flag to
            # avoid autoscrubbing when we're also explicitly scrubbing.
            global scrubbing
            scrubbing = True
            # Walk through matching files and remove tags.
            for item in lib.items(ui.decargs(args)):
                self._log.info(u'scrubbing: {0}',
                               util.displayable_path(item.path))
                # Get album art if we need to restore it.
                if opts.write:
                    try:
                        mf = mediafile.MediaFile(util.syspath(item.path),
                                                 config['id3v23'].get(bool))
                    except IOError as exc:
                        self._log.error(u'could not open file to scrub: {0}',
                                        exc)
                        # Skip this item: mf is unbound after a failed open.
                        continue
                    art = mf.art
                # Remove all tags.
                self._scrub(item.path)
                # Restore tags, if enabled.
                if opts.write:
                    self._log.debug(u'writing new tags after scrub')
                    item.try_write()
                    if art:
                        self._log.info(u'restoring art')
                        mf = mediafile.MediaFile(util.syspath(item.path))
                        mf.art = art
                        mf.save()
            scrubbing = False
        scrub_cmd = ui.Subcommand('scrub', help='clean audio tags')
        scrub_cmd.parser.add_option('-W', '--nowrite', dest='write',
                                    action='store_false', default=True,
                                    help='leave tags empty')
        scrub_cmd.func = scrub_func
        return [scrub_cmd]
    @staticmethod
    def _mutagen_classes():
        """Get a list of file type classes from the Mutagen module.
        """
        classes = []
        for modname, clsname in _MUTAGEN_FORMATS.items():
            mod = __import__(b'mutagen.{0}'.format(modname),
                             fromlist=[clsname])
            classes.append(getattr(mod, clsname))
        return classes
    def _scrub(self, path):
        """Remove all tags from a file.
        """
        for cls in self._mutagen_classes():
            # Try opening the file with this type, but just skip in the
            # event of any error.
            try:
                f = cls(util.syspath(path))
            except Exception:
                continue
            if f.tags is None:
                continue
            # Remove the tag for this type.
            try:
                f.delete()
            except NotImplementedError:
                # Some Mutagen metadata subclasses (namely, ASFTag) do not
                # support .delete(), presumably because it is impossible to
                # remove them. In this case, we just remove all the tags.
                for tag in f.keys():
                    del f[tag]
                f.save()
            except IOError as exc:
                self._log.error(u'could not scrub {0}: {1}',
                                util.displayable_path(path), exc)
    def write_item(self, item, path, tags):
        """Automatically scrub files as they are written, when the
        plugin's `auto` option is enabled (the previous docstring was a
        copy-paste about embedding art)."""
        if not scrubbing and self.config['auto']:
            self._log.debug(u'auto-scrubbing {0}', util.displayable_path(path))
            self._scrub(path)
|
hyunchel/redis-dump-load | tests/util.py | Python | bsd-2-clause | 1,620 | 0.005556 | import sys
py3 = sys.version_info[0] == 3
# python 2/3 compatibility
if py3:
# borrowed from six
def b(s):
'''Byte literal'''
return s.encode("latin-1")
def u(s):
'''Text literal'''
| return s
else:
# borrowed from six
def b(s):
'''Byte literal'''
return s
# Workaround for standalone backslash
def u(s):
'''Text literal'''
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
# backport for python 2.6
def get_subprocess_check_output():
    """Return ``subprocess.check_output``, substituting a local backport on
    Python 2.6 where the stdlib does not provide it."""
    import subprocess
    stdlib_impl = getattr(subprocess, 'check_output', None)
    if stdlib_impl is not None:
        return stdlib_impl
    # backport from python 2.7
    def check_output(*popenargs, **kwargs):
        if 'stdout' in kwargs:
            raise ValueError('stdout argument not allowed, it will be overridden.')
        proc = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
        output, _ = proc.communicate()
        retcode = proc.poll()
        if retcode:
            cmd = kwargs.get("args")
            if cmd is None:
                cmd = popenargs[0]
            raise subprocess.CalledProcessError(retcode, cmd, output=output)
        return output
    return check_output
def with_temp_dir(fn):
    """Decorator for test methods: create a throwaway directory, pass it as
    the first argument after ``self``, and always remove it afterwards."""
    import functools
    import shutil
    import tempfile
    @functools.wraps(fn)
    def wrapper(self, *args, **kwargs):
        tmp_dir = tempfile.mkdtemp()
        try:
            return fn(self, tmp_dir, *args, **kwargs)
        finally:
            shutil.rmtree(tmp_dir)
    return wrapper
| |
cuauv/software | vision/modules/deprecated/Bins_SVM.py | Python | bsd-3-clause | 12,569 | 0.004058 | from collections import namedtuple
import pickle
from math import sin, cos, radians
import os
import time
import cv2
import numpy as np
from caffew import caffe_classifier
import shm
from vision.modules import ModuleBase, gui_options
capture_source = 'downward'
vision_options = [gui_options.BooleanOption('debugging', True), gui_options.BooleanOption('cover_debugging', False),
gui_options.IntOption('l_min', 0, 0, 255), gui_options.IntOption('l_max', 127, 0, 255),
gui_options.IntOption('a_min', 114, 0, 255), gui_options.IntOption('a_max', 152, 0, 255),
gui_options.IntOption('b_min', 0, 0, 255), gui_options.IntOption('b_max', 166, 0, 255),
gui_options.IntOption('cover_l_min', 0, 0, 255), gui_options.IntOption('cover_l_max', 125, 0, 255),
gui_options.IntOption('cover_a_min', 115, 0, 255), gui_options.IntOption('cover_a_max', 130, 0, 255),
gui_options.IntOption('cover_b_min', 100, 0, 255), gui_options.IntOption('cover_b_max', 135, 0, 255),
gui_options.IntOption('erode_size', 2, 0, 50),
gui_options.IntOption('dilate_size', 2, 0, 50),
gui_options.IntOption('cover_min_area', 5000),
gui_options.IntOption('min_area', 5000),
gui_options.IntOption('min_invader_area', 1000),
gui_options.BooleanOption('training', False)]
shm_groups = (shm.shape_banana, shm.shape_lightning, shm.shape_bijection, shm.shape_soda)
shape_names = ("banana", "lightning", "bijection", "soda")
shm_group_dictionary = {"banana": shm.shape_banana,
"lightning": shm.shape_lightning,
"bijection": shm.shape_bijection,
"soda": shm.shape_soda}
OutputData = namedtuple("OutputData", ("p", "x", "y", "hd"))
data_dirname = os.path.dirname(os.path.realpath(__file__)) + "/../data/Bins/"
classifier_bson_filename = data_dirname + "/bins_caffe_classifier.bson"
svm_filename = data_dirname + "/bins_svm_classifier.pkl"
class Bins(ModuleBase.ModuleBase):
def __init__(self):
super(Bins, self).__init__(True)
try:
self.svm = pickle.load(open(svm_filename, "r"))
except IOError:
print("Sklearn pickle file could not be loaded")
self.svm = None
# try:
# self.classifier = caffe_classifier.CaffeClassifier(classifier_bson_filename)
# except IOError:
# print("Caffe bson file could not be loaded.")
# self.classifier = None
def process(self, mat):
self.post('orig', mat)
final = mat.copy()
original = cv2.cvtColor(mat, cv2.COLOR_BGR2LAB)
LabSpace = cv2.split(original)
lthreshed = cv2.inRange(LabSpace[0], self.options['l_min'], self.options['l_max'])
athreshed = cv2.inRange(LabSpace[1], self.options['a_min'], self.options['a_max'])
bthreshed = cv2.inRange(LabSpace[2], self.options['b_min'], self.options['b_max'])
cover_lthreshed = cv2.inRange(LabSpace[0], self.options['cover_l_min'], self.options['cover_l_max'])
cover_athreshed = cv2.inRange(LabSpace[1], self.options['cover_a_min'], self.options['cover_a_max'])
cover_bthreshed = cv2.inRange(LabSpace[2], self.options['cover_b_min'], self.options['cover_b_max'])
finalThreshed = lthreshed | ~athreshed
cover_finalThreshed = cover_athreshed & cover_bthreshed & cover_lthreshed
erodeSize = self.options['erode_size']
dilateSize = self.options['dilate_size']
erodeElement = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (erodeSize * 2 + 1, erodeSize * 2 + 1),
(erodeSize, erodeSize))
dilateElement = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (dilateSize * 2 + 1, dilateSize * 2 + 1),
(dilateSize, dilateSize))
eroded = cv2.erode(finalThreshed, erodeElement)
dilated = cv2.dilate(eroded, dilateElement)
cover_eroded = cv2.erode(cover_finalThreshed, erodeElement)
cover_dilated = cv2.dilate(cover_eroded, dilateElement)
if self.options['debugging']:
self.post('L Threshed', lthreshed)
self.post('a Threshed', athreshed)
self.post('b Threshed', bthreshed)
self.post("Threshed", finalThreshed)
self.post("Masked", cv2.bitwise_and(mat, mat, mask=finalThreshed))
self.post("Eroded", eroded)
self.post("Eroded/Dilated", dilated.copy())
if self.options['cover_debugging']:
self.post('cover_L Threshed', cover_lthreshed)
self.post('cover_a Threshed', cover_athreshed)
self.post('cover_b Threshed', cover_bthreshed)
self.post("cover_Threshed", cover_finalThreshed)
self.post("cover_Masked", cv2.bitwise_and(mat, mat, mask=cover_finalThreshed))
self.post("cover_Eroded", cover_eroded)
self.post("cover_Eroded/Dilated", cover_dilated.copy())
_, cover_contours, _ = cv2.findContours(cover_dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if cover_contours:
if self.options['cover_debugging']:
cover_allContoursDrawing = np.copy(mat)
cv2.drawContours(cover_allContoursDrawing, cover_contours, -1, (255, 255, 0), 2)
self.post("Cover contours", cover_allContoursDrawing)
good_cover_contours = []
cover_contour = namedtuple("cover_contour", ("contour", "area", "rotated_rect", "probability"))
for c in cover_contours:
cover_area = cv2.contourArea(c)
if cover_area < self.options['cover_min_area']:
continue
rrect = cv2.minAreaRect(c)
probability = cover_area / (rrect[1][0] * rrect[1][1])
if probability > .8:
good_cover_contours.append(cover_contour(c, cover_area, rrect, probability))
if good_cover_contours:
best_cover_contour = max(good_cover_contours, key=lambda c: c.probability)
rrect = best_cover_contour.rotated_rect
if rrect[1][0] > rrect[1][1]:
rrect = (rrect[0], (rrect[1][1], rrect[1][0]), rrect[2] + 90)
if self.options['cover_debugging']:
cover_good_contours_drawing = np.copy(mat)
cv2.drawContours(cover_good_contours_drawing, [best_cover_contour.contour], -1, (255, 0, 0), 3)
scale = rrect[1][1] / 4
offset = (scale * sin(radians(rrect[2])), -scale * cos(radians(rrect[2])))
p1 = (int(offset[0] + rrect[0][0]), int(offset[1] + rrect[0][1]))
p2 = (int(-offset[0] + rrect[0][0]), int(-offset[1] + rrect[0][1]))
cv2.line(cover_good_contours_drawing, p1, p2, (0, 255), 2)
self.post("Good cover contours", cover_good_contours_drawing)
cv2.drawContours(final, [best_cover_contour.contour], -1, (255, 0, 0), 3)
scale = rrect[1][1] / 4
offset = (scale * sin(radians(rrect[2])), -scale * cos(radians(rrect[2])))
p1 = (int(offset[0] + rrect[0][0 | ]), int(offset[1] + rrect[0][1]))
| p2 = (int(-offset[0] + rrect[0][0]), int(-offset[1] + rrect[0][1]))
cv2.line(final, p1, p2, (0, 255), 2)
shm_group_cover = shm.shape_handle
shm_group_cover.p.set(best_cover_contour.probability)
shm_group_cover.x.set(int(rrect[0][0]))
shm_group_cover.y.set(int(rrect[0][1]))
shm_group_cover.hd.set(rrect[2])
else:
shm.shape_handle.p.set(0)
else:
shm.shape_handle.p.set(0)
_, contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# unmatched_bins = {"banana": shm.shape_banana,
# "lightning": shm.shape_lightn |
waldenilson/TerraLegal | project/servidor/restrito/contratoservidor.py | Python | gpl-2.0 | 6,591 | 0.017448 | from django.contrib.auth.decorators import login_required, permission_required,\
user_passes_test
from django.shortcuts import render_to_response, get_object_or_404
from django.template.context import RequestContext
from project.tramitacao.forms import FormTipoCaixa
from django.http import HttpResponseRedirect
from project.tramitacao.models import Tbcontratoservidor
from django.contrib import messages
from project.tramitacao.admin import verificar_permissao_grupo
from django.http.response import HttpResponse
from project.tramitacao.relatorio_base import relatorio_csv_base, relatorio_ods_base,\
relatorio_ods_base_header, relatorio_pdf_base,\
relatorio_pdf_base_header_title, relatorio_pdf_base_header
from odslib import ODS
nome_relatorio = "relatorio_tipo_caixa"
response_consulta = "/sicop/restrito/tipo_caixa/consulta/"
titulo_relatorio = "Relatorio dos Tipos de Caixa"
planilha_relatorio = "Tipos de Caixa"
@permission_required('sicop.tipo_caixa_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def consulta(request):
    """List Tbcontratoservidor records, ordered by contract name.

    NOTE(review): on POST, ``nmcontrato`` is read into ``nome`` but the
    filter that would use it is commented out, so POST and GET currently
    return the same unfiltered queryset -- confirm whether the filter
    should be restored.
    """
    if request.method == "POST":
        nome = request.POST['nmcontrato']
        lista = Tbcontratoservidor.objects.all()#.filter( nmtipocaixa__icontains=nome, tbdivisao__id = AuthUser.objects.get( pk = request.user.id ).tbdivisao.id )
    else:
        lista = Tbcontratoservidor.objects.all()
    lista = lista.order_by( 'nmcontrato' )
    # Store the query result in the session so the report views
    # (PDF/ODS/CSV) can reuse it without re-running the query.
    request.session['relatorio_tipo_caixa'] = lista
    return render_to_response('controle/servidor/contratoservidor/consulta.html' ,{'lista':lista}, context_instance = RequestContext(request))
@permission_required('sicop.tipo_caixa_cadastro', login_url='/excecoes/permissao_negada/', raise_exception=True)
def cadastro(request):
    """Create a new Tbtipocaixa from POSTed form data.

    On success redirects either to the list view or to the ``next`` URL
    from the querystring; otherwise (GET or invalid data) renders the form.

    NOTE(review): ``Tbtipocaixa`` and ``AuthUser`` are not among the imports
    visible at the top of this file -- confirm they are imported elsewhere.
    """
    if request.method == "POST":
        # 'next' (querystring) lets callers return to where they came from.
        next = request.GET.get('next', '/')
        if validacao(request):
            f_tipocaixa = Tbtipocaixa(
                nmtipocaixa = request.POST['nmtipocaixa'],
                desctipocaixa = request.POST['desctipocaixa'],
                tbdivisao = AuthUser.objects.get( pk = request.user.id ).tbdivisao
            )
            f_tipocaixa.save()
            if next == "/":
                return HttpResponseRedirect("/sicop/restrito/tipo_caixa/consulta/")
            else:
                return HttpResponseRedirect( next )
    return render_to_response('sicop/restrito/tipo_caixa/cadastro.html',{}, context_instance = RequestContext(request))
@permission_required('sicop.tipo_caixa_edicao', login_url='/excecoes/permissao_negada/', raise_exception=True)
def edicao(request, id):
    """Edit the Tbtipocaixa with primary key *id*.

    POST re-creates the object with the same id (an update in Django ORM
    terms) and redirects back to this edit page; GET renders the form.

    NOTE(review): ``Tbtipocaixa`` and ``AuthUser`` are not among the imports
    visible at the top of this file -- confirm they are imported elsewhere.
    """
    instance = get_object_or_404(Tbtipocaixa, id=id)
    if request.method == "POST":
        if validacao(request):
            # Saving with an explicit id overwrites the existing row.
            f_tipocaixa = Tbtipocaixa(
                id = instance.id,
                nmtipocaixa = request.POST['nmtipocaixa'],
                desctipocaixa = request.POST['desctipocaixa'],
                tbdivisao = AuthUser.objects.get( pk = request.user.id ).tbdivisao
            )
            f_tipocaixa.save()
            return HttpResponseRedirect("/sicop/restrito/tipo_caixa/edicao/"+str(id)+"/")
    return render_to_response('sicop/restrito/tipo_caixa/edicao.html', {"tipocaixa":instance}, context_instance = RequestContext(request))
@permission_required('sicop.tipo_caixa_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def relatorio_pdf(request):
    """Render the record list previously stored in the session as a PDF.

    Redirects back to the list view when the session holds no records.
    """
    # The consulta view stores its queryset in the session under this key.
    lista = request.session[nome_relatorio]
    if lista:
        response = HttpResponse(mimetype='application/pdf')
        doc = relatorio_pdf_base_header(response, nome_relatorio)
        elements=[]
        # Table data: title rows, a header row, then one row per record.
        dados = relatorio_pdf_base_header_title(titulo_relatorio)
        dados.append( ('NOME','DESCRICAO') )
        for obj in lista:
            dados.append( ( obj.nmtipocaixa , obj.desctipocaixa ) )
        return relatorio_pdf_base(response, doc, elements, dados)
    else:
        return HttpResponseRedirect(response_consulta)
@permission_required('sicop.tipo_caixa_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def relatorio_ods(request):
    """Render the record list stored in the session as an ODS spreadsheet.

    Redirects back to the list view when the session holds no records.
    """
    # The consulta view stores its queryset in the session under this key.
    lista = request.session[nome_relatorio]
    if lista:
        ods = ODS()
        sheet = relatorio_ods_base_header(planilha_relatorio, titulo_relatorio, ods)
        # Column subtitles.
        sheet.getCell(0, 1).setAlignHorizontal('center').stringValue( 'Nome' ).setFontSize('14pt')
        sheet.getCell(1, 1).setAlignHorizontal('center').stringValue( 'Descricao' ).setFontSize('14pt')
        sheet.getRow(1).setHeight('20pt')
        # Query-specific section: one spreadsheet row per record.
        x = 0
        for obj in lista:
            sheet.getCell(0, x+2).setAlignHorizontal('center').stringValue(obj.nmtipocaixa)
            sheet.getCell(1, x+2).setAlignHorizontal('center').stringValue(obj.desctipocaixa)
            x += 1
        relatorio_ods_base(ods, planilha_relatorio)
        # Build the HTTP response with the spreadsheet as an attachment.
        response = HttpResponse(mimetype=ods.mimetype.toString())
        response['Content-Disposition'] = 'attachment; filename='+nome_relatorio+'.ods'
        ods.save(response)
        return response
    else:
        return HttpResponseRedirect( response_consulta )
@permission_required('sicop.tipo_caixa_consulta', login_url='/excecoes/permissao_negada/', raise_exception=True)
def relatorio_csv(request):
    """Stream the record list stored in the session as a CSV attachment."""
    # The consulta view stashes its queryset in the session under this key.
    registros = request.session[nome_relatorio]
    if not registros:
        return HttpResponseRedirect(response_consulta)
    response = HttpResponse(content_type='text/csv')
    escritor = relatorio_csv_base(response, nome_relatorio)
    escritor.writerow(['Nome', 'Descricao'])
    for registro in registros:
        escritor.writerow([registro.nmtipocaixa, registro.desctipocaixa])
    return response
def validacao(request_form):
    """Return True when the posted tipo-caixa data is valid.

    Flashes a warning message on the request when the name is missing.
    """
    if request_form.POST['nmtipocaixa'] == '':
        # Name is mandatory; user-facing message kept in Portuguese.
        messages.add_message(request_form,messages.WARNING,'Informe um nome para o tipo caixa')
        return False
    return True
|
getfleety/coralillo | coralillo/lua/__init__.py | Python | mit | 512 | 0.001953 | import os
SCRIPT_PATH = os.path.dirname(__file__)
class Lua:
    """Load and register the Redis Lua scripts shipped next to this module."""

    def __init__(self, redis):
        """Register every ``*.lua`` file in SCRIPT_PATH as an attribute.

        Each script becomes ``self.<basename>`` (filename without its
        extension), bound to the callable returned by
        ``redis.register_script``.
        """
        self.redis = redis
        scripts = [s for s in os.listdir(SCRIPT_PATH) if s.endswith('.lua')]
        for scriptname in scripts:
            with open(os.path.join(SCRIPT_PATH, scriptname)) as script:
                setattr(self, scriptname.split('.')[0], redis.register_script(script.read()))

    def register(self, name, contents):
        """Register an ad-hoc script body under ``self.<name>``."""
        setattr(self, name, self.redis.register_script(contents))
|
nnscr/nnscr.de | blog/migrations/0002_post_slug.py | Python | mit | 608 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-03-03 01:08
from __future__ import unicode_literals
from django.db import migrations, models
from blog.models import Post
def slugify_all_posts(*args):
    """Re-save every Post so the new ``slug`` field gets populated.

    Presumably Post.save() derives the slug from another field -- TODO
    confirm against blog.models.Post.  ``*args`` absorbs the
    (apps, schema_editor) arguments RunPython passes.
    """
    for post in Post.objects.all():
        post.save()
class Migration(migrations.Migration):
    """Add Post.slug and backfill it for existing rows."""

    dependencies = [
        ('blog', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='slug',
            field=models.SlugField(default='', max_length=100),
        ),
        # Populate the new column by re-saving every existing Post.
        migrations.RunPython(slugify_all_posts)
    ]
|
sio2project/oioioi | oioioi/exportszu/forms.py | Python | gpl-3.0 | 584 | 0 | fr | om django import forms
from django.utils.translation import ugettext_lazy as _
class ExportSubmissionsForm(forms.Form):
    """Form for choosing which contest submissions to export."""

    round = forms.ModelChoiceField(
        queryset=None,
        required=False,
        empty_label=_("All rounds"),
        label=_("Choose round"),
    )
    only_final = forms.BooleanField(
        label=_("Only final submissions"), required=False, initial=True
    )

    def __init__(self, request, *args, **kwargs):
        """Limit the round choices to the rounds of the current contest."""
        super(ExportSubmissionsForm, self).__init__(*args, **kwargs)
        # The queryset cannot be known at class-definition time because it
        # depends on the contest attached to the current request.
        self.fields['round'].queryset = request.contest.round_set
ryanrdetzel/pjsip | tests/pjsua/scripts-sendto/160_err_duplicate_replaces.py | Python | gpl-2.0 | 552 | 0.016304 | # $Id: 160_err_duplicate_replaces.py 2066 2008-06-26 19:51:01Z bennylp $
import inc_sip as sip
import inc_sdp as sdp

# Minimal SDP answer used by the auto-answering pjsua instance.
sdp = \
"""
v=0
o=- 0 0 IN IP4 127.0.0.1
s=pjmedia
c=IN IP4 127.0.0.1
t=0 0
m=audio 4000 RTP/AVP 0
"""
pjsua_args = "--null-audio --auto-answer 200"
# Two Replaces headers in one request are invalid, so pjsua is expected
# to reject the INVITE with a 400 response.
extra_headers = "Replaces: abcd;from_tag=1\r\nReplaces: efgh;from_tag=2\r\n"
include = []
exclude = []

sendto_cfg = sip.SendtoCfg("Duplicate replaces header", pjsua_args, sdp, 400,
                           extra_headers=extra_headers,
                           resp_inc=include, resp_exc=exclude)
RedXBeard/chessQuest | chessQuest/__init__.py | Python | gpl-3.0 | 724 | 0.004144 | from chessQuest.movements import left_right_cross, right_left_cross, horizontal, vertical, knight_move
# Board cell states.
FREE = 0
FORBIDDEN = 1

# Piece letters (FEN-style).
KING = 'K'
QUEEN = 'Q'
ROOK = 'R'
BISHOP = 'B'
KNIGHT = 'N'

# Order in which pieces are considered; presumably most-constraining
# pieces first -- TODO confirm against the solver that consumes this.
PIECES_ORDER = [QUEEN, KING, ROOK, BISHOP, KNIGHT]

# Do not need an actual class for pieces.
PIECES = {
    KING: {
        'movements': [left_right_cross(1), right_left_cross(1), horizontal(1), vertical(1)],
    },
    QUEEN: {
        'movements': [left_right_cross(), right_left_cross(), horizontal(), vertical()],
    },
    ROOK: {
        'movements': [horizontal(), vertical()],
    },
    BISHOP: {
        'movements': [left_right_cross(), right_left_cross()],
    },
    KNIGHT: {
        'movements': [knight_move]
    }
}
|
sadig/DC2 | components/dc2-dhcp-appserver/dc2/dhcp/__init__.py | Python | gpl-2.0 | 908 | 0.001103 | # -*- coding: utf-8 -*-
#
# (DC)² - DataCenter Deployment Control
# Copyright (C) 2010, 2011, 2012, 2013, 2014 Stephan Adig <sh@sourcecode.de>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
__import__('pkg_resources').declare_namespace(__name__)
|
darongliu/Lstm_Turing_LM_tf | exp_model_composition/fix/lstm+attention_fix+pretrained+entropy/reader.py | Python | mit | 7,849 | 0.009938 | """
modifier: Da-Rong Liu
Date: 2/01/2017
In this implementation, sentences of the same length are put into one bucket.
This helps to avoid doing padding.
Refenrence: https://github.com/ketranm/RMN/blob/master/text/TextProcessor.lua
"""
import collections
import os
import sys
import pickle
import numpy as np
from random import shuffle
import gensim
class data:
    def __init__(self, data_dir, batch_size=100, min_seq_length=10, max_seq_length=50, min_count=2):
        """Prepare file paths and load (or build) the vocabulary mapping.

        data_dir must contain train.txt / valid.txt / test.txt.  Sentences
        outside [min_seq_length, max_seq_length] words are ignored; words
        seen fewer than min_count times map to <unk>.
        """
        self.train_file = os.path.join(data_dir, 'train.txt' )
        self.valid_file = os.path.join(data_dir, 'valid.txt' )
        self.test_file = os.path.join(data_dir, 'test.txt' )
        # The cache file name encodes the length limits so different
        # configurations do not clobber each other's vocabulary.
        self.vocab_file = os.path.join(data_dir, 'data.vocab_min'+str(min_seq_length)+'max'+str(max_seq_length))
        self.batch_size = batch_size
        self.min_count = min_count
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Filled in by load(): batched inputs, targets, batch count.
        self.x = self.y = self.nbatch = self.all_tensor_data = None
        if not (os.path.exists(self.vocab_file)):
            print('vocab_file do not exist. Establishing vocab_file...')
            self.vocab_to_id = self.establish_vocab()
        else :
            print('loading vocab_file...')
            with open(self.vocab_file, 'rb') as f:
                self.vocab_to_id = pickle.load(f)
        self.vocab_size = len(self.vocab_to_id)
        print 'vocab size: ', self.vocab_size
    def load(self, mode):
        """Read the split named by *mode* ('train'/'valid'/'test'), bucket
        the sentences by length, and build shuffled (input, target) batches.

        Exits the process on an unknown mode.
        """
        if mode == 'train':
            print('loading train text file...')
            data = self.read_data(self.train_file)
        elif mode == 'valid':
            print('loading valid text file...')
            data = self.read_data(self.valid_file)
        elif mode == 'test':
            print('loading test text file...')
            data = self.read_data(self.test_file)
        else:
            print('mode must be train, valid, or test...')
            sys.exit()
        # Same-length sentences share a bucket so no padding is needed.
        buckets = self.create_buckets(self.min_seq_length, self.max_seq_length, data)
        self.all_tensor_data = self.text_to_tensor(buckets, self.vocab_to_id, self.min_seq_length, self.max_seq_length, data)
        self.x, self.y, self.nbatch = self.generate_batch(self.batch_size, self.all_tensor_data)
    def get_data(self, index) :
        """Return [inputs, targets] for batch *index*, or None before load()."""
        if not self.x :
            print "still not load data..."
            return None
        else :
            return [self.x[index], self.y[index]]
    def get_batch_number(self) :
        """Return the number of prepared batches, or None before load()."""
        if not self.nbatch :
            print "still not load data..."
            return None
        return self.nbatch
    def shuffling_data(self):
        """Re-shuffle and re-batch the loaded tensors (no-op before load())."""
        if not self.x :
            print "still not load data..."
        else :
            print "shuffling data..."
            self.x, self.y, self.nbatch = self.generate_batch(self.batch_size, self.all_tensor_data)
def generate_word_embedding_matrix(self, path):
"""
generate vocab lookup embedding matrix from pretrained word2vector
args:
vocab_to_id:
path: model path
return:
embedding_matrix: pretrained word embedding matrix
"""
print 'generating word embedding matrix'
model = gensim.models.Word2Vec.load_word2vec_format(path, binary=True)
embedding_matrix = np.zeros([self.vocab_size,300],dtype='float')
for word, idx in self.vocab_to_id.iteritems():
try:
word_vector = model[ | word]
except:
word_vector = np.random.uniform(-1/np.sqrt(300),1/np.sqrt(300),[300])
embedding_matrix[idx] = word_vector
return embedding_matrix
""" -------STATIC METHOD------- """
def establish_vocab(self):
print('loading train text file...')
train_data = self.read_data | (self.train_file)
all_words = []
count = 0
for sentence in train_data :
words = sentence.split()
if len(words) <= self.max_seq_length and len(words) >= self.min_seq_length :
all_words = all_words + words
count += 1
print 'finish processing sentence'
print 'all sentences: ', len(train_data), ' suitable sentences: ', count
print 'creating vocabulary mapping...'
vocab_to_id = {}
counter = collections.Counter(all_words) #counter: dict {vocab:times}
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0])) #sort by ascending
for word, count in count_pairs :
if count >= self.min_count :
vocab_to_id[word] = len(vocab_to_id)
special_words = {"<unk>"} #used for any unknown words
for special_word in special_words:
if special_word not in vocab_to_id :
vocab_to_id[special_word] = len(vocab_to_id)
#save vocab file
with open(self.vocab_file,'wb') as f:
pickle.dump(vocab_to_id, f)
return vocab_to_id
    def read_data(self,filename):
        """Read *filename* and return its lines wrapped in <s> ... </s> markers.

        Each line keeps its trailing newline inside the returned string; the
        callers tokenize with split(), so the embedded newline is harmless.
        """
        with open(filename, "r") as f:
            content = f.readlines()
        #return content
        #add <s> and </s> at both end of the sentences
        return ["<s> "+line+" </s>" for line in content]
    def create_buckets(self, min_length, max_length, data) :
        """
        count the number of each length of the sentences
        data: list of sentences
        returns buckets: dict {length: number of sentences of that length},
        counting only sentences within [min_length, max_length] words
        """
        buckets = {}
        for line in data :
            words = line.split()
            length = len(words)
            if length <= max_length and length >= min_length :
                if length in buckets :
                    buckets[length] = buckets[length] + 1
                else :
                    buckets[length] = 1
        return buckets
    def text_to_tensor(self, buckets, vocab_to_id, min_length, max_length, data) :
        """
        transform text data to tensor format
        buckets: dict {length: sentence count} from create_buckets
        returns all_data: dict {length: int array of shape (count, length)}
        holding word ids; unknown words map to the <unk> id
        """
        all_data = {}
        # Per-length write cursor into the pre-allocated arrays.
        all_data_count = {}
        for length, sentence_count in buckets.iteritems() :
            all_data[length] = np.zeros([sentence_count,length],dtype='int')
            all_data_count[length] = 0
        for line in data :
            words = line.split()
            length = len(words)
            if length <= max_length and length >= min_length :
                count = 0
                for word in words :
                    if word in vocab_to_id :
                        all_data[length][all_data_count[length]][count] = vocab_to_id[word]
                    else :
                        all_data[length][all_data_count[length]][count] = vocab_to_id["<unk>"]
                    count = count + 1
                all_data_count[length] = all_data_count[length] + 1
        return all_data
    def generate_batch(self, batch_size, all_tensor_data):
        """
        transform all tensor data into batch form
        Returns (x, y, num): lists of input/target batches and their count.
        x drops the last token of each sentence and y drops the first, so
        y is x shifted by one position (language-model targets).
        Note: shuffles the arrays in all_tensor_data in place.
        """
        all_data = {}
        for length, tensor in all_tensor_data.iteritems():
            all_data[length] = tensor
            # In-place shuffle of the sentence order within each bucket.
            np.random.shuffle(all_data[length])
        all_batch = []
        for length, tensor in all_data.iteritems():
            sentence_num = tensor.shape[0]
            batch_num = sentence_num // batch_size
            remaining = sentence_num - batch_num*batch_size
            for i in range(batch_num) :
                all_batch.append(all_data[length][i*batch_size:(i+1)*batch_size,:])
            # Leftover sentences form one smaller final batch.
            if remaining :
                all_batch.append(all_data[length][batch_num*batch_size:,:])
        all_shuffle_batch = all_batch
        shuffle(all_shuffle_batch)
        x = [tensor[:,:-1] for tensor in all_shuffle_batch]
        y = [tensor[:,1:] for tensor in all_shuffle_batch]
        num = len(all_shuffle_batch)
        return x, y, num
|
weiawe/django | tests/timezones/tests.py | Python | bsd-3-clause | 54,664 | 0.001994 | from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
from unittest import skipIf
from xml.dom.minidom import parseString
from django.contrib.auth.models import User
from django.core import serializers
from django.core.urlresolvers import reverse
from django.db.models import Max, Min
from django.http import HttpRequest
from django.template import (
Context, RequestContext, Template, TemplateSyntaxError, context_processors,
)
from django.test import (
TestCase, override_settings, skipIfDBFeature, skipUnlessDBFeature,
)
from django.test.utils import requires_tz_support
from django.utils import six, timezone
from .forms import (
EventForm, EventLocalizedForm, EventLocalizedModelForm, EventModelForm,
EventSplitForm,
)
from .models import (
AllDayEvent, Event, MaybeEvent, Session, SessionEvent, Timestamp,
)
try:
import pytz
except ImportError:
pytz = None
# These tests use the EAT (Eastern Africa Time) and ICT (Indochina Time)
# who don't have Daylight Saving Time, so we can represent them easily
# with FixedOffset, and use them directly as tzinfo in the constructors.
# settings.TIME_ZONE is forced to EAT. Most tests use a variant of
# datetime.datetime(2011, 9, 1, 13, 20, 30), which translates to
# 10:20:30 in UTC and 17:20:30 in ICT.
UTC = timezone.utc
EAT = timezone.get_fixed_timezone(180) # Africa/Nairobi
ICT = timezone.get_fixed_timezone(420) # Asia/Bangkok
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=False)
class LegacyDatabaseTests(TestCase):
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt, dt.replace(microsecond=0))
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipUnlessDBFeature('supports_timezones')
@skipUnlessDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
# This combination actually never happens.
@skipUnlessDBFeature('supports_timezones')
@skipIfDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt.replace(tzinfo=EAT), dt.replace(microsecond=0))
@skipUnlessDBFeature('supports_timezones')
@skipIfDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt | .replace(tzinfo=EAT), dt)
# This combination is no longer possible since timezone support
# was removed from the SQLite backend -- it didn't work.
@skipUnlessDBFeature('supports_timezones')
@skipUnlessDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_utc_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.asse | rtIsNone(event.dt.tzinfo)
# django.db.backends.utils.typecast_dt will just drop the
# timezone, so a round-trip in the database alters the data (!)
# interpret the naive datetime in local time and you get a wrong value
self.assertNotEqual(event.dt.replace(tzinfo=EAT), dt)
# interpret the naive datetime in original time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=UTC), dt)
@skipUnlessDBFeature('supports_timezones')
@skipIfDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
# This combination is no longer possible since timezone support
# was removed from the SQLite backend -- it didn't work.
@skipUnlessDBFeature('supports_timezones')
@skipUnlessDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_other_timezone_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# django.db.backends.utils.typecast_dt will just drop the
# timezone, so a round-trip in the database alters the data (!)
# interpret the naive datetime in local time and you get a wrong value
self.assertNotEqual(event.dt.replace(tzinfo=EAT), dt)
# interpret the naive datetime in original time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=ICT), dt)
@skipIfDBFeature('supports_timezones')
def test_aware_datetime_unspported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
with self.assertRaises(ValueError):
Event.objects.create(dt=dt)
def test_auto_now_and_auto_now_add(self):
now = datetime.datetime.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
self.assertGreater(future, ts.updated)
self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
def test_query_datetime_lookups(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
|
shub0/algorithm-data-structure | python/strong_password_checker.py | Python | bsd-3-clause | 2,441 | 0.004506 | '''
A password is considered strong if below conditions are all met:
1. It has at least 6 characters and at most 20 characters.
2. It must contain at least one lowercase letter, at least one uppercase letter, and at least one digit.
3. It must NOT contain three repeating characters in a row ("...aaa..." is weak, but "...aa...a..." is strong, assuming other conditions are met).
Write a function strongPasswordChecker(s), that takes a string s as input, and return the MINIMUM change required to make s a strong password. If s is already strong, return 0.
Insertion, deletion or replace of any one character are all considered as one change.
'''
class Solution(object):
def strongPasswordChecker(self, s):
| """
:type s: str
:rtype: int
"""
size = len(s)
missing_type = 3
if any('a' <= c <= 'z' for c in s):
missing_type -= 1
if any('A' <= c <= 'Z' for c in s):
missing_type -= 1
if any(c.isdigit() for c in s):
missing_type -= 1
delete_one = 0 # one deletion
delete_two = 0 # two deletion
replacement = 0
index = 2
while index < size:
if s[index] == s[index-1] == s[index-2]:
count = 2
while ( (index < size) and (s[index] == s[index-1]) ):
count += 1
index += 1
replacement += count / 3
if (count%3 == 0):
delete_one += 1
elif (count%3 == 1):
delete_two += 1
else:
index += 1
if size < 6:
return max(missing_type, 6 - size)
elif size <= 20:
return max(missing_type, replacement)
else:
delete = size - 20
replacement -= min(delete, delete_one)
replacement -= min(max(delete - delete_one, 0), delete_two*2) / 2
replacement -= max(delete - delete_one - 2 * delete_two, 0) / 3
return delete + max(missing_type, replacement)
solution = Solution()
print solution.strongPasswordChecker("abcdef")
print solution.strongPasswordChecker("aaa123")
print solution.strongPasswordChecker("aaa111")
print solution.strongPasswordChecker("aaaaaaaaaaaaaaaaaaaaa")
print solution.strongPasswordChecker("ABABABABABABABABABAB1")
print solution.strongPasswordChecker("..................!!!")
|
RHInception/talook | test/test_listhostshandler.py | Python | mit | 899 | 0 |
try:
import json
except ImportError:
import simplejson as json
from . import TestCase
from server import ListHostsHandler
class TestListHostsHandler(TestCase):
    def setUp(self):
        """
        Create a fresh ListHostsHandler instance before each test.
        """
        self.instance = ListHostsHandler()
def test_call(self):
"""
Verify running ListHostsHandler returns proper information.
"""
environ = {}
buf | fer = {}
def start_response(code, headers):
buffer['code'] = code
buffer['headers'] = headers
result = self.instance.__call__(environ, start_response)
assert buffer['code'] == '200 OK'
assert buffer['headers'] == [("Content-Type", "application/json")]
assert type(result) == str
results = json.loads(result)
assert results == self.instance._conf['hosts'] | |
adafruit/micropython | py/makemoduledefs.py | Python | mit | 3,304 | 0.000605 | #!/usr/bin/env python
# This pre-processor parses provided objects' c files for
# MP_REGISTER_MODULE(module_name, obj_module, enabled_define)
# These are used to generate a header with the required entries for
# "mp_rom_map_elem_t mp_builtin_module_table[]" in py/objmodule.c
from __future__ import print_function
import re
import os
import argparse
# Matches MP_REGISTER_MODULE(module_name, obj_module, enabled_define);
# anchored to a preceding newline or ';' so text embedded mid-statement
# is not picked up.  The three groups capture the macro's arguments.
pattern = re.compile(
    r"[\n;]\s*MP_REGISTER_MODULE\((.*?),\s*(.*?),\s*(.*?)\);",
    flags=re.DOTALL
)
def find_c_file(obj_file, vpath):
    """ Search vpaths for the c file that matches the provided object_file.

    :param str obj_file: object file to find the matching c file for
    :param List[str] vpath: List of base paths, similar to gcc vpath
    :return: str path to c file or None
    """
    relative_c_file = os.path.splitext(obj_file)[0] + ".c"
    # Strip any leading / or \ so os.path.join treats the path as relative.
    relative_c_file = relative_c_file.lstrip('/\\')
    for base in vpath:
        possible_c_file = os.path.join(base, relative_c_file)
        if os.path.exists(possible_c_file):
            return possible_c_file
    return None
def find_module_registrations(c_file):
    """ Find any MP_REGISTER_MODULE definitions in the provided c file.

    :param str c_file: path to c file to check
    :return: set of (module_name, obj_module, enabled_define) tuples
    """
    if c_file is None:
        # No c file to match the object file, skip
        return set()
    with open(c_file) as source:
        return set(pattern.findall(source.read()))
def generate_module_table_header(modules):
""" Generate header with module table entries for builtin modules.
:param List[(module_name, obj_module, enabled_define)] modules: module defs
:return: None
"""
# Print header file for all external modules.
mod_defs = []
print("// Automatically generated by makemoduledefs.py.\n")
for module_name, obj_module, enabled_define in modules:
mod_def = "MODULE_DEF_{}".format(module_name.upper())
mod_defs.append(mod_def)
print((
"#if ({enabled_define})\n"
" extern const struct _mp_obj_module_t {obj_module};\n"
" #define {mod_def} {{ MP_ROM_QSTR({module_name}), MP_ROM_PTR(&{obj_module}) }},\n"
"#else\n"
" #define {mod_def}\n"
"#endif\n"
).format(module_name=module_name, obj_module=obj_module,
enabled_define=enabled_define, mod_def=mod_def)
)
print("\n#define MICROPY_REGISTERED_MODULES \\")
for mod_def in mod_defs:
print(" {mod_def} \\".format(mod_def=mod_def))
print("// MICROPY_REGISTERED_MODULES")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--vpath", default=".",
help="comma separated list of folders to search for c files in")
parser.add_argument("files", nargs="*",
help="list of c files to search")
args = parser.parse_args()
vpath = [p.strip() for p in args.vpath.split(',')]
modules = set()
for obj_file in args.files:
c_file = find_c_file(obj_file, vpath)
modules |= find_module_registrations(c_file)
generate_module_table_header(sorted(modules))
if __name__ == '__main__':
main()
|
google/report2bq | application/classes/sa360_reports.py | Python | apache-2.0 | 3,492 | 0.012314 | """
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = [
'davidharcombe@google.com (David Harcombe)'
]
import collections
import logging
from contextlib import suppress
from dataclasses import dataclass
from typing import Any, Dict
@dataclass
class SA360ReportParameter(object):
name: str
path: str
element_type: str = 'str'
is_list: bool = False
column_type: str = 'savedColumnName'
ordinal: int = None # Now deprecated
class SA360ReportTemplate(object):
def _update(self,
field: SA360ReportParameter,
original: Dict[str, Any],
new: Dict[str, Any]) -> Dict[str, Any]:
for key, val in new.items():
if isinstance(val, collections.Mapping):
tmp = self._update(field, original.get(key, { }), val)
original[key] = tmp
elif isinstance(val, list):
ordinal = 0
for _item in original[key]:
if [ value for value in _item.values() if value == field.name ]:
for k in _item.keys():
if _item[k] == field.name:
original[key][ordinal] = { field.column_type: val[0] }
ordinal += 1
else:
original[key] = new[key]
return original
def _insert(self,
data: Dict[Any, Any],
field: SA360ReportParameter,
value: Any) -> Dict[Any, Any]:
_path_elements = field.path.split('.')
_path_elements.reverse()
_data = None
if field.element_type == 'int':
_value = int(value)
e | lse:
_value = value
try:
for _element in _path_elements:
if not _data:
if field.is_list or field.ordinal:
_data = { _element: [_value] }
else:
_data = { _element: _value }
else:
_data = {_element: _data }
except KeyError as k:
logging.info(f'Error replacing {self.path}'
f'{("["+self.ordinal+"]") if self.ordinal else ""}'
' - not found in data.')
re | turn _data
def prepare(self,
template: Dict[str, Any],
values: Dict[str, Any]) -> Dict[str, Any]:
_parameters = template['parameters']
_report = template['report']
for _parameter in _parameters:
_param = SA360ReportParameter(**_parameter)
with suppress(KeyError):
if isinstance(values[_param.name], collections.Mapping):
value = values[_param.name]['value']
if 'type' in values[_param.name]:
_param.column_type = values[_param.name]['type']
else:
value = values[_param.name]
_new = self._insert(data=_report, field=_param, value=value)
_report = self._update(field=_param, original=_report, new=_new)
# Filter out blank column names
_columns = \
list(filter(lambda n: \
n.get('columnName', n.get('savedColumnName', '')) != \
'', _report['columns']))
_report['columns'] = _columns
return _report
|
beeftornado/sentry | tests/acceptance/test_project_user_feedback.py | Python | bsd-3-clause | 1,515 | 0.00198 | from __future__ import absolute_import
from django.utils import timezone
from sentry.testutils import AcceptanceTestCase
class ProjectUserFeedbackTest(AcceptanceTestCase):
def setUp(self):
super(ProjectUserFeedbackTest, self).setUp()
self.user = self.create_user("foo@example.com")
self.org = self.create_organization(owner=self.user, name="Rowdy Tiger")
self.team = self.create_team(
organization=self.org, na | me="Mariachi Band", members=[self.user]
)
self.project = self.create_project(organization=self.org, teams=[self.team], name="Bengal")
self.login_as(self.user)
self.path | = u"/{}/{}/user-feedback/".format(self.org.slug, self.project.slug)
self.project.update(first_event=timezone.now())
def test(self):
group = self.create_group(project=self.project, message="Foo bar")
self.create_userreport(
date_added=timezone.now(),
group=group,
project=self.project,
event_id=self.event.event_id,
)
self.browser.get(self.path)
self.browser.wait_until_not(".loading")
self.browser.wait_until('[data-test-id="user-feedback-list"]')
self.browser.snapshot("project user feedback")
def test_empty(self):
self.browser.get(self.path)
self.browser.wait_until_not(".loading")
self.browser.wait_until('[data-test-id="user-feedback"]')
self.browser.snapshot("project user feedback - empty")
|
refeed/coala | coalib/bearlib/languages/definitions/Java.py | Python | agpl-3.0 | 325 | 0 | from coalib.bearli | b.languages.Language import Language
@Language
class Java:
extensions = '.java',
comment_delimiter = '//'
multiline_comment_delimiters = {'/*': '*/'}
string_delimiters = {'"': '"'}
multiline_string_delimiters = {}
indent | _types = {'{': '}'}
encapsulators = {'(': ')', '[': ']'}
|
windskyer/k_nova | paxes_nova/objects/network/dom_kvm.py | Python | apache-2.0 | 30,773 | 0 | #
# =================================================================
# =================================================================
class IPAddress(object):
"""
An IP Address stores the level 3 information about a logical ethernet port.
The data can be ipv4 or ipv6.
"""
def __init__(self, address):
self.address = address
def __eq__(self, other):
# Verify passed object is the same type
if type(self) is not type(other):
return False
return self.__dict__ == other.__dict__
def to_dict(self):
"""
Get a dict with this object's data. Example contents:
{
"ip-address":
{
"address": "9.10.11.12"
}
}
}
:returns: A dictionary.
"""
return {'ip-address': {'address': self.address}}
def from_dict(self, data_dict):
"""
Populate this object with data from a dictionary. See to_dict method
doc for expected format.
:param data_dict: A dictionary with the data to use.
"""
self.address = data_dict['ip-address']['address']
class LogicalEthernetPort(object):
"""
A Logical Ethernet Port is effectively a port that has two elements.
Something sitting on top of it, and some way to send traffic through it.
If Python had proper inter | faces, this class would be an interface.
"""
def __init__(self, name, state=None):
self.name = name
self.state = state
self.ip_addresses = []
def __eq__(self, other):
# Verify passed object is the same type
if type | (self) is not type(other):
return False
return self.__dict__ == other.__dict__
class PhysicalPort(LogicalEthernetPort):
"""
Represents a physical ethernet port on the system that a wire is connected
to. Typically, logical devices (bridges, bonds, vSwitches) are put on top
of these ports. However, they can also have IP Addresses put directly on
top of them, and if they do then the primary operating system can connect
through it.
To determine all of the physical ports on the system we will be utilizing
the lshw command.
"""
def __init__(self, name, state):
super(PhysicalPort, self).__init__(name, state)
self.speed_mb = 100
self.ip_addresses = []
def get_state(self):
return self.state
def get_speed_mb(self):
return self.speed_mb
def to_dict(self):
"""
Get a dict with this object's data. Example contents:
{
"physical-port":
{
"name": "eth0",
"speed_mb": 100,
"state": "Available",
"ip_addresses": []
}
}
}
:returns: A dictionary.
"""
ip_address_list = []
for ip_address in self.ip_addresses:
ip_address_list.append(ip_address.to_dict())
return {'physical-port': {'name': self.name,
'speed_mb': self.get_speed_mb(),
'state': self.get_state(),
'ip_addresses': ip_address_list}}
def from_dict(self, data_dict):
"""
Populate this object with data from a dictionary. See to_dict method
doc for expected format.
:param data_dict: A dictionary with the data to use.
"""
self.name = data_dict['physical-port']['name']
self.speed_mb = data_dict['physical-port']['speed_mb']
self.state = data_dict['physical-port']['state']
if 'ip_addresses' in data_dict['physical-port']:
for ip_dict in data_dict['physical-port']['ip_addresses']:
ip_obj = IPAddress(None)
ip_obj.from_dict(ip_dict)
self.ip_addresses.append(ip_obj)
class EthernetBond(LogicalEthernetPort):
"""
The Ethernet Bond is a low level Linux networking element that can be used
to bond multiple physical adapters together. Physical Ports can be added
to a bond. Options can be added to them (often distribution specific) to
define the bonding attributes - such as how to balance the traffic across
the bonds.
The bond takes multiple ports and presents them as a single logical entity
that can send traffic. It is nice because if one port goes down, the other
is there to enable traffic. It not only provides redundancy, it also
provides additional speed.
The bonds can be determined by listing the files in /proc/net/bonding/.
The main element that we wish to find in here is the Physical Ports within
it.
"""
def __init__(self, name):
super(EthernetBond, self).__init__(name)
self.port_list = []
def get_state(self):
return derive_state(self.port_list)
def get_speed_mb(self):
return total_speed(self.port_list)
def to_dict(self):
"""
Get a dict with this object's data. Example contents:
{
"ethernet-bond":
{
"name": "bond0",
"state": "Available",
"speed": "1000",
"ports": [],
"ip_addresses": []
}
}
}
:returns: A dictionary.
"""
ports = []
for port in self.port_list:
ports.append(port.to_dict())
ip_address_list = []
for ip_address in self.ip_addresses:
ip_address_list.append(ip_address.to_dict())
return {'ethernet-bond': {'name': self.name,
'state': self.get_state(),
'speed_mb': self.get_speed_mb(),
'ports': ports,
'ip_addresses': ip_address_list}}
def from_dict(self, data_dict):
"""
Populate this object with data from a dictionary. See to_dict method
doc for expected format.
:param data_dict: A dictionary with the data to use.
"""
self.name = data_dict['ethernet-bond']['name']
self.state = data_dict['ethernet-bond']['state']
self.port_list = []
if 'ports' in data_dict['ethernet-bond']:
for port_dict in data_dict['ethernet-bond']['ports']:
port_obj = PhysicalPort(None, None)
port_obj.from_dict(port_dict)
self.port_list.append(port_obj)
if 'ip_addresses' in data_dict['ethernet-bond']:
for ip_dict in data_dict['ethernet-bond']['ip_addresses']:
ip_obj = IPAddress(None)
ip_obj.from_dict(ip_dict)
self.ip_addresses.append(ip_obj)
class LinuxBridge(LogicalEthernetPort):
"""
A standard linux bridge is just an Ethernet Hub. It has elements sitting
on top of it, and devices within it. What the bridge does is broadcast to
all consumers the packets that come into it.
Bridges are incredibly popular in networking today because they are
lightweight, but since they broadcast every packet it does become expensive
once several VMs are running on that bridge (as the packet gets sent to all
hosts).
We will not support bridges as a connection device. However, we need to
understand which devices are backing a Bridge. This is where the brctl
command comes into use. The output of this should show us which Physical
Ports (well, really Logical Ethernet Ports) are backing the bridge. Those
are NOT candidates to add to the vSwitches.
"""
def __init__(self, name):
super(LinuxBridge, self).__init__(name)
self.port_list = []
def get_state(self):
return derive_state(self.port_list)
def get_speed_mb(self):
return slowest_speed(self.port_list)
def to_dict(self):
"""
Get a dict with this object's data. Example contents:
{
"linux-bridge":
{
|
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/encodings/koi8_r.py | Python | unlicense | 1,888 | 0.003519 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: koi8_r.py
""" Python Character Mapping Code | c koi8_r gene | rated from 'MAPPINGS/VENDORS/MISC/KOI8-R.TXT' with gencodec.py.
"""
import codecs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
def getregentry():
return codecs.CodecInfo(name='koi8-r', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
decoding_table = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f─│┌┐└┘├┤┬┴┼▀▄█▌▐░▒▓⌠■∙√≈≤≥\xa0⌡°²·÷═║╒ё╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡Ё╢╣╤╥╦╧╨╩╪╫╬©юабцдефгхийклмнопярстужвьызшэщчъЮАБЦДЕФГХИЙКЛМНОПЯРСТУЖВЬЫЗШЭЩЧЪ'
encoding_table = codecs.charmap_build(decoding_table) |
X455u/SNLP | bin/python/sortclusters.py | Python | mit | 3,748 | 0.00587 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import ConfigParser
import unicodecsv
import json
import sys
import re
### FUNCTIONS FOR COUNTING CLUSTER STATISTICS ###
def text_statistics(texts):
lencount = 0
minlen = sys.maxint
maxlen = 0
for t in texts:
l = len(t.split())
lencount += l
if l > maxlen:
maxlen = l
if l < minlen:
minlen = l
return (lencount / float(len(texts)), maxlen, minlen)
def sort_counts(counts, headers):
c = []
for i in range(len(counts)):
c.append((counts[i], headers[i]))
c.sort(reverse=True)
return c
###################
### MAIN STARTS ###
###################
if len(sys.argv) != 2:
print('Usage: python sortclusters.py <propertyfile>')
# opening the config file
conf_file = sys.argv[1]
config = ConfigParser.ConfigParser()
config.read(conf_file)
classes = []
# Forming the filename for the cluster files
clusterfile = config.get('clusters', 'clustercounts')
m = re.match( r'(.*)(\..*)', clusterfile)
c_beg = m.group(1)
c_end = m.group(2)
dataidx = config.get('clusters', 'dataidx')
clusterfile = c_beg + dataidx + c_end
# Reading the SOM cluster data
with open(clusterfile) as f:
csv_f = unicodecsv.reader(f, encoding='utf-8')
# Read the file and tokenize
for idx, row in enumerate(csv_f):
classes = row
# Transforming the data into numbers
classes = [int(i) for i in classes]
data_file = config.get('clusters', 'descriptions')
text_idx = json.loads(config.get('data', 'textdataidx'))[0]
# Initializing the clusters list
clusters = []
for i in range(max(classes)):
clusters.append([])
# Reading the text descriptions
with open(data_file) as f:
csv_f = unicodecsv.reader(f, encoding='utf-8')
for idx, row in enumerate(csv_f):
if idx == 0:
continue
c = classes[idx-1]
clusters[c-1].append(str(idx) + ": " + row[0])
### Reading the tdm matrix ###
tdmfile = config.get('data', 'termdocumentmatrix')
# Forming the right filename
m = re.match( r'(.*)(\..*)', tdmfile)
tdmfile = m.group(1) + dataidx + m.group(2)
# Terms of the tdm matrix
termheader = []
# Initializing the term cluster list
termclusters = []
with open(tdmfile) as f:
csv_f = unicodecsv.reader(f, encoding='utf-8')
for idx, row in enumerate(csv_f):
if idx == 0:
termheader = row
# initialize term cluster list
for i in range(max(classes)):
termclusters.append([0]*len(row))
continue
# summing the row to the counts of the righ cluster
cidx = classes[idx-1] -1
termclusters[cidx] = [termclusters[cidx][i] + int(row[i]) for i in range(len(row))]
# Writing the large clusters with their texts and | some statistics
with open(config.get('clusters', 'clustertexts'),"w") as f:
for i in range(len(clusters)):
c = clusters[i]
# Ignore filler clusters
if len(c) > 1:
# Writing cluster info
f.write("Cluster: " + str(i+1) + " Texts: " + str(len(c) | ) + "\n")
stats = text_statistics(c)
f.write("Avg len: " + str(stats[0]) + " Min len: " + str(stats[2]) + " Max len: " + str(stats[1]) + "\n" )
# Sort the term counts by amount
sortedcounts = sort_counts(termclusters[i], termheader)
for am, t in sortedcounts:
# Do not print with low counts
if am > 2:
f.write(str(am) + " : " + t.encode('utf-8') + "\n")
# Writing the text descriptions
for s in c:
f.write(s.encode('utf-8') + "\n")
f.write("\n----\n\n") |
ljx0305/ice | python/test/Ice/application/Client.py | Python | gpl-2.0 | 1,738 | 0.001726 | #!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2017 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import sys, Ice, time
class Client(Ice.Application):
def interruptCallback(self, sig):
print("handling signal " + str(sig))
# SIGINT interrupts time.sleep so a custom method is needed to
# sleep for a given interval.
def sleep(self, interval):
start = time.time()
while True:
sleepTime = (start + interval) - time.time()
if sleepTime <= 0:
break
time.sleep(sleepTime)
def run(self, args):
self.ignoreInterrupt()
print("Ignore CTRL+C and the like for 5 seconds (try it!)")
self.sleep(5)
self.callbackOnInterrupt()
self.holdInterrupt()
print("Hold CTRL+C and the like for 5 seconds (try it!)")
self.sleep(5)
self.releaseInterrupt()
print("Release CTRL+C (any held signals should be released)")
self.sleep(5)
self.holdInterrupt()
print("Hold CTRL+C and the like for 5 seconds (try it!)")
self.sleep(5)
self.callbackOnInterrupt()
print("Release CTRL+C (any held signals should be rele | ased)")
self.sleep(5)
self.shutdownOnInterrupt()
print("Test shutdown on destroy. Press CT | RL+C to shutdown & terminate")
self.communicator().waitForShutdown()
print("ok")
return False
app = Client()
sys.exit(app.main(sys.argv))
|
ksmit799/Toontown-Source | toontown/hood/TrashcanInteractiveProp.py | Python | mit | 10,127 | 0.004542 | from direct.actor import Actor
from direct.directnotify import DirectNotifyGlobal
from toontown.hood import InteractiveAnimatedProp
from toontown.hood import GenericAnimatedProp
from toontown.toonbase import ToontownGlobals, ToontownBattleGlobals, TTLocalizer
class TrashcanInteractiveProp(InteractiveAnimatedProp.InteractiveAnimatedProp):
notify = DirectNotifyGlobal.directNotify.newCategory('TrashcanInteractiveProp')
BattleCheerText = TTLocalizer.InteractivePropTrackBonusTerms[ToontownBattleGlobals.HEAL_TRACK]
ZoneToIdles = {ToontownGlobals.ToontownCentral: (('tt_a_ara_ttc_trashcan_idleTake2',
1,
1,
None,
3,
10),
('tt_a_ara_ttc_trashcan_idleHiccup0',
1,
1,
None,
3,
10),
('tt_a_ara_ttc_trashcan_idleLook1',
1,
1,
None,
3,
10),
('tt_a_ara_ttc_trashcan_idleAwesome3',
1,
1,
None,
3,
10)),
ToontownGlobals.DonaldsDock: (('tt_a_ara_dod_trashcan_idleBounce2',
3,
10,
'tt_a_ara_dod_trashcan_idle0settle',
3,
10),
('tt_a_ara_dod_trashcan_idle0',
1,
1,
None,
3,
10),
('tt_a_ara_dod_trashcan_idle1',
1,
1,
None,
3,
10),
('tt_a_ara_dod_trashcan_idleAwesome3',
1,
1,
None,
3,
10)),
ToontownGlobals.DaisyGardens: (('tt_a_ara_dga_trashcan_idleTake2',
1,
1,
None,
3,
10),
('tt_a_ara_dga_trashcan_idleHiccup0',
1,
1,
None,
3,
10),
('tt_a_ara_dga_trashcan_idleLook1',
1,
1,
None,
3,
10),
('tt_a_ara_dga_trashcan_idleAwesome3',
1,
1,
None,
3,
10)),
ToontownGlobals.MinniesMelodyland: (('tt_a_ara_mml_trashcan_idleBounce0',
3,
10,
'tt_a_ara_mml_trashcan_idle0settle',
3,
10),
('tt_a_ara_mml_trashcan_idleLook1',
1,
1,
None,
3,
10),
('tt_a_ara_mml_trashcan_idleHelicopter2',
1,
1,
None,
3,
10),
('tt_a_ara_mml_trashcan_idleAwesome3',
1,
1,
None,
3,
10)),
ToontownGlobals.TheBrrrgh: (('tt_a_ara_tbr_trashcan_idleShiver1',
1,
1,
None,
3,
10),
('tt_a_ara_tbr_trashcan_idleSneeze2',
1,
1,
None,
3,
10),
('tt_a_ara_tbr_trashcan_idle0',
1,
1,
None,
3,
10),
('tt_a_ara_tbr_trashcan_idleAwesome3',
1,
1,
None,
3,
10)),
ToontownGlobals.DonaldsDreamland: (('tt_a_ara_ddl_trashcan_idleSleep0',
3,
10,
None,
0,
0),
('tt_a_ara_ddl_trashcan_idleShake2',
1,
1,
None,
0,
0),
('tt_a_ara_ddl_trashcan_idleSnore1',
1,
1,
None,
| 0,
0),
('tt_a_ara_ddl_trashcan_idleAwesome3',
| 1,
1,
None,
0,
0))}
ZoneToIdleIntoFightAnims = {ToontownGlobals.ToontownCentral: 'tt_a_ara_ttc_trashcan_idleIntoFight',
ToontownGlobals.DonaldsDock: 'tt_a_ara_dod_trashcan_idleIntoFight',
ToontownGlobals.DaisyGardens: 'tt_a_ara_dga_trashcan_idleIntoFight',
ToontownGlobals.MinniesMelodyland: 'tt_a_ara_mml_trashcan_idleIntoFight',
ToontownGlobals.TheBrrrgh: 'tt_a_ara_tbr_trashcan_idleIntoFight',
ToontownGlobals.DonaldsDreamland: 'tt_a_ara_ddl_trashcan_idleIntoFight'}
ZoneToVictoryAnims = {ToontownGlobals.ToontownCentral: 'tt_a_ara_ttc_trashcan_victoryDance',
ToontownGlobals.DonaldsDock: 'tt_a_ara_dod_trashcan_victoryDance',
ToontownGlobals.DaisyGardens: 'tt_a_ara_dga_trashcan_vi |
atsaki/libcloud | libcloud/compute/types.py | Python | apache-2.0 | 9,475 | 0 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base types used by other parts of libcloud
"""
from libcloud.common.types import LibcloudError, MalformedResponseError
from libcloud.common.types import InvalidCredsError, InvalidCredsException
__all__ = [
"Provider",
"NodeState",
"DeploymentError",
"DeploymentException",
# @@TR: should the unused imports below be exported?
"LibcloudError",
"MalformedResponseError",
"InvalidCredsError",
"InvalidCredsException",
"DEPRECATED_RACKSPACE_PROVIDERS",
"OLD_CONSTANT_TO_NEW_MAPPING"
]
class Provider(object):
"""
Defines for each of the supported providers
:cvar DUMMY: Example provider
:cvar EC2_US_EAST: Amazon AWS US N. Virgina
:cvar EC2_US_WEST: Amazon AWS US N. California
:cvar EC2_EU_WEST: Amazon AWS EU Ireland
:cvar RACKSPACE: Rackspace next-gen OpenStack based Cloud Servers
:cvar RACKSPACE_FIRST_GEN: Rackspace First Gen Cloud Servers
:cvar GCE: Google Compute Engine
:cvar GOGRID: GoGrid
:cvar VPSNET: VPS.net
:cvar LINODE: Linode.com
:cvar VCLOUD: vmware vCloud
:cvar RIMUHOSTING: RimuHosting.com
:cvar ECP: Enomaly
:cvar IBM: IBM Developer Cloud
:cvar OPENNEBULA: OpenNebula.org
:cvar DREAMHOST: DreamHost Private Server
:cvar ELASTICHOSTS: ElasticHosts.com
:cvar CLOUDSIGMA: CloudSigma
:cvar NIMBUS: Nimbus
:cvar BLUEBOX: Bluebox
:cvar OPSOURCE: Opsource Cloud
:cvar DIMENSIONDATA: Dimension Data Cloud
:cvar NINEFOLD: Ninefold
:cvar TERREMARK: Terremark
:cvar EC2_US_WEST_OREGON: Amazon AWS US West 2 (Oregon)
:cvar CLOUDSTACK: CloudStack
:cvar CLOUDSIGMA_US: CloudSigma US Las Vegas
:cvar LIBVIRT: Libvirt driver
:cvar JOYENT: Joyent driver
:cvar VCL: VCL driver
:cvar KTUCLOUD: kt ucloud driver
:cvar GRIDSPOT: Gridspot driver
:cvar ABIQUO: Abiquo driver
:cvar NEPHOSCALE: NephoScale driver
:cvar EXOSCALE: Exoscale driver.
:cvar IKOULA: Ikoula driver.
:cvar OUTSCALE_SAS: Outscale SAS driver.
:cvar OUTSCALE_INC: Outscale INC driver.
:cvar PROFIT_BRICKS: ProfitBricks driver.
:cvar VULTR: vultr driver.
:cvar AZURE: Azure driver.
:cvar AURORACOMPUTE: Aurora Compute driver.
"""
AZURE = 'azure'
DUMMY = 'dummy'
EC2 = 'ec2_us_east'
RACKSPACE = 'rackspace'
GCE = 'gce'
GOGRID = 'gogrid'
VPSNET = 'vpsnet'
LINODE = 'linode'
VCLOUD = 'vcloud'
RIMUHOSTING = 'rimuhosting'
VOXEL = 'voxel'
SOFTLAYER = 'softlayer'
EUCALYPTUS = 'eucalyptus'
ECP = 'ecp'
IBM = 'ibm'
OPENNEBULA = 'opennebula'
DREAMHOST = 'dreamhost'
ELASTICHOSTS = 'elastichosts'
BRIGHTBOX = 'brightbox'
CLOUDSIGMA = 'cloudsigma'
NIMBUS = 'nimbus'
BLUEBOX = 'bluebox'
GANDI = 'gandi'
OPSOURCE = 'opsource'
DIMENSIONDATA = 'dimensiondata'
OPENSTACK = 'openstack'
SKALICLOUD = 'skalicloud'
SERVERLOVE = 'serverlove'
NINEFOLD = 'ninefold'
TERREMARK = 'terremark'
CLOUDSTACK = 'cloudstack'
LIBVIRT = 'libvirt'
JOYENT = 'joyent'
VCL = 'vcl'
KTUCLOUD = 'ktucloud'
GRIDSPOT = 'gridspot'
RACKSPACE_FIRST_GEN = 'rackspace_first_gen'
HOSTVIRTUAL = 'hostvirtual'
ABIQUO = 'abiquo'
DIGITAL_OCEAN = 'digitalocean'
NEPHOSCALE = 'nephoscale'
| CLOUDFRAMES = 'cloudframes'
EXOSCALE = 'exoscale'
IKOULA = 'ikoula'
OUTSCALE_SAS = 'outscale_sas'
OUTSCALE_INC = 'outscale_inc'
VSPHERE = 'vsphere'
PROFIT_BRICKS = 'profitbricks'
VULTR = 'vultr'
AURORACOMPUTE = 'aurora_compute'
CLOUDWATT = 'cloudwatt'
PACKET = 'packet'
RUNABOVE = 'runabove'
# OpenStack based providers
HPCLOUD = 'hpcloud'
CLOUDWATT = 'cloudwatt'
KILI = 'kili'
| ONAPP = 'onapp'
# Deprecated constants which are still supported
EC2_US_EAST = 'ec2_us_east'
EC2_EU = 'ec2_eu_west' # deprecated name
EC2_EU_WEST = 'ec2_eu_west'
EC2_US_WEST = 'ec2_us_west'
EC2_AP_SOUTHEAST = 'ec2_ap_southeast'
EC2_AP_NORTHEAST = 'ec2_ap_northeast'
EC2_US_WEST_OREGON = 'ec2_us_west_oregon'
EC2_SA_EAST = 'ec2_sa_east'
EC2_AP_SOUTHEAST2 = 'ec2_ap_southeast_2'
ELASTICHOSTS_UK1 = 'elastichosts_uk1'
ELASTICHOSTS_UK2 = 'elastichosts_uk2'
ELASTICHOSTS_US1 = 'elastichosts_us1'
ELASTICHOSTS_US2 = 'elastichosts_us2'
ELASTICHOSTS_US3 = 'elastichosts_us3'
ELASTICHOSTS_CA1 = 'elastichosts_ca1'
ELASTICHOSTS_AU1 = 'elastichosts_au1'
ELASTICHOSTS_CN1 = 'elastichosts_cn1'
CLOUDSIGMA_US = 'cloudsigma_us'
# Deprecated constants which aren't supported anymore
RACKSPACE_UK = 'rackspace_uk'
RACKSPACE_NOVA_BETA = 'rackspace_nova_beta'
RACKSPACE_NOVA_DFW = 'rackspace_nova_dfw'
RACKSPACE_NOVA_LON = 'rackspace_nova_lon'
RACKSPACE_NOVA_ORD = 'rackspace_nova_ord'
# Removed
# SLICEHOST = 'slicehost'
DEPRECATED_RACKSPACE_PROVIDERS = [Provider.RACKSPACE_UK,
Provider.RACKSPACE_NOVA_BETA,
Provider.RACKSPACE_NOVA_DFW,
Provider.RACKSPACE_NOVA_LON,
Provider.RACKSPACE_NOVA_ORD]
OLD_CONSTANT_TO_NEW_MAPPING = {
Provider.RACKSPACE: Provider.RACKSPACE_FIRST_GEN,
Provider.RACKSPACE_UK: Provider.RACKSPACE_FIRST_GEN,
Provider.RACKSPACE_NOVA_BETA: Provider.RACKSPACE,
Provider.RACKSPACE_NOVA_DFW: Provider.RACKSPACE,
Provider.RACKSPACE_NOVA_LON: Provider.RACKSPACE,
Provider.RACKSPACE_NOVA_ORD: Provider.RACKSPACE
}
class NodeState(object):
"""
Standard states for a node
:cvar RUNNING: Node is running.
:cvar REBOOTING: Node is rebooting.
:cvar TERMINATED: Node is terminated. This node can't be started later on.
:cvar STOPPED: Node is stopped. This node can be started later on.
:cvar PENDING: Node is pending.
:cvar STOPPED: Node is stopped.
:cvar SUSPENDED: Node is suspended.
:cvar ERROR: Node is an error state. Usually no operations can be performed
on the node once it ends up in the error state.
:cvar PAUSED: Node is paused.
:cvar UNKNOWN: Node state is unknown.
"""
RUNNING = 0
REBOOTING = 1
TERMINATED = 2
PENDING = 3
UNKNOWN = 4
STOPPED = 5
SUSPENDED = 6
ERROR = 7
PAUSED = 8
@classmethod
def tostring(cls, value):
values = cls.__dict__
values = dict([(key, string) for key, string in values.items() if
not key.startswith('__')])
for item_key, item_value in values.items():
if value == item_value:
return item_key
@classmethod
def fromstring(cls, value):
return getattr(cls, value.upper(), None)
class StorageVolumeState(object):
"""
Standard states of a StorageVolume
"""
AVAILABLE = "available"
ERROR = "error"
INUSE = "in_use"
CREATING = "creating"
DELETING = "deleting"
DELETED = "deleted"
BACKUP = "backup"
ATTACHING = "attaching"
UNKNOWN = "unknown"
class VolumeSnapshotState(object):
"""
Standard states of VolumeSnapshots
"""
AVAILABLE = 0
ERROR = 1
CREATING = 2
DELETING = 3
RESTORING = 4
UNKNOWN = 5
class Architecture(object):
"""
Image and size architectures.
:cvar I386: i386 (32 bt)
:cvar X86_64: x86_64 (64 bit)
"""
I386 = 0
X8 |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_examples/pylab_examples/major_minor_demo1.py | Python | mit | 1,660 | 0.00241 | #!/usr/bin/env python
"""
Demonstrate how to use major and minor tickers.
The two relevant userland classes are Locators and Formatters.
Locators determine where the ticks are and formatters control the
formatting of ticks.
Minor ticks are off by default (NullLocator and NullFormatter). You
can turn minor ticks on w/o labels by setting the minor locator. You
can also turn labeling on for the minor ticker by setting the minor
formatter
Make a plot with major ticks that are multiples of 20 and minor ticks
that are multiples of 5. Label major ticks with %d f | ormatting but
don't label minor ticks
The MultipleLocator ticker class is used to place ticks on multiples of
some base. The FormatStrFormatter uses a string format string (eg
'%d' or '%1.2f' or '%1.1f cm' ) to format the tick
| The pylab interface grid command changes the grid settings of the
major ticks of the y and y axis together. If you want to control the
grid of the minor ticks for a given axis, use for example
ax.xaxis.grid(True, which='minor')
Note, you should not use the same locator between different Axis
because the locator stores references to the Axis data and view limits
"""
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
majorLocator = MultipleLocator(20)
majorFormatter = FormatStrFormatter('%d')
minorLocator = MultipleLocator(5)
t = arange(0.0, 100.0, 0.1)
s = sin(0.1*pi*t)*exp(-t*0.01)
ax = subplot(111)
plot(t,s)
ax.xaxis.set_major_locator(majorLocator)
ax.xaxis.set_major_formatter(majorFormatter)
#for the minor ticks, use no labels; default NullFormatter
ax.xaxis.set_minor_locator(minorLocator)
show()
|
vishnu-kumar/PeformanceFramework | tests/unit/plugins/openstack/scenarios/cinder/test_utils.py | Python | apache-2.0 | 15,937 | 0 | # Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from rally import exceptions
from rally.plugins.openstack.scenarios.cinder import utils
from tests.unit import fakes
from tests.unit import test
CINDER_UTILS = "rally.plugins.openstack.scenarios.cinder.utils"
CONF = cfg.CONF
class CinderScenarioTestCase(test.ScenarioTestCase):
    def setUp(self):
        """Build a fresh CinderScenario against the mocked test context."""
        super(CinderScenarioTestCase, self).setUp()
        self.scenario = utils.CinderScenario(self.context)
    def test__list_volumes(self):
        """_list_volumes returns the client's list() result and is timed."""
        return_volumes_list = self.scenario._list_volumes()
        self.assertEqual(self.clients("cinder").volumes.list.return_value,
                         return_volumes_list)
        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "cinder.list_volumes")
    def test__list_snapshots(self):
        """_list_snapshots proxies volume_snapshots.list() and is timed."""
        return_snapshots_list = self.scenario._list_snapshots()
        self.assertEqual(
            self.clients("cinder").volume_snapshots.list.return_value,
            return_snapshots_list)
        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "cinder.list_snapshots")
    def test__set_metadata(self):
        """_set_metadata issues `sets` calls with `set_size` keys each."""
        volume = fakes.FakeVolume()
        self.scenario._set_metadata(volume, sets=2, set_size=4)

        calls = self.clients("cinder").volumes.set_metadata.call_args_list
        self.assertEqual(len(calls), 2)
        for call in calls:
            # Each positional call is (volume, metadata-dict).
            call_volume, metadata = call[0]
            self.assertEqual(call_volume, volume)
            self.assertEqual(len(metadata), 4)

        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "cinder.set_4_metadatas_2_times")
    def test__delete_metadata(self):
        """_delete_metadata removes distinct keys spread over batched calls."""
        volume = fakes.FakeVolume()
        keys = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"]
        self.scenario._delete_metadata(volume, keys, deletes=3, delete_size=4)

        calls = self.clients("cinder").volumes.delete_metadata.call_args_list
        self.assertEqual(len(calls), 3)
        all_deleted = []
        for call in calls:
            call_volume, del_keys = call[0]
            self.assertEqual(call_volume, volume)
            self.assertEqual(len(del_keys), 4)
            for key in del_keys:
                # Every deleted key must come from `keys` and never repeat.
                self.assertIn(key, keys)
                self.assertNotIn(key, all_deleted)
                all_deleted.append(key)

        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "cinder.delete_4_metadatas_3_times")
    def test__delete_metadata_not_enough_keys(self):
        """Asking for more deletions than keys available must raise."""
        volume = fakes.FakeVolume()
        # 5 keys cannot satisfy 2 deletes of 3 keys each (needs 6).
        keys = ["a", "b", "c", "d", "e"]
        self.assertRaises(exceptions.InvalidArgumentsException,
                          self.scenario._delete_metadata,
                          volume, keys, deletes=2, delete_size=3)
    def test__create_volume(self):
        """_create_volume waits for 'available' and returns the waited value."""
        return_volume = self.scenario._create_volume(1)
        self.mock_wait_for.mock.assert_called_once_with(
            self.clients("cinder").volumes.create.return_value,
            is_ready=self.mock_resource_is.mock.return_value,
            update_resource=self.mock_get_from_manager.mock.return_value,
            timeout=CONF.benchmark.cinder_volume_create_timeout,
            check_interval=CONF.benchmark.cinder_volume_create_poll_interval
        )
        self.mock_resource_is.mock.assert_called_once_with("available")
        self.mock_get_from_manager.mock.assert_called_once_with()
        self.assertEqual(self.mock_wait_for.mock.return_value, return_volume)
        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "cinder.create_volume")
    @mock.patch("rally.plugins.openstack.scenarios.cinder.utils.random")
    def test__create_volume_with_size_range(self, mock_random):
        """A dict-valued size is resolved via random.randint before create."""
        mock_random.randint.return_value = 3
        return_volume = self.scenario._create_volume(
            size={"min": 1, "max": 5},
            display_name="TestVolume")

        # The randomly chosen size (3) must be what the client receives.
        self.clients("cinder").volumes.create.assert_called_once_with(
            3, display_name="TestVolume")

        self.mock_wait_for.mock.assert_called_once_with(
            self.clients("cinder").volumes.create.return_value,
            is_ready=self.mock_resource_is.mock.return_value,
            update_resource=self.mock_get_from_manager.mock.return_value,
            timeout=CONF.benchmark.cinder_volume_create_timeout,
            check_interval=CONF.benchmark.cinder_volume_create_poll_interval
        )
        self.mock_resource_is.mock.assert_called_once_with("available")
        self.mock_get_from_manager.mock.assert_called_once_with()
        self.assertEqual(self.mock_wait_for.mock.return_value, return_volume)
        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "cinder.create_volume")
    def test__update_volume(self):
        """_update_volume replaces display_name with a generated random name."""
        fake_volume = mock.MagicMock()
        volume_update_args = {"display_name": "_updated",
                              "display_description": "_updated"}
        self.scenario.generate_random_name = mock.Mock()
        self.scenario._update_volume(fake_volume, **volume_update_args)
        # display_name is overridden by the generator; description passes through.
        self.clients("cinder").volumes.update.assert_called_once_with(
            fake_volume,
            display_name=self.scenario.generate_random_name.return_value,
            display_description="_updated")
        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "cinder.update_volume")
    def test__delete_volume(self):
        """_delete_volume polls for deletion with the deleted-status waiter."""
        cinder = mock.Mock()
        self.scenario._delete_volume(cinder)
        cinder.delete.assert_called_once_with()
        self.mock_wait_for_status.mock.assert_called_once_with(
            cinder,
            ready_statuses=["deleted"],
            check_deletion=True,
            update_resource=self.mock_get_from_manager.mock.return_value,
            timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
            check_interval=cfg.CONF.benchmark
            .cinder_volume_create_poll_interval)
        self.mock_get_from_manager.mock.assert_called_once_with()
        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "cinder.delete_volume")
    @mock.patch("rally.plugins.openstack.scenarios.cinder.utils.random")
    def test__extend_volume_with_size_range(self, mock_random):
        """A dict-valued new_size is resolved via random.randint (here 3)."""
        volume = mock.Mock()
        mock_random.randint.return_value = 3
        self.clients("cinder").volumes.extend.return_value = volume

        self.scenario._extend_volume(volume, new_size={"min": 1, "max": 5})
        volume.extend.assert_called_once_with(volume, 3)
        self.mock_wait_for.mock.assert_called_once_with(
            volume,
            is_ready=self.mock_resource_is.mock.return_value,
            update_resource=self.mock_get_from_manager.mock.return_value,
            timeout=CONF.benchmark.cinder_volume_create_timeout,
            check_interval=CONF.benchmark.cinder_volume_create_poll_interval
        )
        self.mock_resource_is.mock.assert_called_once_with("available")
        self.mock_get_from_manager.mock.assert_called_once_with()
        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "cinder.extend_volume")
def test__extend_volume(self):
volume = mock.Mock()
self.clients("cinder").volumes.extend.return_ |
ixs/dss_cli | DSS_Scraper.py | Python | gpl-2.0 | 28,370 | 0.005886 | #!/usr/bin/python
#
# A Python module to control an Open-E DSS Filer.
# Copyright (C) 2013 Andreas Thienemann <andreas@bawue.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the version 2 of the GNU General Public License
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import json
import urllib
import urlparse
import HTMLParser
import logging
import operator
import pprint
import random
import re
import sys
import mechanize
import cookielib
import BeautifulSoup
class DSS_Scraper:
    def __init__(self, server, password, debug = False):
        r"""Initiate defaults.

        server   -- base URL of the Open-E DSS web GUI
        password -- password of the "full_access" web user
        debug    -- when True, dump mechanize HTTP traffic to stdout
        """
        self.server = server
        self.password = password
        self.debug = debug
        # Remote GUI commands this client is meant to drive.
        self.allowed_cmds = [ "volume_replication_remove", "volume_replication_mode", "volume_replication_task_create", "iscsi_target_access", "iscsi_target_remove",
            "failover_task", "nas_share_toggle_smb", "volume_replication_task_stop", "volume_replication_task_remove", "volume_replication_task_status", "lv_remove",
            "failover_safe"]
        # Logging
        if debug == True:
            self.logger = logging.getLogger("mechanize")
            self.logger.addHandler(logging.StreamHandler(sys.stdout))
            self.logger.setLevel(logging.DEBUG)
        # Browser
        self.br = mechanize.Browser()
        if debug == True:
            self.br.set_debug_http(True)
            self.br.set_debug_responses(False)
            self.br.set_debug_redirects(True)
        # Cookie Jar keeps the GUI session alive across requests.
        self.cj = cookielib.LWPCookieJar()
        self.br.set_cookiejar(self.cj)
    def login(self):
        r"""Login into remote Open-E DSS Server.

        After a successful login this also scrapes the inline JavaScript of
        the landing page and stores two JSON structures on the instance:
        self.addresses (``Addresses = {...}``) and self.menustruct
        (``MenuStruct = [...]``).
        """
        self.br.open("%s/" % (self.server,))
        self.soup = BeautifulSoup.BeautifulSoup(self.br.response().read())
        # Need to login
        if filter(lambda x:x[1] == 'login_form', self.soup('form')[0].attrs)[0][1] == "login_form":
            self.br.select_form(nr=0)
            self.br["set_user"] = ["full_access"]
            self.br["password"] = self.password
            self.br.submit()
            self.soup = BeautifulSoup.BeautifulSoup(self.br.response().read())
        # Build a list of all addresses
        for script in self.soup('script'):
            if "Address" in script.text:
                # Now extract both Addresses and MenuStruct JSON construct
                # Simple top down parser: accumulate lines between the opening
                # assignment and the closing bracket, then json-decode.
                struct = None
                for line in script.text.split('\n'):
                    if line == 'Addresses = {':
                        struct = "{"
                        continue
                    if line == "}":
                        struct += line
                        self.addresses = json.loads(struct)
                        continue
                    if line == 'MenuStruct = [':
                        struct = "["
                        continue
                    if line == ']':
                        struct += line
                        self.menustruct = json.loads(struct)
                        continue
                    if struct is not None:
                        struct += line
def logout(self):
r"""Logout of remote Open-E DSS Filer"""
self.br.open("%s/?logout=1" % (self.server,))
self.br.close()
    def fetch_message_index(self):
        r"""Fetch index of messages.

        Dumps the raw XML log listing to stdout (Python 2 print statement).
        """
        # NOTE(review): error.php appears to be a priming request needed
        # before the XML status endpoint answers -- confirm against the GUI.
        self.br.open("%s/error.php" % (self.server,))
        self.br.open("%s/status.php?status=logsXML&select=messages" % (self.server,))
        print self.br.response().read()
    def fetch_message(self, msgid):
        r"""Fetch a message by ID and dump its raw HTML to stdout."""
        self.br.open("%s/error.php?groupId=%s" % (self.server, msgid))
        print self.br.response().read()
def parse_pageData(self, msg):
r"""Parse Javascript returned by server and extract the JSON pageData contruct"""
for line in msg.split("\n"):
if line.startswith("this.pageData"):
return json.loads(line.split(" ", 2)[2][:-1])
def module_list(self, id):
r"""Output JSON construct detailing the items on a module page"""
self.br.open("%s/XhrModuleLoader.php?opt=%s&id=%s&__rand=%f" % (self.server, "list", id, random.random()))
return self.parse_pageData(self.br.response().read())
def module_display(self, moduleName, id, **args):
r"""Output the HTML code for an item on a module page"""
if len(args) > 0:
self.br.open("%s/XhrModuleLoader.php?opt=%s&_moduleName=%s&id=%s&%s&__rand=%s" % (self.se | rver, "disp", moduleName, id, urllib.urlencode(args), random.random()))
else:
self.br.open("%s/XhrModuleLoader.php?opt=%s&_moduleName=%s&id=%s&__rand=%s" % (self.server, "disp", moduleName, id, random.random()))
return self.br.response().read()
def tree_index(self, id):
r"""Fetch dataLink descriptions of the left tree pane(s) on a module page"""
| module = self.module_list(id)
return dict(map(lambda x: (x["name"], x["dataLink"]), module["trees"]))
    def tree_items(self, datalink):
        r"""Parse the tree items included in a tree pane referenced by a dataLink URL.

        Returns a dict keyed by item name; each value is the dict of query
        parameters (uid, type, ...) found in the item's URL, minus 'module'.
        """
        self.br.open("%s/%s&text=1&_rn=%s" % (self.server, datalink, random.random()))
        ret = dict()
        for line in self.br.response().read().split('\n'):
            if line.startswith("ob = new WebFXTreeItem("):
                # Extract URL (5th quoted argument), unescape HTML entities,
                # and parse its query string to get uid, type and name.
                items = urlparse.parse_qs(urlparse.urlparse(HTMLParser.HTMLParser().unescape(line.split("'", 5)[4][:-1]))[4])
                ret[items["name"][0]] = dict()
                for item in items:
                    if item not in ["module"]:
                        # parse_qs wraps every value in a list; keep the first.
                        ret[items["name"][0]][item] = items[item][0]
        return ret
def tree_list(self, items):
r"""Return the JSON construct detailing the items on a tree page. This is similar to the module_list() function."""
self.br.open("%s/XhrModuleLoader.php?opt=%s&%s&__rand=%s" % (self.server, "list", urllib.urlencode(items), random.random()))
return self.parse_pageData(self.br.response().read())
def tree_display(self, items):
r"""Display the tree page items"""
self.br.open("%s/XhrModuleLoader.php?opt=%s&%s&__rand=%s" % (self.server, "disp", urllib.urlencode(items), random.random()))
return self.br.response().read()
def remove_control_from_active_form(self, whitelist = [], blacklist = []):
if len(whitelist) > 0 and len(blacklist) == 0:
# Remove the controls not needed/wanted based on the whitelist
# For some reason, HTMLClientForm does not always correctly remove an item. This needs several iterations it seems.
clean = False
while clean == False:
for control in self.br.form.controls:
if control.name not in whitelist:
self.br.form.controls.remove(control)
clean = True
for control in self.br.form.controls:
if control.name not in whitelist:
clean = False
if len(whitelist) == 0 and len(blacklist) > 0:
# Remove the controls not needed/wanted based on the blacklist
# For some reason, HTMLClientForm does not always correctly remove an item. This needs several iterations it seems.
clean = False
while clean == False:
for control in self.br.form.controls:
if control.name in blacklist:
self.br.form.controls.remove(control)
clean = True
for control in self.br.form.controls:
|
yugangw-msft/azure-cli | src/azure-cli/azure/cli/command_modules/sql/__init__.py | Python | mit | 1,376 | 0.00218 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core import AzCommandsLoader
import azure.cli.command_modules.sql._help # pylint: disable=unused-import
class SqlCommandsLoader(AzCommandsLoader):
    """Command loader wiring the `az sql` command group into the CLI core."""

    def __init__(self, cli_ctx=None):
        from azure.cli.core.commands import CliCommandType
        from azure.cli.core.profiles import ResourceType

        custom_type = CliCommandType(
            operations_tmpl='azure.cli.command_modules.sql.custom#{}')
        super(SqlCommandsLoader, self).__init__(
            cli_ctx=cli_ctx,
            custom_command_type=custom_type,
            resource_type=ResourceType.MGMT_SQL)

    def load_command_table(self, args):
        """Populate and return the SQL command table."""
        from azure.cli.command_modules.sql.commands import load_command_table
        load_command_table(self, args)
        return self.command_table

    def load_arguments(self, command):
        """Register argument metadata for the SQL commands."""
        from azure.cli.command_modules.sql._params import load_arguments
        load_arguments(self, command)
# Loader class exported for the Azure CLI core to pick up this module.
COMMAND_LOADER_CLS = SqlCommandsLoader
|
gacarrillor/QGIS | tests/src/python/test_qgsserver_wfs.py | Python | gpl-2.0 | 39,125 | 0.003169 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsServer WFS.
From build dir, run: ctest -R PyQgsServerWFS -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'René-Luc Dhont'
__date__ = '19/09/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import os
# Needed on Qt 5 so that the serialization of XML is consistent among all executions
os.environ['QT_HASH_SEED'] = '1'
import re
import urllib.request
import urllib.parse
import urllib.error
from qgis.server import QgsServerRequest
from qgis.testing import unittest
from qgis.PyQt.QtCore import QSize
from qgis.core import (
QgsVectorLayer,
QgsFeatureRequest,
QgsExpression,
QgsCoordinateReferenceSystem,
QgsCoordinateTransform,
QgsCoordinateTransformContext,
QgsGeometry,
)
import osgeo.gdal # NOQA
from test_qgsserver import QgsServerTestBase
# Strip path and content length because path may vary
RE_STRIP_UNCHECKABLE = br'MAP=[^"]+|Content-Length: \d+|timeStamp="[^"]+"'
RE_ATTRIBUTES = br'[^>\s]+=[^>\s]+'
class TestQgsServerWFS(QgsServerTestBase):
"""QGIS Server WFS Tests"""
# Set to True in child classes to re-generate reference files for this class
regenerate_reference = False
    def wfs_request_compare(self,
                            request, version='',
                            extra_query_string='',
                            reference_base_name=None,
                            project_file="test_project_wfs.qgs",
                            requestMethod=QgsServerRequest.GetMethod,
                            data=None):
        """Execute a WFS request against `project_file` and compare the full
        response (headers + body) with a stored reference file.

        Returns the (header, body) pair of the response.
        """
        project = self.testdata_path + project_file
        assert os.path.exists(project), "Project file not found: " + project

        query_string = '?MAP=%s&SERVICE=WFS&REQUEST=%s' % (
            urllib.parse.quote(project), request)
        if version:
            query_string += '&VERSION=%s' % version
        if extra_query_string:
            query_string += '&%s' % extra_query_string

        header, body = self._execute_request(
            query_string, requestMethod=requestMethod, data=data)
        self.assert_headers(header, body)
        response = header + body

        # Reference file name: explicit override, or derived from the
        # request name (with a version suffix for 1.0.0).
        if reference_base_name is not None:
            reference_name = reference_base_name
        else:
            reference_name = 'wfs_' + request.lower()
        if version == '1.0.0':
            reference_name += '_1_0_0'
        reference_name += '.txt'

        reference_path = self.testdata_path + reference_name
        # Re-writes the reference when regenerate_reference is enabled.
        self.store_reference(reference_path, response)
        f = open(reference_path, 'rb')
        expected = f.read()
        f.close()

        # Strip the bits that legitimately vary between runs
        # (map path, content length, timestamps) before comparing.
        response = re.sub(RE_STRIP_UNCHECKABLE, b'', response)
        expected = re.sub(RE_STRIP_UNCHECKABLE, b'', expected)
        self.assertXMLEqual(response, expected, msg="request %s failed.\n Query: %s" % (
            query_string, request))
        return header, body
def test_operation_not_supported(self):
qs = '?MAP=%s&SERVICE=WFS&VERSION=1.1.0&REQUEST=NotAValidRequest' % urllib.parse.quote(self.projectPath)
self._assert_status_code(501, qs)
def test_project_wfs(self):
"""Test some WFS request"""
for request in ('GetCapabilities', 'DescribeFeatureType'):
self.wfs_request_compare(request)
self.wfs_request_compare(request, '1.0.0')
    def wfs_getfeature_compare(self, requestid, request):
        """Run a WFS 1.0.0 GetFeature request and diff it against the
        reference file named after `requestid`."""
        project = self.testdata_path + "test_project_wfs.qgs"
        assert os.path.exists(project), "Project file not found: " + project

        query_string = '?MAP=%s&SERVICE=WFS&VERSION=1.0.0&REQUEST=%s' % (
            urllib.parse.quote(project), request)
        header, body = self._execute_request(query_string)

        if requestid == 'hits':
            # The hits response carries a run-dependent timestamp; mask it
            # so the comparison stays deterministic.
            body = re.sub(br'timeStamp="\d+-\d+-\d+T\d+:\d+:\d+"',
                          b'timeStamp="****-**-**T**:**:**"', body)

        self.result_compare(
            'wfs_getfeature_' + requestid + '.txt',
            "request %s failed.\n Query: %s" % (
                query_string,
                request,
            ),
            header, body
        )
    def test_getfeature_invalid_typename(self):
        """Unknown TYPENAMEs must be reported in the error body, whether
        alone or mixed with a valid layer name."""
        project = self.testdata_path + "test_project_wfs.qgs"

        # a single invalid typename
        qs = "?" + "&".join(["%s=%s" % i for i in list({
            "MAP": urllib.parse.quote(project),
            "SERVICE": "WFS",
            "REQUEST": "GetFeature",
            "TYPENAME": "invalid"
        }.items())])
        header, body = self._execute_request(qs)
        self.assertTrue(b"TypeName 'invalid' could not be found" in body)

        # an invalid typename preceded by a valid one
        qs = "?" + "&".join(["%s=%s" % i for i in list({
            "MAP": urllib.parse.quote(project),
            "SERVICE": "WFS",
            "REQUEST": "GetFeature",
            "TYPENAME": "testlayer,invalid"
        }.items())])
        header, body = self._execute_request(qs)
        self.assertTrue(b"TypeName 'invalid' could not be found" in body)
def test_getfeature(self):
tests = []
tests.append(('nobbox', 'GetFeature&TYPENAME=testlayer'))
tests.append(
('startindex2', 'GetFeature&TYPENAME=testlayer&STARTINDEX=2'))
tests.append(('limit2', 'GetFeature&TYPENAME=testlayer&MAXFEATURES=2'))
tests.append(
('start1_limit1', 'GetFeature&TYPENAME=testlayer&MAXFEATURES=1&STARTINDEX=1'))
tests.append(
('srsname', 'GetFeature&TYPENAME=testlayer&SRSNAME=EPSG:3857'))
tests.append(('sortby', 'GetFeature&TYPENAME=testlayer&SORTBY=id D'))
tests.append(('hits', 'GetFeature&TYPENAME=testlayer&RESULTTYPE=hits'))
for id, req in tests:
self.wfs_getfeature_compare(id, req)
    def test_getfeature_exp_filter(self):
        """EXP_FILTER carries one semicolon-separated expression per layer."""
        # multiple filters
        exp_filter = "EXP_FILTER=\"name\"='one';\"name\"='two'"
        req = f"SRSNAME=EPSG:4326&TYPENAME=testlayer,testlayer&{exp_filter}"
        self.wfs_request_compare(
            "GetFeature", '1.0.0', req, 'wfs_getFeature_exp_filter_2')
def test_wfs_getcapabilities_100_url(self):
"""Check that URL in GetCapabilities response is complete"""
# empty url in project
project = os.path.join(
self.testdata_path, "test_project_without_urls.qgs")
qs = "?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(project),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "GetCapabilities"
}.items())])
r, h = self._result(self._execute_request(qs))
for item in str(r).split("\\n"):
if "onlineResource" in item:
self.assertEqual("onlineResource=\"?" in item, True)
# url well defined in query string
project = os.path.join(
self.testdata_path, "test_project_without_urls.qgs")
qs = "https://www.qgis-server.org?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(project),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "GetCapabilities"
}.items())])
r, h = self._result(self._execute_request(qs))
for item in str(r).split("\\n"):
if "onlineResource" in item:
self.assertTrue(
"onlineResource=\"https://www.qgis-server.org?" in item, True)
# url well defined in project
project = os.path.join(
self.testdata_path, "test_project_with_urls.qgs")
qs = "?" + "&".join(["%s=%s" % i for i in list({
"MAP": urllib.parse.quote(project),
"SERVICE": "WFS",
"VERSION": "1.0.0",
"REQUEST": "GetCapabilities"
}.items())])
r, h = self._result(self._execute_request(qs))
for item in str(r).split("\\n"):
if "onlineResource" in item:
self.a |
evanhenri/RNN-Trading-Bot | pyTrader/file_check.py | Python | mit | 3,068 | 0.003259 | import os, sqlite3
from sys import exit
missing_auth_dir = ' neural-network-trading-bot/pyTrader/auth not found.\n' \
' auth directory has been created'
missing_data_dir = ' neural-network-trading-bot/pyTrader/data not found.\n' \
' data directory has been created'
missing_nonce_db = ' neural-network-trading-bot/pyTrader/auth/nonce.sqlite not found.\n' \
' First db record must contain nonce value\n' \
' expected by BTC-e API (0 if no prior trade\n' \
' API requests have been made). Nonce has been\n' \
' set to 1'
missing_credentials_json = ' neur | al-network-trading-bot/pyTrader/auth/api-credentials.json not found.\n' \
' Credentials json format should be\n' \
' {"api_secret": "your-account-secret", "api_key": "your-api-key"}'
missing_directory = []
missing_data = []
parent_dir = os.path.dirname(os.path.realpath('')) + '/'
nonce_file = 'auth/nonce.sqlite'
output_file = parent_dir + 'build/data/output.sqlite'
|
def notify_missing_directory():
    """Print every queued missing-directory notice, blank-line separated."""
    for notice in missing_directory:
        print(notice + '\n')
def notify_missing_data():
    """Print every queued missing-file notice, blank-line separated."""
    for notice in missing_data:
        print(notice + '\n')
def check_data_files():
    """Verify that the auth/data directories and credential files exist.

    Missing directories are created on the fly; a missing credentials file
    is created with placeholder content and a missing nonce database is
    seeded with 1.  If any required file had to be created, the collected
    notices are printed and the process exits with a non-zero status so
    callers do not proceed with unusable placeholder data.
    """
    if not os.path.exists('auth'):
        os.makedirs('auth')
        missing_directory.append(missing_auth_dir)
    else:
        print('Detected auth directory')

    if not os.path.exists(parent_dir + 'build/data'):
        os.makedirs(parent_dir + 'build/data')
        missing_directory.append(missing_data_dir)
    else:
        print('Detected data directory')

    if not os.path.isfile('auth/api-credentials.json'):
        # Placeholder for the user to fill in with real API credentials.
        # (no explicit close() needed: the with-block closes the file)
        with open('auth/api-credentials.json', 'w') as temp_api_cred:
            temp_api_cred.write('{"api_secret": "your-account-secret-goes-here", "api_key": "your-api-key-goes-here"}')
        missing_data.append(missing_credentials_json)
    else:
        print('Detected api-credentials.json')

    if not os.path.isfile('auth/nonce.sqlite'):
        # Seed the nonce db with 1 (no prior BTC-e API requests made).
        new_nonce_db = sqlite3.connect('auth/nonce.sqlite')
        try:
            new_nonce_cursor = new_nonce_db.cursor()
            new_nonce_cursor.execute('CREATE TABLE IF NOT EXISTS nonce (current_nonce INTEGER)')
            new_nonce_cursor.execute('INSERT INTO nonce (current_nonce) VALUES(1)')
            new_nonce_db.commit()
        finally:
            # Always release the connection, even if the INSERT fails.
            new_nonce_db.close()
        missing_data.append(missing_nonce_db)
    else:
        print('Detected nonce.sqlite')

    if len(missing_directory) > 0 or len(missing_data) > 0:
        print('\n\n**************Missing Data Detected**************\n')
        if len(missing_directory) > 0:
            notify_missing_directory()
        if len(missing_data) > 0:
            notify_missing_data()
        print('*************************************************\n')
    if len(missing_data) > 0:
        # Bug fix: was exit(0), which signalled success to the caller even
        # though required data is missing.  Exit non-zero instead.
        exit(1)
    else:
        print('All required directories and files detected')
|
josenavas/QiiTa | qiita_pet/handlers/study_handlers/tests/test_ebi_handlers.py | Python | bsd-3-clause | 1,996 | 0 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import main
from os import remove, close
from os.path import exists
from tempfile import mkstemp
from h5py import File
from mock import Mock
from qiita_files.demux import to_hdf5
from qiita_pet.handlers.base_handlers import BaseHandler
from qiita_pet.test.tornado_test_base import TestHandlerBase
from qiita_db.artifact import Artifact
from qiita_db.user import User
class TestEBISubmitHandler(TestHandlerBase):
    """Handler tests for the admin-only /ebi_submission/<artifact_id> page."""

    # TODO: add tests for post function once we figure out how. Issue 567

    def setUp(self):
        super(TestEBISubmitHandler, self).setUp()
        # Paths created during a test; removed again in tearDown.
        self._clean_up_files = []

    def tearDown(self):
        for fp in self._clean_up_files:
            if exists(fp):
                remove(fp)

    def test_get(self):
        """An admin can view the submission page for artifact 2 (HTTP 200)."""
        # Regenerate the demux HDF5 file backing the artifact from a tiny
        # one-sequence FASTA so the page has data to render.
        demux_fp = [fp for _, fp, fp_type in Artifact(2).filepaths
                    if fp_type == 'preprocessed_demux'][0]
        fd, fna_fp = mkstemp(suffix='_seqs.fna')
        close(fd)
        self._clean_up_files.extend([fna_fp, demux_fp])
        with open(fna_fp, 'w') as f:
            f.write('>a_1 X orig_bc=X new_bc=X bc_diffs=0\nCCC')
        with File(demux_fp, "w") as f:
            to_hdf5(fna_fp, f)
        # Impersonate an admin user for this request.
        BaseHandler.get_current_user = Mock(return_value=User("admin@foo.bar"))
        response = self.get("/ebi_submission/2")
        self.assertEqual(response.code, 200)

    def test_get_no_admin(self):
        # Non-admin users are rejected with 403 Forbidden.
        response = self.get("/ebi_submission/2")
        self.assertEqual(response.code, 403)

    def test_get_no_exist(self):
        # Unknown artifact ids return 404 Not Found.
        response = self.get('/ebi_submission/100')
        self.assertEqual(response.code, 404)
# Allow running this test module directly.
if __name__ == "__main__":
    main()
|
mscuthbert/abjad | abjad/tools/scoretools/test/test_scoretools_Measure_should_scale_contents.py | Python | gpl-3.0 | 544 | 0.001838 | # -*- encoding: utf-8 -*-
from abjad import *
def test_scoretools_Measure_should_scale_contents_01():
    r'''With implicit_scaling=False, a 5/12 measure formats its tuplet with
    the tuplet's own 2/3 prolation (including the edge-height tweak), as
    shown in the expected LilyPond output below.
    '''

    tuplet = Tuplet((2, 3), "c'8 d'8 e'8 f'8 g'8")
    measure = Measure((5, 12), [tuplet], implicit_scaling=False)

    assert systemtools.TestManager.compare(
        measure,
        r'''
        {
            \time 5/12
            \tweak #'edge-height #'(0.7 . 0)
            \times 2/3 {
                c'8
                d'8
                e'8
                f'8
                g'8
            }
        }
        '''
        )
) |
Ali-aqrabawi/ezclinic | lib/djangae/contrib/gauth/backends.py | Python | mit | 482 | 0.006224 | """This file exists for backwards compatability.
Pleas | e use the separate backends found in either `djangae.contrib.gauth.datastore.backends` or
`djangae.contrib.gauth.sql.backends`.
"""
import warnings
from djangae.contrib.gauth.datastore.backends impo | rt AppEngineUserAPIBackend
warnings.warn(
'AppEngineUserAPI is deprecated. Please use the specific backends from gauth.datastore '
'or gauth.sql instead.'
)
class AppEngineUserAPI(AppEngineUserAPIBackend):
    """Deprecated alias for the datastore backend; see the module docstring."""
    pass
|
weiting-chen/manila | contrib/tempest/tempest/api/share/admin/test_share_servers_negative.py | Python | apache-2.0 | 4,715 | 0 | # Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2 | .0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permission | s and limitations
# under the License.
from tempest_lib.common.utils import data_utils # noqa
from tempest_lib import exceptions as lib_exc # noqa
from tempest.api.share import base
from tempest import clients_share as clients
from tempest import test
class ShareServersNegativeAdminTest(base.BaseSharesAdminTest):
    """Negative tests for the admin-only share-servers API.

    Verifies that non-admin (member) clients are rejected with Forbidden,
    that bogus ids raise NotFound, and that bogus filters simply yield
    empty listings rather than errors.
    """

    @classmethod
    def resource_setup(cls):
        super(ShareServersNegativeAdminTest, cls).resource_setup()
        # Plain (non-admin) shares client used to verify access control.
        cls.member_shares_client = clients.Manager().shares_client

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_try_list_share_servers_with_member(self):
        # Listing share servers is admin-only.
        self.assertRaises(lib_exc.Forbidden,
                          self.member_shares_client.list_share_servers)

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_try_show_share_server_with_member(self):
        # Showing a share server is admin-only.
        self.assertRaises(lib_exc.Forbidden,
                          self.member_shares_client.show_share_server,
                          'fake_id')

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_try_show_share_server_details_with_member(self):
        # Showing share server details is admin-only.
        self.assertRaises(lib_exc.Forbidden,
                          self.member_shares_client.show_share_server_details,
                          'fake_id')

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_show_share_server_with_inexistent_id(self):
        self.assertRaises(lib_exc.NotFound,
                          self.shares_client.show_share_server,
                          'fake_id')

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_show_share_server_details_with_inexistent_id(self):
        self.assertRaises(lib_exc.NotFound,
                          self.shares_client.show_share_server_details,
                          'fake_id')

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_list_share_servers_with_wrong_filter_key(self):
        # Unknown filter keys match nothing instead of raising.
        search_opts = {'fake_filter_key': 'ACTIVE'}
        servers = self.shares_client.list_share_servers(search_opts)
        self.assertEqual(len(servers), 0)

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_list_share_servers_with_wrong_filter_value(self):
        # A non-string filter value matches nothing instead of raising.
        search_opts = {'host': 123}
        servers = self.shares_client.list_share_servers(search_opts)
        self.assertEqual(len(servers), 0)

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_list_share_servers_with_fake_status(self):
        search_opts = {"status": data_utils.rand_name("fake_status")}
        servers = self.shares_client.list_share_servers(search_opts)
        self.assertEqual(len(servers), 0)

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_list_share_servers_with_fake_host(self):
        search_opts = {"host": data_utils.rand_name("fake_host")}
        servers = self.shares_client.list_share_servers(search_opts)
        self.assertEqual(len(servers), 0)

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_list_share_servers_with_fake_project(self):
        search_opts = {"project_id": data_utils.rand_name("fake_project_id")}
        servers = self.shares_client.list_share_servers(search_opts)
        self.assertEqual(len(servers), 0)

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_list_share_servers_with_fake_share_network(self):
        search_opts = {
            "share_network": data_utils.rand_name("fake_share_network"),
        }
        servers = self.shares_client.list_share_servers(search_opts)
        self.assertEqual(len(servers), 0)

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_delete_share_server_with_nonexistent_id(self):
        self.assertRaises(lib_exc.NotFound,
                          self.shares_client.delete_share_server,
                          "fake_nonexistent_share_server_id")

    @test.attr(type=["gate", "smoke", "negative", ])
    def test_delete_share_server_with_member(self):
        # Deleting a share server is admin-only.
        self.assertRaises(lib_exc.Forbidden,
                          self.member_shares_client.delete_share_server,
                          "fake_nonexistent_share_server_id")
|
samwhelp/demo-libchewing | python/prototype/chewing_userphrase_remove/20-module/main.py | Python | mit | 211 | 0.030303 | #!/usr/bin/e | nv python3
from chewing import userphrase
def main():
    """Remove one sample phrase/bopomofo pair from the user-phrase store
    and echo the result returned by the library."""
    phrase = '內稽'
    bopomofo = 'ㄋㄟˋ ㄐㄧ'
    print(userphrase.remove(phrase, bopomofo))
# Script entry point.
if __name__ == '__main__':
    main()
|
Wintellect/WintellectWebinars | 2019-06-06-ten-tips-python-web-devs-kennedy/code/top_10_web_explore/ex02_ngrok/pypi_org/app.py | Python | apache-2.0 | 680 | 0 | import | os
import flask
import pypi_org.data.db_session as db_session
app = flask.Flask(__name__)
def main():
    """Configure the app (blueprints, then database) and run the dev server."""
    register_blueprints()
    setup_db()
    # debug=True: development server only, not for production use.
    app.run(debug=True)
def setup_db():
    """Point the global SQLAlchemy session at the bundled SQLite database."""
    package_dir = os.path.dirname(__file__)
    db_file = os.path.join(package_dir, 'db', 'pypi.sqlite')
    db_session.global_init(db_file)
def register_blueprints():
    """Attach every view blueprint to the Flask application."""
    from pypi_org.views import home_views
    from pypi_org.views import package_views
    from pypi_org.views import cms_views

    # Registration order preserved: package, home, cms.
    for view_module in (package_views, home_views, cms_views):
        app.register_blueprint(view_module.blueprint)
if __name__ == '__main__':
main()
|
goldsborough/your_app | setup.py | Python | mit | 1,451 | 0.024121 | #!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
setup.py script for setuptools.
"""

import re

from setuptools import setup, find_packages

# Read the version string out of the package's __init__ rather than
# importing the package (importing could pull in heavy dependencies).
version = ''
with open('your_app/__init__.py') as init:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
                        init.read(), re.MULTILINE).group(1)

with open('README.rst') as readme:
    long_description = readme.read()

requirements = [
    # Your project's requirements
]

test_requirements = [
    "unittest2==1.1.0",
    "python-coveralls==2.5.0"
]

setup(
    name='your_app',
    version=version,
    description='Your Python application.',
    long_description=long_description,
    author='You',
    author_email='your@email',
    url="https://github.com/goldsborough/your_app",
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        # Fixed: two classifier strings below were corrupted by stray ' | '
        # split markers ('Pr | ogramming...', 'Pyt | hon :: 3.4').
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    keywords='your_app',
    include_package_data=True,
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    install_requires=requirements,
    test_suite="tests",
    tests_require=test_requirements
)
|
raviagarwal7/buck | docs/soy2html.py | Python | apache-2.0 | 3,176 | 0.000315 | #!/usr/bin/env python
#
# This script is designed to be invoked by soy2html.sh.
# Usage:
#
# buck/docs$ python soy2html.py <output_dir>
#
# This will write all of the static content to the specified output directory.
# You may want to verify that things worked correctly by running:
#
# python -m SimpleHTTPServer <output_dir>
#
# and then navigating to http://localhost:8000/.
#
# When this script is run, soyweb should already be running locally on port
# 9814 via ./docs/soyweb-prod.sh.
import os
import subprocess
import sys
import time
URL_ROOT = 'http://localhost:9814/'
def main(output_dir):
    """Mirror the soyweb-rendered docs into *output_dir* as static files.

    .soy templates are fetched as rendered HTML from the local soyweb
    server; known static assets are copied from disk verbatim.
    """
    # Iterate over the files in the docs directory and copy them, as
    # appropriate.
    for root, dirs, files in os.walk('.'):
        for file_name in files:
            if file_name.endswith('.soy') and not file_name.startswith('__'):
                # Strip the './' prefix, if appropriate.
                if root.startswith('./'):
                    root = root[2:]
                # Construct the URL where the .soy file is being served.
                soy_file = file_name
                html_file = root + '/' + soy_file[:-len('.soy')] + '.html'
                url = URL_ROOT + html_file
                copy_dest = ensure_dir(html_file, output_dir)
                # --fail makes curl exit non-zero on HTTP errors, so
                # check_call raises instead of saving an error page.
                subprocess.check_call([
                    "curl", "--fail", "--output", copy_dest, url
                ])
            elif (file_name == ".nojekyll" or
                  file_name == "CNAME" or
                  file_name.endswith('.css') or
                  file_name.endswith('.jpg') or
                  file_name.endswith('.js') or
                  file_name.endswith('.png') or
                  file_name.endswith('.gif') or
                  file_name.endswith('.html') or
                  file_name.endswith('.md') or
                  file_name.endswith('.svg') or
                  file_name.endswith('.ttf') or
                  file_name.endswith('.txt')):
                # Copy the static resource to output_dir.
                relative_path = os.path.join(root, file_name)
                with open(relative_path) as resource_file:
                    resource = resource_file.read()
                copy_to_output_dir(relative_path, output_dir, resource)
def ensure_dir(path, output_dir):
    """Return the destination for *path* under *output_dir*, creating dirs.

    Any parent directories implied by the '/'-separated *path* are created
    beneath *output_dir* if they do not already exist.
    """
    last_slash = path.rfind('/')
    if last_slash != -1:
        output_subdir = os.path.join(output_dir, path[:last_slash])
        if not os.path.exists(output_subdir):
            # Fixed: this call was garbled as `os.makedirs(output | _subdir)`
            # by a stray split marker.
            os.makedirs(output_subdir)
    return os.path.join(output_dir, path)
def copy_to_output_dir(path, output_dir, content):
    """Write *content* to the mirrored location of *path* under *output_dir*."""
    destination = ensure_dir(path, output_dir)
    with open(destination, 'w') as out:
        out.write(content)
def pollForServerReady():
    """Wait (up to a few seconds) for the local soyweb server to answer."""
    SERVER_START_POLL = 5
    print 'Waiting for server to start.'
    for _ in range(0, SERVER_START_POLL):
        # curl -I issues a HEAD request; exit status 0 means it answered.
        result = subprocess.call(['curl', '--fail', '-I', URL_ROOT])
        if result == 0:
            return
        time.sleep(1)
    # NOTE(review): falls through without aborting; main() will then fail
    # on the first curl fetch if the server really is down.
    print 'Server failed to start after %s seconds.' % SERVER_START_POLL


if __name__ == '__main__':
    # Usage: soy2html.py <output_dir> (see the module docstring).
    output_dir = sys.argv[1]
    pollForServerReady()
    main(output_dir)
|
dparalen/dva | dva/test/testcase_03_running_services.py | Python | gpl-2.0 | 1,272 | 0.003931 | """ This module contains testcase_03_running_services test """
from testcase import Testcase
import yaml
class testcase_03_running_services(Testcase):
    """
    Check for running services
    """
    # Fixed: the docstring terminator above and the `ver` assignment below
    # were corrupted by stray ' | ' split markers.
    stages = ['stage1']
    tags = ['default']

    def test(self, connection, params):
        """Verify every expected service for this product/version is running."""
        prod = params['product'].upper()
        ver = params['version']
        with open(self.datadir + '/running_services.yaml') as expected_services_fd:
            all_services = yaml.safe_load(expected_services_fd)
        try:
            expected_services = all_services['%s_%s' % (prod, ver)]
        except KeyError:
            # No expectations recorded for this combination: skip, don't fail.
            self.log.append({
                'result': 'skip',
                'comment': 'unsupported region and/or product-version combination'})
            return self.log
        # systemd hosts use systemctl; SysV hosts fall back to chkconfig.
        is_systemd = self.get_result(connection, 'rpm -q systemd > /dev/null && echo True || echo False')
        if is_systemd == 'True':
            for service in expected_services:
                self.get_return_value(connection, 'systemctl is-active %s.service' % service)
        else:
            for service in expected_services:
                self.ping_pong(connection, 'chkconfig --list %s' % service, '3:on')
        return self.log
|
damoti/shipmaster | shipmaster/plugins/ssh/__init__.py | Python | bsd-3-clause | 54 | 0 | plugin_class = "shipmaster.plugins.ssh.ssh.SSHPlugin"
|
repotvsupertuga/tvsupertuga.repository | script.module.streamtvsupertuga/lib/resources/lib/sources/en/ultrahd.py | Python | gpl-2.0 | 6,083 | 0.013809 | # -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Yoda
# Addon id: plugin.video.Yoda
# Addon Provider: Supremacy
import re,traceback,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
from resources.lib.modules import dom_parser2
from resources.lib.modules import log_utils
from resources.lib.modules import source_utils
class source:
    def __init__(self):
        """Set the scraper metadata consumed by the source framework."""
        self.priority = 1
        self.language = ['en']
        self.domains = ['ultrahdindir.com']
        self.base_link = 'http://ultrahdindir.com'
        # Relative path of the site's search endpoint (POSTed to in sources()).
        self.post_link = '/index.php?do=search'
def movie(self, imdb, title, localtitle, aliases, year):
try:
| url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
failure = traceback.format_exc()
log_utils.log('UltraHD - Exception: \n' + str( | failure))
return
    def sources(self, url, hostDict, hostprDict):
        """Scrape ultrahdindir.com for download links for the given movie.

        *url* is the urlencoded imdb/title/year payload built by movie().
        Returns a list of source dicts (host, quality, url, info, ...);
        returns the (possibly empty) list on any error.
        """
        try:
            sources = []
            if url == None: return sources
            # This site only serves debrid-only hoster links.
            if debrid.status() is False: raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['title'].replace(':','').lower()
            year = data['year']
            # Sanitise the search query of characters the site chokes on.
            query = '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            url = urlparse.urljoin(self.base_link, self.post_link)
            post = 'do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=%s' % urllib.quote_plus(query)
            # POST the search, then keep only result boxes mentioning our imdb id.
            r = client.request(url, post=post)
            r = client.parseDOM(r, 'div', attrs={'class': 'box-out margin'})
            r = [(dom_parser2.parse_dom(i, 'div', attrs={'class':'news-title'})) for i in r if data['imdb'] in i]
            r = [(dom_parser2.parse_dom(i[0], 'a', req='href')) for i in r if i]
            r = [(i[0].attrs['href'], i[0].content) for i in r if i]
            hostDict = hostprDict + hostDict
            for item in r:
                try:
                    name = item[1]
                    # The (YYYY) in the result title must match the requested year.
                    y = re.findall('\((\d{4})\)', name)[0]
                    if not y == year: raise Exception()
                    s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', name)
                    s = s[0] if s else '0'
                    # Fetch the detail page and pull out (release name, link) pairs.
                    data = client.request(item[0])
                    data = dom_parser2.parse_dom(data, 'div', attrs={'id': 'r-content'})
                    data = re.findall('\s*<b><a href=.+?>(.+?)</b>.+?<u><b><a href="(.+?)".+?</a></b></u>',
                                      data[0].content, re.DOTALL)
                    u = [(i[0], i[1], s) for i in data if i]
                    for name, url, size in u:
                        try:
                            # Derive a quality label from markers in the release name.
                            if '4K' in name:
                                quality = '4K'
                            elif '1080p' in name:
                                quality = '1080p'
                            elif '720p' in name:
                                quality = '720p'
                            elif any(i in ['dvdscr', 'r5', 'r6'] for i in name):
                                quality = 'SCR'
                            elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts']
                                     for i in name):
                                quality = 'CAM'
                            else: quality = '720p'
                            info = []
                            if '3D' in name or '.3D.' in url: info.append('3D'); quality = '1080p'
                            if any(i in ['hevc', 'h265', 'x265'] for i in name): info.append('HEVC')
                            try:
                                # Normalise the size string to "<n>.<nn> GB".
                                size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', size)[-1]
                                div = 1 if size.endswith(('Gb', 'GiB', 'GB')) else 1024
                                size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                                size = '%.2f GB' % size
                                info.append(size)
                            except:
                                pass
                            info = ' | '.join(info)
                            url = client.replaceHTMLCodes(url)
                            url = url.encode('utf-8')
                            # Skip archives and unwanted releases.
                            if any(x in url for x in ['.rar', '.zip', '.iso', 'turk']):continue
                            if 'ftp' in url: host = 'COV'; direct = True;
                            else: direct = False; host= 'turbobit.net'
                            host = client.replaceHTMLCodes(host)
                            host = host.encode('utf-8')
                            sources.append({'source': host, 'quality': quality, 'language': 'en',
                                            'url': url, 'info': info, 'direct': direct, 'debridonly': True})
                        except:
                            pass
                except:
                    pass
            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('UltraHD - Exception: \n' + str(failure))
            return sources
def resolve(self, url):
return url
|
alphagov/digitalmarketplace-api | migrations/versions/1280_tidy_up_latent.py | Python | mit | 633 | 0.004739 | """tidy_up_latent
Tidy up a latent migration not previously picked up by alembic or ignored.
Revision ID: 1280
Revises: 1270
Create Date: 2019-06-10 15:51:48.661665
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1280'
down_revision = '1270'
def upgrade():
    """Drop the redundant unique constraint (a unique index now covers it)."""
    op.drop_constraint(
        'uq_direct_award_projects_external_id',
        'direct_award_projects',
        type_='unique',
    )
def downgrade():
    """Recreate the unique constraint removed by upgrade()."""
    # Fixed: the call was garbled as `op | .create_unique_constraint(...)`
    # by a stray split marker.
    op.create_unique_constraint('uq_direct_award_projects_external_id', 'direct_award_projects', ['external_id'])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.